diff --git a/Gopkg.lock b/Gopkg.lock index 7359d1c..68ff4d6 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1,12 +1,24 @@ # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. +[[projects]] + name = "github.com/go-stack/stack" + packages = ["."] + revision = "2fee6af1a9795aafbe0253a0cfbdf668e1fb8a9a" + version = "v1.8.0" + [[projects]] name = "github.com/golang/protobuf" packages = ["proto","ptypes","ptypes/any","ptypes/duration","ptypes/timestamp"] revision = "b5d812f8a3706043e23a9cd5babf2e5423744d30" version = "v1.3.1" +[[projects]] + name = "github.com/golang/snappy" + packages = ["."] + revision = "2a8bb927dd31d8daada140a5d09578521ce5c36a" + version = "v0.0.1" + [[projects]] name = "github.com/google/uuid" packages = ["."] @@ -38,15 +50,33 @@ version = "v2.1.2" [[projects]] - name = "github.com/satori/go.uuid" + branch = "master" + name = "github.com/streadway/amqp" + packages = ["."] + revision = "75d898a42a940fbc854dfd1a4199eabdc00cf024" + +[[projects]] + branch = "master" + name = "github.com/xdg/scram" + packages = ["."] + revision = "7eeb5667e42c09cb51bf7b7c28aea8c56767da90" + +[[projects]] + branch = "master" + name = "github.com/xdg/stringprep" packages = ["."] - revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3" - version = "v1.2.0" + revision = "73f8eece6fdcd902c185bf651de50f3828bed5ed" + +[[projects]] + name = "go.mongodb.org/mongo-driver" + packages = ["bson","bson/bsoncodec","bson/bsonrw","bson/bsontype","bson/primitive","event","internal","mongo","mongo/options","mongo/readconcern","mongo/readpref","mongo/writeconcern","tag","version","x/bsonx","x/bsonx/bsoncore","x/mongo/driver","x/mongo/driver/auth","x/mongo/driver/auth/internal/gssapi","x/mongo/driver/session","x/mongo/driver/topology","x/mongo/driver/uuid","x/network/address","x/network/command","x/network/compressor","x/network/connection","x/network/connstring","x/network/description","x/network/result","x/network/wiremessage"] + revision = 
"582ff343271e8893d785ff094855498c285bce0a" + version = "v1.0.3" [[projects]] branch = "master" name = "golang.org/x/crypto" - packages = ["bcrypt","blowfish"] + packages = ["bcrypt","blowfish","pbkdf2"] revision = "5c40567a22f818bd14a1ea7245dad9f8ef0691aa" [[projects]] @@ -55,6 +85,12 @@ packages = ["http/httpguts","http2","http2/hpack","idna","internal/timeseries","trace"] revision = "d28f0bde5980168871434b95cfc858db9f2a7a99" +[[projects]] + branch = "master" + name = "golang.org/x/sync" + packages = ["semaphore"] + revision = "112230192c580c3556b8cee6403af37a4fc5f28c" + [[projects]] branch = "master" name = "golang.org/x/sys" @@ -82,6 +118,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "326088dcfac41c8d718fd4dcfa7eba523b632f22288651ca3dadb91493aaa836" + inputs-digest = "137dce0c56494b4d2265f1f5b6f867cabf12bb65d5d96fd8d8498307f7f1fbef" solver-name = "gps-cdcl" solver-version = 1 diff --git a/cmd/go-event/client.go b/cmd/go-event/client.go new file mode 100644 index 0000000..16aaee8 --- /dev/null +++ b/cmd/go-event/client.go @@ -0,0 +1,69 @@ +package main + +import ( + "context" + "encoding/json" + "github.com/adriancarayol/go-event/config/mongo" + "github.com/adriancarayol/go-event/config/rabbit" + "github.com/adriancarayol/go-event/pkg/dao" + "go.mongodb.org/mongo-driver/bson" + "log" + "time" +) + +type Data struct { + Username string `json:"username"` + Email string `json:"email"` + Password string `json:"password"` +} + +func main() { + rabbit.Init() + mongo.Init() + + amqpConnection := rabbit.Get() + ch, err := amqpConnection.Channel() + + if err != nil { + log.Fatalln("Fail creating channel") + } + + channel, err := ch.Consume("user_events", "", false, false, false, false, nil) + + forever := make(chan bool) + + collection := mongo.Get().Database("testing").Collection("numbers") + + go func() { + ctx, _ := context.WithTimeout(context.Background(), 30*time.Second) + + for d := range channel { + log.Printf("Received a message: 
%s", d.Body) + var event dao.Event + var data Data + + err = json.Unmarshal(d.Body, &event) + + if err != nil { + log.Printf("Error: %s", err) + } + + err = json.Unmarshal([]byte(event.Data), &data) + + if err != nil { + log.Printf("Error: %s", err) + } + + res, err := collection.InsertOne(ctx, bson.M{"id": event.AggregateID.String(), "password": data.Password, "username": data.Username, "email": data.Email}) + + if err != nil { + log.Printf("Error: %s", err) + } + + log.Printf("Inserted into mongo: %s", res) + } + }() + + log.Printf(" [*] Waiting for messages. To exit press CTRL+C") + <-forever +} diff --git a/cmd/go-event/main.go b/cmd/go-event/main.go index 7cb7e47..9cc5e9a 100644 --- a/cmd/go-event/main.go +++ b/cmd/go-event/main.go @@ -2,6 +2,7 @@ package main import ( "github.com/adriancarayol/go-event/config/db" + "github.com/adriancarayol/go-event/config/rabbit" "github.com/adriancarayol/go-event/pkg/interfaces/registry" "github.com/adriancarayol/go-event/pkg/usecases" "log" @@ -9,6 +10,7 @@ import ( func main() { db.Init() + rabbit.Init() container, err := registry.NewContainer() if err != nil { @@ -21,4 +23,8 @@ func main() { if err != nil { log.Fatalf("Error registering user: %s", err) } + + if err := db.GetDB().Close(); err != nil { + log.Fatal("Error closing DB: %s", err) + } } diff --git a/config/mongo/mongo.go b/config/mongo/mongo.go new file mode 100644 index 0000000..60f49f3 --- /dev/null +++ b/config/mongo/mongo.go @@ -0,0 +1,41 @@ +package mongo + +import ( + "context" + "fmt" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + "go.mongodb.org/mongo-driver/mongo/readpref" + "log" + "time" +) + +var db *mongo.Client + +func Init() { + client, err := mongo.NewClient(options.Client().ApplyURI("mongodb://localhost:27017")) + + if err != nil { + log.Fatalf("Error creating mongodb: %s", err) + } + + // Connect the mongo client to the MongoDB server + ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) + 
err = client.Connect(ctx) + + // Ping MongoDB + ctx, _ = context.WithTimeout(context.Background(), 10*time.Second) + + if err = client.Ping(ctx, readpref.Primary()); err != nil { + fmt.Println("could not ping to mongo db service: %v\n", err) + return + } + + fmt.Println("connected to nosql database:") + + db = client +} + +func Get() *mongo.Client { + return db +} diff --git a/config/rabbit/rabbit.go b/config/rabbit/rabbit.go new file mode 100644 index 0000000..3fa2527 --- /dev/null +++ b/config/rabbit/rabbit.go @@ -0,0 +1,41 @@ +package rabbit + +import ( + "github.com/streadway/amqp" + "log" +) + +var amqpConnection *amqp.Connection + +func configureQueues() { + ch, err := amqpConnection.Channel() + if err != nil { + log.Fatalf("Error creating channel: %s", err) + } + + _, err = ch.QueueDeclare("user_events", true, true, false, false, nil) + + if err != nil { + log.Fatalf("Error creating queue: %s", err) + } + + if err := ch.Close(); err != nil { + log.Fatalf("Error closing rabbitmq channel: %s", err) + } +} + +func Init() { + conn, err := amqp.Dial("amqp://guest:guest@localhost:5672/") + + if err != nil { + log.Fatalf("Error connecting with RabbitMQ: %s", err) + } + + amqpConnection = conn + + configureQueues() +} + +func Get() *amqp.Connection { + return amqpConnection +} diff --git a/main b/main new file mode 100755 index 0000000..f706307 Binary files /dev/null and b/main differ diff --git a/pkg/dao/event.go b/pkg/dao/event.go new file mode 100644 index 0000000..2c252fc --- /dev/null +++ b/pkg/dao/event.go @@ -0,0 +1,14 @@ +package dao + +import ( + "github.com/google/uuid" +) + +type Event struct { + ID uuid.UUID + AggregateID uuid.UUID + AggregateType string + Type string + Version uint64 + Data string +} diff --git a/pkg/dao/user.go b/pkg/dao/user.go index 7247e15..a96b7a6 100644 --- a/pkg/dao/user.go +++ b/pkg/dao/user.go @@ -1,7 +1,6 @@ package dao import ( - "github.com/adriancarayol/go-event/pkg/domain/model" "github.com/google/uuid" ) @@ -9,16 +8,4 @@ 
type User struct { ID uuid.UUID Email string Username string -} - -func toUser(users []*model.User) []*User { - res := make([]*User, len(users)) - for i, user := range users { - res[i] = &User{ - ID: user.GetID(), - Email: user.GetEmail(), - Username: user.GetUsername(), - } - } - return res } \ No newline at end of file diff --git a/pkg/domain/eventbus.go b/pkg/domain/eventbus.go new file mode 100644 index 0000000..e38bb6c --- /dev/null +++ b/pkg/domain/eventbus.go @@ -0,0 +1,9 @@ +package domain + +import ( + "github.com/adriancarayol/go-event/pkg/dao" +) + +type EventBus interface { + Publish(event dao.Event) error +} diff --git a/pkg/domain/service/user.go b/pkg/domain/service/user.go index b0c0dc5..464c529 100644 --- a/pkg/domain/service/user.go +++ b/pkg/domain/service/user.go @@ -7,14 +7,12 @@ import ( ) type UserService struct { - repo repository.UserRepository - eventStore repository.EventStore + repo repository.UserRepository } -func NewUserService(repo repository.UserRepository, eventStore repository.EventStore) *UserService { +func NewUserService(repo repository.UserRepository) *UserService { return &UserService{ - eventStore: eventStore, - repo: repo, + repo: repo, } } @@ -44,4 +42,4 @@ func (s *UserService) CheckIfExistUsername(username string) error { } return nil -} \ No newline at end of file +} diff --git a/pkg/interfaces/eventbus.go b/pkg/interfaces/eventbus.go new file mode 100644 index 0000000..a467fdb --- /dev/null +++ b/pkg/interfaces/eventbus.go @@ -0,0 +1,45 @@ +package interfaces + +import ( + "encoding/json" + "github.com/adriancarayol/go-event/config/rabbit" + "github.com/adriancarayol/go-event/pkg/dao" + "github.com/streadway/amqp" + "log" +) + +type eventBusRabbitMQ struct { + amqp *amqp.Connection +} + +func NewEventBus() *eventBusRabbitMQ { + return &eventBusRabbitMQ{ + amqp: rabbit.Get(), + } +} + +func (b *eventBusRabbitMQ) Publish(event dao.Event) error { + amqpConnection := rabbit.Get() + ch, err := amqpConnection.Channel() + + 
if err != nil { + log.Printf("Error creating rabbitmq channel: %s", err) + return err + } + + payload, err := json.Marshal(event) + + if err != nil { + log.Printf("Error marshal event: %s", err) + return err + } + + err = ch.Publish("", "user_events", false, false, amqp.Publishing{DeliveryMode: amqp.Persistent, ContentType: "text/plain", Body: payload}) + + if err != nil { + log.Printf("Error publishing message in rabbitmq: %s", err) + return err + } + + return nil +} diff --git a/pkg/interfaces/registry/user.go b/pkg/interfaces/registry/user.go index ce3ae4c..0c7ceea 100644 --- a/pkg/interfaces/registry/user.go +++ b/pkg/interfaces/registry/user.go @@ -40,6 +40,7 @@ func (c *Container) Clean() error { func buildUserUseCase(ctn di.Container) (interface{}, error) { repo := interfaces.NewUserRepository() eventStore := interfaces.NewEventStore() - userService := service.NewUserService(repo, eventStore) - return usecases.NewUserUsecase(repo, eventStore, userService), nil + eventBus := interfaces.NewEventBus() + userService := service.NewUserService(repo) + return usecases.NewUserUsecase(repo, eventStore, eventBus, userService), nil } diff --git a/pkg/usecases/user.go b/pkg/usecases/user.go index c6c64b2..22e35b2 100644 --- a/pkg/usecases/user.go +++ b/pkg/usecases/user.go @@ -1,6 +1,8 @@ package usecases import ( + "github.com/adriancarayol/go-event/pkg/dao" + "github.com/adriancarayol/go-event/pkg/domain" "github.com/adriancarayol/go-event/pkg/domain/commands" "github.com/adriancarayol/go-event/pkg/domain/model" "github.com/adriancarayol/go-event/pkg/domain/repository" @@ -16,13 +18,15 @@ type UserUseCase interface { type userUseCase struct { repo repository.UserRepository eventStore repository.EventStore + eventBus domain.EventBus service *service.UserService } -func NewUserUsecase(repo repository.UserRepository, eventStore repository.EventStore, service *service.UserService) *userUseCase { +func NewUserUsecase(repo repository.UserRepository, eventStore 
repository.EventStore, eventBus domain.EventBus, service *service.UserService) *userUseCase { return &userUseCase{ repo: repo, eventStore: eventStore, + eventBus: eventBus, service: service, } } @@ -72,5 +76,27 @@ func (u *userUseCase) RegisterUser(email, username, password string) error { return err } + for _, event := range user.Events { + var eventDao dao.Event + + payload, err := event.Data.Value() + + if err != nil { + return err + } + + data := payload.([]byte) + eventDao.ID = event.ID + eventDao.AggregateID = event.AggregateID + eventDao.AggregateType = event.AggregateType + eventDao.Version = event.Version + eventDao.Type = string(event.Type) + eventDao.Data = string(data) + + if err := u.eventBus.Publish(eventDao); err != nil { + return err + } + } + return nil } diff --git a/vendor/github.com/go-stack/stack/.travis.yml b/vendor/github.com/go-stack/stack/.travis.yml new file mode 100644 index 0000000..5c5a2b5 --- /dev/null +++ b/vendor/github.com/go-stack/stack/.travis.yml @@ -0,0 +1,15 @@ +language: go +sudo: false +go: + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - 1.11.x + - tip + +before_install: + - go get github.com/mattn/goveralls + +script: + - goveralls -service=travis-ci diff --git a/vendor/github.com/go-stack/stack/LICENSE.md b/vendor/github.com/go-stack/stack/LICENSE.md new file mode 100644 index 0000000..2abf98e --- /dev/null +++ b/vendor/github.com/go-stack/stack/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Chris Hines + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be 
included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/go-stack/stack/README.md b/vendor/github.com/go-stack/stack/README.md new file mode 100644 index 0000000..f11cccc --- /dev/null +++ b/vendor/github.com/go-stack/stack/README.md @@ -0,0 +1,38 @@ +[![GoDoc](https://godoc.org/github.com/go-stack/stack?status.svg)](https://godoc.org/github.com/go-stack/stack) +[![Go Report Card](https://goreportcard.com/badge/go-stack/stack)](https://goreportcard.com/report/go-stack/stack) +[![TravisCI](https://travis-ci.org/go-stack/stack.svg?branch=master)](https://travis-ci.org/go-stack/stack) +[![Coverage Status](https://coveralls.io/repos/github/go-stack/stack/badge.svg?branch=master)](https://coveralls.io/github/go-stack/stack?branch=master) + +# stack + +Package stack implements utilities to capture, manipulate, and format call +stacks. It provides a simpler API than package runtime. + +The implementation takes care of the minutia and special cases of interpreting +the program counter (pc) values returned by runtime.Callers. + +## Versioning + +Package stack publishes releases via [semver](http://semver.org/) compatible Git +tags prefixed with a single 'v'. The master branch always contains the latest +release. The develop branch contains unreleased commits. + +## Formatting + +Package stack's types implement fmt.Formatter, which provides a simple and +flexible way to declaratively configure formatting when used with logging or +error tracking packages. 
+ +```go +func DoTheThing() { + c := stack.Caller(0) + log.Print(c) // "source.go:10" + log.Printf("%+v", c) // "pkg/path/source.go:10" + log.Printf("%n", c) // "DoTheThing" + + s := stack.Trace().TrimRuntime() + log.Print(s) // "[source.go:15 caller.go:42 main.go:14]" +} +``` + +See the docs for all of the supported formatting options. diff --git a/vendor/github.com/go-stack/stack/format_test.go b/vendor/github.com/go-stack/stack/format_test.go new file mode 100644 index 0000000..013ad67 --- /dev/null +++ b/vendor/github.com/go-stack/stack/format_test.go @@ -0,0 +1,21 @@ +// +build go1.2 + +package stack_test + +import ( + "fmt" + + "github.com/go-stack/stack" +) + +func Example_callFormat() { + logCaller("%+s") + logCaller("%v %[1]n()") + // Output: + // github.com/go-stack/stack/format_test.go + // format_test.go:13 Example_callFormat() +} + +func logCaller(format string) { + fmt.Printf(format+"\n", stack.Caller(1)) +} diff --git a/vendor/github.com/go-stack/stack/go.mod b/vendor/github.com/go-stack/stack/go.mod new file mode 100644 index 0000000..96a53a1 --- /dev/null +++ b/vendor/github.com/go-stack/stack/go.mod @@ -0,0 +1 @@ +module github.com/go-stack/stack diff --git a/vendor/github.com/go-stack/stack/stack-go19_test.go b/vendor/github.com/go-stack/stack/stack-go19_test.go new file mode 100644 index 0000000..d7aeea2 --- /dev/null +++ b/vendor/github.com/go-stack/stack/stack-go19_test.go @@ -0,0 +1,67 @@ +// +build go1.9 + +package stack_test + +import ( + "runtime" + "testing" + + "github.com/go-stack/stack" +) + +func TestCallerInlinedPanic(t *testing.T) { + t.Parallel() + + var line int + + defer func() { + if recover() != nil { + var pcs [32]uintptr + n := runtime.Callers(1, pcs[:]) + frames := runtime.CallersFrames(pcs[:n]) + // count frames to runtime.sigpanic + panicIdx := 0 + for { + f, more := frames.Next() + if f.Function == "runtime.sigpanic" { + break + } + panicIdx++ + if !more { + t.Fatal("no runtime.sigpanic entry on the stack") + } + } + + c 
:= stack.Caller(panicIdx) + if got, want := c.Frame().Function, "runtime.sigpanic"; got != want { + t.Errorf("sigpanic frame: got name == %v, want name == %v", got, want) + } + + c1 := stack.Caller(panicIdx + 1) + if got, want := c1.Frame().Function, "github.com/go-stack/stack_test.inlinablePanic"; got != want { + t.Errorf("TestCallerInlinedPanic frame: got name == %v, want name == %v", got, want) + } + if got, want := c1.Frame().Line, line; got != want { + t.Errorf("TestCallerInlinedPanic frame: got line == %v, want line == %v", got, want) + } + } + }() + + doPanic(t, &line) + t.Fatal("failed to panic") +} + +func doPanic(t *testing.T, panicLine *int) { + _, _, line, ok := runtime.Caller(0) + *panicLine = line + 11 // adjust to match line of panic below + if !ok { + t.Fatal("runtime.Caller(0) failed") + } + inlinablePanic() +} + +func inlinablePanic() { + // Initiate a sigpanic. + var x *uintptr + _ = *x +} diff --git a/vendor/github.com/go-stack/stack/stack.go b/vendor/github.com/go-stack/stack/stack.go new file mode 100644 index 0000000..ac3b93b --- /dev/null +++ b/vendor/github.com/go-stack/stack/stack.go @@ -0,0 +1,400 @@ +// +build go1.7 + +// Package stack implements utilities to capture, manipulate, and format call +// stacks. It provides a simpler API than package runtime. +// +// The implementation takes care of the minutia and special cases of +// interpreting the program counter (pc) values returned by runtime.Callers. +// +// Package stack's types implement fmt.Formatter, which provides a simple and +// flexible way to declaratively configure formatting when used with logging +// or error tracking packages. +package stack + +import ( + "bytes" + "errors" + "fmt" + "io" + "runtime" + "strconv" + "strings" +) + +// Call records a single function invocation from a goroutine stack. +type Call struct { + frame runtime.Frame +} + +// Caller returns a Call from the stack of the current goroutine. 
The argument +// skip is the number of stack frames to ascend, with 0 identifying the +// calling function. +func Caller(skip int) Call { + // As of Go 1.9 we need room for up to three PC entries. + // + // 0. An entry for the stack frame prior to the target to check for + // special handling needed if that prior entry is runtime.sigpanic. + // 1. A possible second entry to hold metadata about skipped inlined + // functions. If inline functions were not skipped the target frame + // PC will be here. + // 2. A third entry for the target frame PC when the second entry + // is used for skipped inline functions. + var pcs [3]uintptr + n := runtime.Callers(skip+1, pcs[:]) + frames := runtime.CallersFrames(pcs[:n]) + frame, _ := frames.Next() + frame, _ = frames.Next() + + return Call{ + frame: frame, + } +} + +// String implements fmt.Stinger. It is equivalent to fmt.Sprintf("%v", c). +func (c Call) String() string { + return fmt.Sprint(c) +} + +// MarshalText implements encoding.TextMarshaler. It formats the Call the same +// as fmt.Sprintf("%v", c). +func (c Call) MarshalText() ([]byte, error) { + if c.frame == (runtime.Frame{}) { + return nil, ErrNoFunc + } + + buf := bytes.Buffer{} + fmt.Fprint(&buf, c) + return buf.Bytes(), nil +} + +// ErrNoFunc means that the Call has a nil *runtime.Func. The most likely +// cause is a Call with the zero value. +var ErrNoFunc = errors.New("no call stack information") + +// Format implements fmt.Formatter with support for the following verbs. +// +// %s source file +// %d line number +// %n function name +// %k last segment of the package path +// %v equivalent to %s:%d +// +// It accepts the '+' and '#' flags for most of the verbs as follows. 
+// +// %+s path of source file relative to the compile time GOPATH, +// or the module path joined to the path of source file relative +// to module root +// %#s full path of source file +// %+n import path qualified function name +// %+k full package path +// %+v equivalent to %+s:%d +// %#v equivalent to %#s:%d +func (c Call) Format(s fmt.State, verb rune) { + if c.frame == (runtime.Frame{}) { + fmt.Fprintf(s, "%%!%c(NOFUNC)", verb) + return + } + + switch verb { + case 's', 'v': + file := c.frame.File + switch { + case s.Flag('#'): + // done + case s.Flag('+'): + file = pkgFilePath(&c.frame) + default: + const sep = "/" + if i := strings.LastIndex(file, sep); i != -1 { + file = file[i+len(sep):] + } + } + io.WriteString(s, file) + if verb == 'v' { + buf := [7]byte{':'} + s.Write(strconv.AppendInt(buf[:1], int64(c.frame.Line), 10)) + } + + case 'd': + buf := [6]byte{} + s.Write(strconv.AppendInt(buf[:0], int64(c.frame.Line), 10)) + + case 'k': + name := c.frame.Function + const pathSep = "/" + start, end := 0, len(name) + if i := strings.LastIndex(name, pathSep); i != -1 { + start = i + len(pathSep) + } + const pkgSep = "." + if i := strings.Index(name[start:], pkgSep); i != -1 { + end = start + i + } + if s.Flag('+') { + start = 0 + } + io.WriteString(s, name[start:end]) + + case 'n': + name := c.frame.Function + if !s.Flag('+') { + const pathSep = "/" + if i := strings.LastIndex(name, pathSep); i != -1 { + name = name[i+len(pathSep):] + } + const pkgSep = "." + if i := strings.Index(name, pkgSep); i != -1 { + name = name[i+len(pkgSep):] + } + } + io.WriteString(s, name) + } +} + +// Frame returns the call frame infomation for the Call. +func (c Call) Frame() runtime.Frame { + return c.frame +} + +// PC returns the program counter for this call frame; multiple frames may +// have the same PC value. +// +// Deprecated: Use Call.Frame instead. 
+func (c Call) PC() uintptr { + return c.frame.PC +} + +// CallStack records a sequence of function invocations from a goroutine +// stack. +type CallStack []Call + +// String implements fmt.Stinger. It is equivalent to fmt.Sprintf("%v", cs). +func (cs CallStack) String() string { + return fmt.Sprint(cs) +} + +var ( + openBracketBytes = []byte("[") + closeBracketBytes = []byte("]") + spaceBytes = []byte(" ") +) + +// MarshalText implements encoding.TextMarshaler. It formats the CallStack the +// same as fmt.Sprintf("%v", cs). +func (cs CallStack) MarshalText() ([]byte, error) { + buf := bytes.Buffer{} + buf.Write(openBracketBytes) + for i, pc := range cs { + if i > 0 { + buf.Write(spaceBytes) + } + fmt.Fprint(&buf, pc) + } + buf.Write(closeBracketBytes) + return buf.Bytes(), nil +} + +// Format implements fmt.Formatter by printing the CallStack as square brackets +// ([, ]) surrounding a space separated list of Calls each formatted with the +// supplied verb and options. +func (cs CallStack) Format(s fmt.State, verb rune) { + s.Write(openBracketBytes) + for i, pc := range cs { + if i > 0 { + s.Write(spaceBytes) + } + pc.Format(s, verb) + } + s.Write(closeBracketBytes) +} + +// Trace returns a CallStack for the current goroutine with element 0 +// identifying the calling function. +func Trace() CallStack { + var pcs [512]uintptr + n := runtime.Callers(1, pcs[:]) + + frames := runtime.CallersFrames(pcs[:n]) + cs := make(CallStack, 0, n) + + // Skip extra frame retrieved just to make sure the runtime.sigpanic + // special case is handled. + frame, more := frames.Next() + + for more { + frame, more = frames.Next() + cs = append(cs, Call{frame: frame}) + } + + return cs +} + +// TrimBelow returns a slice of the CallStack with all entries below c +// removed. +func (cs CallStack) TrimBelow(c Call) CallStack { + for len(cs) > 0 && cs[0] != c { + cs = cs[1:] + } + return cs +} + +// TrimAbove returns a slice of the CallStack with all entries above c +// removed. 
+func (cs CallStack) TrimAbove(c Call) CallStack { + for len(cs) > 0 && cs[len(cs)-1] != c { + cs = cs[:len(cs)-1] + } + return cs +} + +// pkgIndex returns the index that results in file[index:] being the path of +// file relative to the compile time GOPATH, and file[:index] being the +// $GOPATH/src/ portion of file. funcName must be the name of a function in +// file as returned by runtime.Func.Name. +func pkgIndex(file, funcName string) int { + // As of Go 1.6.2 there is no direct way to know the compile time GOPATH + // at runtime, but we can infer the number of path segments in the GOPATH. + // We note that runtime.Func.Name() returns the function name qualified by + // the import path, which does not include the GOPATH. Thus we can trim + // segments from the beginning of the file path until the number of path + // separators remaining is one more than the number of path separators in + // the function name. For example, given: + // + // GOPATH /home/user + // file /home/user/src/pkg/sub/file.go + // fn.Name() pkg/sub.Type.Method + // + // We want to produce: + // + // file[:idx] == /home/user/src/ + // file[idx:] == pkg/sub/file.go + // + // From this we can easily see that fn.Name() has one less path separator + // than our desired result for file[idx:]. We count separators from the + // end of the file path until it finds two more than in the function name + // and then move one character forward to preserve the initial path + // segment without a leading separator. + const sep = "/" + i := len(file) + for n := strings.Count(funcName, sep) + 2; n > 0; n-- { + i = strings.LastIndex(file[:i], sep) + if i == -1 { + i = -len(sep) + break + } + } + // get back to 0 or trim the leading separator + return i + len(sep) +} + +// pkgFilePath returns the frame's filepath relative to the compile-time GOPATH, +// or its module path joined to its path relative to the module root. 
+// +// As of Go 1.11 there is no direct way to know the compile time GOPATH or +// module paths at runtime, but we can piece together the desired information +// from available information. We note that runtime.Frame.Function contains the +// function name qualified by the package path, which includes the module path +// but not the GOPATH. We can extract the package path from that and append the +// last segments of the file path to arrive at the desired package qualified +// file path. For example, given: +// +// GOPATH /home/user +// import path pkg/sub +// frame.File /home/user/src/pkg/sub/file.go +// frame.Function pkg/sub.Type.Method +// Desired return pkg/sub/file.go +// +// It appears that we simply need to trim ".Type.Method" from frame.Function and +// append "/" + path.Base(file). +// +// But there are other wrinkles. Although it is idiomatic to do so, the internal +// name of a package is not required to match the last segment of its import +// path. In addition, the introduction of modules in Go 1.11 allows working +// without a GOPATH. So we also must make these work right: +// +// GOPATH /home/user +// import path pkg/go-sub +// package name sub +// frame.File /home/user/src/pkg/go-sub/file.go +// frame.Function pkg/sub.Type.Method +// Desired return pkg/go-sub/file.go +// +// Module path pkg/v2 +// import path pkg/v2/go-sub +// package name sub +// frame.File /home/user/cloned-pkg/go-sub/file.go +// frame.Function pkg/v2/sub.Type.Method +// Desired return pkg/v2/go-sub/file.go +// +// We can handle all of these situations by using the package path extracted +// from frame.Function up to, but not including, the last segment as the prefix +// and the last two segments of frame.File as the suffix of the returned path. +// This preserves the existing behavior when working in a GOPATH without modules +// and a semantically equivalent behavior when used in module aware project. 
+func pkgFilePath(frame *runtime.Frame) string { + pre := pkgPrefix(frame.Function) + post := pathSuffix(frame.File) + if pre == "" { + return post + } + return pre + "/" + post +} + +// pkgPrefix returns the import path of the function's package with the final +// segment removed. +func pkgPrefix(funcName string) string { + const pathSep = "/" + end := strings.LastIndex(funcName, pathSep) + if end == -1 { + return "" + } + return funcName[:end] +} + +// pathSuffix returns the last two segments of path. +func pathSuffix(path string) string { + const pathSep = "/" + lastSep := strings.LastIndex(path, pathSep) + if lastSep == -1 { + return path + } + return path[strings.LastIndex(path[:lastSep], pathSep)+1:] +} + +var runtimePath string + +func init() { + var pcs [3]uintptr + runtime.Callers(0, pcs[:]) + frames := runtime.CallersFrames(pcs[:]) + frame, _ := frames.Next() + file := frame.File + + idx := pkgIndex(frame.File, frame.Function) + + runtimePath = file[:idx] + if runtime.GOOS == "windows" { + runtimePath = strings.ToLower(runtimePath) + } +} + +func inGoroot(c Call) bool { + file := c.frame.File + if len(file) == 0 || file[0] == '?' { + return true + } + if runtime.GOOS == "windows" { + file = strings.ToLower(file) + } + return strings.HasPrefix(file, runtimePath) || strings.HasSuffix(file, "/_testmain.go") +} + +// TrimRuntime returns a slice of the CallStack with the topmost entries from +// the go runtime removed. It considers any calls originating from unknown +// files, files under GOROOT, or _testmain.go as part of the runtime. 
+func (cs CallStack) TrimRuntime() CallStack { + for len(cs) > 0 && inGoroot(cs[len(cs)-1]) { + cs = cs[:len(cs)-1] + } + return cs +} diff --git a/vendor/github.com/go-stack/stack/stack_test.go b/vendor/github.com/go-stack/stack/stack_test.go new file mode 100644 index 0000000..44f3a7d --- /dev/null +++ b/vendor/github.com/go-stack/stack/stack_test.go @@ -0,0 +1,582 @@ +package stack_test + +import ( + "fmt" + "io/ioutil" + "path" + "path/filepath" + "reflect" + "runtime" + "strings" + "testing" + + "github.com/go-stack/stack" +) + +func TestCaller(t *testing.T) { + t.Parallel() + + c := stack.Caller(0) + _, file, line, ok := runtime.Caller(0) + line-- + if !ok { + t.Fatal("runtime.Caller(0) failed") + } + + if got, want := c.Frame().File, file; got != want { + t.Errorf("got file == %v, want file == %v", got, want) + } + + if got, want := c.Frame().Line, line; got != want { + t.Errorf("got line == %v, want line == %v", got, want) + } +} + +func f3(f1 func() stack.Call) stack.Call { + return f2(f1) +} + +func f2(f1 func() stack.Call) stack.Call { + return f1() +} + +func TestCallerMidstackInlined(t *testing.T) { + t.Parallel() + + _, _, line, ok := runtime.Caller(0) + line -= 10 // adjust to return f1() line inside f2() + if !ok { + t.Fatal("runtime.Caller(0) failed") + } + + c := f3(func() stack.Call { + return stack.Caller(2) + }) + + if got, want := c.Frame().Line, line; got != want { + t.Errorf("got line == %v, want line == %v", got, want) + } + if got, want := c.Frame().Function, "github.com/go-stack/stack_test.f3"; got != want { + t.Errorf("got func name == %v, want func name == %v", got, want) + } +} + +func TestCallerPanic(t *testing.T) { + t.Parallel() + + var ( + line int + ok bool + ) + + defer func() { + if recover() != nil { + var pcs [32]uintptr + n := runtime.Callers(1, pcs[:]) + frames := runtime.CallersFrames(pcs[:n]) + // count frames to runtime.sigpanic + panicIdx := 0 + for { + f, more := frames.Next() + if f.Function == "runtime.sigpanic" { + 
break + } + panicIdx++ + if !more { + t.Fatal("no runtime.sigpanic entry on the stack") + } + } + c := stack.Caller(panicIdx) + if got, want := c.Frame().Function, "runtime.sigpanic"; got != want { + t.Errorf("sigpanic frame: got name == %v, want name == %v", got, want) + } + c1 := stack.Caller(panicIdx + 1) + if got, want := c1.Frame().Function, "github.com/go-stack/stack_test.TestCallerPanic"; got != want { + t.Errorf("TestCallerPanic frame: got name == %v, want name == %v", got, want) + } + if got, want := c1.Frame().Line, line; got != want { + t.Errorf("TestCallerPanic frame: got line == %v, want line == %v", got, want) + } + } + }() + + _, _, line, ok = runtime.Caller(0) + line += 7 // adjust to match line of panic below + if !ok { + t.Fatal("runtime.Caller(0) failed") + } + // Initiate a sigpanic. + var x *uintptr + _ = *x +} + +type tholder struct { + trace func() stack.CallStack +} + +func (th *tholder) traceLabyrinth() stack.CallStack { + for { + return th.trace() + } +} + +func TestTrace(t *testing.T) { + t.Parallel() + + _, _, line, ok := runtime.Caller(0) + if !ok { + t.Fatal("runtime.Caller(0) failed") + } + + fh := tholder{ + trace: func() stack.CallStack { + cs := stack.Trace() + return cs + }, + } + + cs := fh.traceLabyrinth() + + lines := []int{line + 7, line - 7, line + 12} + + for i, line := range lines { + if got, want := cs[i].Frame().Line, line; got != want { + t.Errorf("got line[%d] == %v, want line[%d] == %v", i, got, i, want) + } + } +} + +// Test stack handling originating from a sigpanic. 
+func TestTracePanic(t *testing.T) { + t.Parallel() + + var ( + line int + ok bool + ) + + defer func() { + if recover() != nil { + trace := stack.Trace() + + // find runtime.sigpanic + panicIdx := -1 + for i, c := range trace { + if c.Frame().Function == "runtime.sigpanic" { + panicIdx = i + break + } + } + if panicIdx == -1 { + t.Fatal("no runtime.sigpanic entry on the stack") + } + if got, want := trace[panicIdx].Frame().Function, "runtime.sigpanic"; got != want { + t.Errorf("sigpanic frame: got name == %v, want name == %v", got, want) + } + if got, want := trace[panicIdx+1].Frame().Function, "github.com/go-stack/stack_test.TestTracePanic"; got != want { + t.Errorf("TestTracePanic frame: got name == %v, want name == %v", got, want) + } + if got, want := trace[panicIdx+1].Frame().Line, line; got != want { + t.Errorf("TestTracePanic frame: got line == %v, want line == %v", got, want) + } + } + }() + + _, _, line, ok = runtime.Caller(0) + line += 7 // adjust to match line of panic below + if !ok { + t.Fatal("runtime.Caller(0) failed") + } + // Initiate a sigpanic. 
+ var x *uintptr + _ = *x +} + +const importPath = "github.com/go-stack/stack" + +type testType struct{} + +func (tt testType) testMethod() (c stack.Call, file string, line int, ok bool) { + c = stack.Caller(0) + _, file, line, ok = runtime.Caller(0) + line-- + return +} + +func TestCallFormat(t *testing.T) { + t.Parallel() + + c := stack.Caller(0) + _, file, line, ok := runtime.Caller(0) + line-- + if !ok { + t.Fatal("runtime.Caller(0) failed") + } + relFile := path.Join(importPath, filepath.Base(file)) + + c2, file2, line2, ok2 := testType{}.testMethod() + if !ok2 { + t.Fatal("runtime.Caller(0) failed") + } + relFile2 := path.Join(importPath, filepath.Base(file2)) + + data := []struct { + c stack.Call + desc string + fmt string + out string + }{ + {stack.Call{}, "error", "%s", "%!s(NOFUNC)"}, + + {c, "func", "%s", path.Base(file)}, + {c, "func", "%+s", relFile}, + {c, "func", "%#s", file}, + {c, "func", "%d", fmt.Sprint(line)}, + {c, "func", "%n", "TestCallFormat"}, + {c, "func", "%+n", "github.com/go-stack/stack_test.TestCallFormat"}, + {c, "func", "%k", "stack_test"}, + {c, "func", "%+k", "github.com/go-stack/stack_test"}, + {c, "func", "%v", fmt.Sprint(path.Base(file), ":", line)}, + {c, "func", "%+v", fmt.Sprint(relFile, ":", line)}, + {c, "func", "%#v", fmt.Sprint(file, ":", line)}, + + {c2, "meth", "%s", path.Base(file2)}, + {c2, "meth", "%+s", relFile2}, + {c2, "meth", "%#s", file2}, + {c2, "meth", "%d", fmt.Sprint(line2)}, + {c2, "meth", "%n", "testType.testMethod"}, + {c2, "meth", "%+n", "github.com/go-stack/stack_test.testType.testMethod"}, + {c2, "meth", "%k", "stack_test"}, + {c2, "meth", "%+k", "github.com/go-stack/stack_test"}, + {c2, "meth", "%v", fmt.Sprint(path.Base(file2), ":", line2)}, + {c2, "meth", "%+v", fmt.Sprint(relFile2, ":", line2)}, + {c2, "meth", "%#v", fmt.Sprint(file2, ":", line2)}, + } + + for _, d := range data { + got := fmt.Sprintf(d.fmt, d.c) + if got != d.out { + t.Errorf("fmt.Sprintf(%q, Call(%s)) = %s, want %s", d.fmt, 
d.desc, got, d.out) + } + } +} + +func TestCallString(t *testing.T) { + t.Parallel() + + c := stack.Caller(0) + _, file, line, ok := runtime.Caller(0) + line-- + if !ok { + t.Fatal("runtime.Caller(0) failed") + } + + c2, file2, line2, ok2 := testType{}.testMethod() + if !ok2 { + t.Fatal("runtime.Caller(0) failed") + } + + data := []struct { + c stack.Call + desc string + out string + }{ + {stack.Call{}, "error", "%!v(NOFUNC)"}, + {c, "func", fmt.Sprint(path.Base(file), ":", line)}, + {c2, "meth", fmt.Sprint(path.Base(file2), ":", line2)}, + } + + for _, d := range data { + got := d.c.String() + if got != d.out { + t.Errorf("got %s, want %s", got, d.out) + } + } +} + +func TestCallMarshalText(t *testing.T) { + t.Parallel() + + c := stack.Caller(0) + _, file, line, ok := runtime.Caller(0) + line-- + if !ok { + t.Fatal("runtime.Caller(0) failed") + } + + c2, file2, line2, ok2 := testType{}.testMethod() + if !ok2 { + t.Fatal("runtime.Caller(0) failed") + } + + data := []struct { + c stack.Call + desc string + out []byte + err error + }{ + {stack.Call{}, "error", nil, stack.ErrNoFunc}, + {c, "func", []byte(fmt.Sprint(path.Base(file), ":", line)), nil}, + {c2, "meth", []byte(fmt.Sprint(path.Base(file2), ":", line2)), nil}, + } + + for _, d := range data { + text, err := d.c.MarshalText() + if got, want := err, d.err; got != want { + t.Errorf("%s: got err %v, want err %v", d.desc, got, want) + } + if got, want := text, d.out; !reflect.DeepEqual(got, want) { + t.Errorf("%s: got %s, want %s", d.desc, got, want) + } + } +} + +func TestCallStackString(t *testing.T) { + cs, line0 := getTrace(t) + _, file, line1, ok := runtime.Caller(0) + line1-- + if !ok { + t.Fatal("runtime.Caller(0) failed") + } + file = path.Base(file) + if got, want := cs.String(), fmt.Sprintf("[%s:%d %s:%d]", file, line0, file, line1); got != want { + t.Errorf("\n got %v\nwant %v", got, want) + } +} + +func TestCallStackMarshalText(t *testing.T) { + cs, line0 := getTrace(t) + _, file, line1, ok := 
runtime.Caller(0) + line1-- + if !ok { + t.Fatal("runtime.Caller(0) failed") + } + file = path.Base(file) + text, _ := cs.MarshalText() + if got, want := text, []byte(fmt.Sprintf("[%s:%d %s:%d]", file, line0, file, line1)); !reflect.DeepEqual(got, want) { + t.Errorf("\n got %v\nwant %v", got, want) + } +} + +func getTrace(t *testing.T) (stack.CallStack, int) { + cs := stack.Trace().TrimRuntime() + _, _, line, ok := runtime.Caller(0) + line-- + if !ok { + t.Fatal("runtime.Caller(0) failed") + } + return cs, line +} + +func TestTrimAbove(t *testing.T) { + trace := trimAbove() + if got, want := len(trace), 2; got != want { + t.Fatalf("got len(trace) == %v, want %v, trace: %n", got, want, trace) + } + if got, want := fmt.Sprintf("%n", trace[1]), "TestTrimAbove"; got != want { + t.Errorf("got %q, want %q", got, want) + } +} + +func trimAbove() stack.CallStack { + call := stack.Caller(1) + trace := stack.Trace() + return trace.TrimAbove(call) +} + +func TestTrimBelow(t *testing.T) { + trace := trimBelow() + if got, want := fmt.Sprintf("%n", trace[0]), "TestTrimBelow"; got != want { + t.Errorf("got %q, want %q", got, want) + } +} + +func trimBelow() stack.CallStack { + call := stack.Caller(1) + trace := stack.Trace() + return trace.TrimBelow(call) +} + +func TestTrimRuntime(t *testing.T) { + trace := stack.Trace().TrimRuntime() + if got, want := len(trace), 1; got != want { + t.Errorf("got len(trace) == %v, want %v, goroot: %q, trace: %#v", got, want, runtime.GOROOT(), trace) + } +} + +func BenchmarkCallVFmt(b *testing.B) { + c := stack.Caller(0) + b.ResetTimer() + for i := 0; i < b.N; i++ { + fmt.Fprint(ioutil.Discard, c) + } +} + +func BenchmarkCallPlusVFmt(b *testing.B) { + c := stack.Caller(0) + b.ResetTimer() + for i := 0; i < b.N; i++ { + fmt.Fprintf(ioutil.Discard, "%+v", c) + } +} + +func BenchmarkCallSharpVFmt(b *testing.B) { + c := stack.Caller(0) + b.ResetTimer() + for i := 0; i < b.N; i++ { + fmt.Fprintf(ioutil.Discard, "%#v", c) + } +} + +func 
BenchmarkCallSFmt(b *testing.B) { + c := stack.Caller(0) + b.ResetTimer() + for i := 0; i < b.N; i++ { + fmt.Fprintf(ioutil.Discard, "%s", c) + } +} + +func BenchmarkCallPlusSFmt(b *testing.B) { + c := stack.Caller(0) + b.ResetTimer() + for i := 0; i < b.N; i++ { + fmt.Fprintf(ioutil.Discard, "%+s", c) + } +} + +func BenchmarkCallSharpSFmt(b *testing.B) { + c := stack.Caller(0) + b.ResetTimer() + for i := 0; i < b.N; i++ { + fmt.Fprintf(ioutil.Discard, "%#s", c) + } +} + +func BenchmarkCallDFmt(b *testing.B) { + c := stack.Caller(0) + b.ResetTimer() + for i := 0; i < b.N; i++ { + fmt.Fprintf(ioutil.Discard, "%d", c) + } +} + +func BenchmarkCallNFmt(b *testing.B) { + c := stack.Caller(0) + b.ResetTimer() + for i := 0; i < b.N; i++ { + fmt.Fprintf(ioutil.Discard, "%n", c) + } +} + +func BenchmarkCallPlusNFmt(b *testing.B) { + c := stack.Caller(0) + b.ResetTimer() + for i := 0; i < b.N; i++ { + fmt.Fprintf(ioutil.Discard, "%+n", c) + } +} + +func BenchmarkCaller(b *testing.B) { + for i := 0; i < b.N; i++ { + stack.Caller(0) + } +} + +func BenchmarkTrace(b *testing.B) { + for i := 0; i < b.N; i++ { + stack.Trace() + } +} + +func deepStack(depth int, b *testing.B) stack.CallStack { + if depth > 0 { + return deepStack(depth-1, b) + } + b.StartTimer() + s := stack.Trace() + return s +} + +func BenchmarkTrace10(b *testing.B) { + for i := 0; i < b.N; i++ { + b.StopTimer() + deepStack(10, b) + } +} + +func BenchmarkTrace50(b *testing.B) { + b.StopTimer() + for i := 0; i < b.N; i++ { + deepStack(50, b) + } +} + +func BenchmarkTrace100(b *testing.B) { + b.StopTimer() + for i := 0; i < b.N; i++ { + deepStack(100, b) + } +} + +//////////////// +// Benchmark functions followed by formatting +//////////////// + +func BenchmarkCallerAndVFmt(b *testing.B) { + for i := 0; i < b.N; i++ { + fmt.Fprint(ioutil.Discard, stack.Caller(0)) + } +} + +func BenchmarkTraceAndVFmt(b *testing.B) { + for i := 0; i < b.N; i++ { + fmt.Fprint(ioutil.Discard, stack.Trace()) + } +} + +func 
BenchmarkTrace10AndVFmt(b *testing.B) { + for i := 0; i < b.N; i++ { + b.StopTimer() + fmt.Fprint(ioutil.Discard, deepStack(10, b)) + } +} + +//////////////// +// Baseline against package runtime. +//////////////// + +func BenchmarkRuntimeCaller(b *testing.B) { + for i := 0; i < b.N; i++ { + runtime.Caller(0) + } +} + +func BenchmarkRuntimeCallerAndFmt(b *testing.B) { + for i := 0; i < b.N; i++ { + _, file, line, _ := runtime.Caller(0) + const sep = "/" + if i := strings.LastIndex(file, sep); i != -1 { + file = file[i+len(sep):] + } + fmt.Fprint(ioutil.Discard, file, ":", line) + } +} + +func BenchmarkFuncForPC(b *testing.B) { + pc, _, _, _ := runtime.Caller(0) + pc-- + b.ResetTimer() + for i := 0; i < b.N; i++ { + runtime.FuncForPC(pc) + } +} + +func BenchmarkFuncFileLine(b *testing.B) { + pc, _, _, _ := runtime.Caller(0) + pc-- + fn := runtime.FuncForPC(pc) + b.ResetTimer() + for i := 0; i < b.N; i++ { + fn.FileLine(pc) + } +} diff --git a/vendor/github.com/golang/snappy/.gitignore b/vendor/github.com/golang/snappy/.gitignore new file mode 100644 index 0000000..042091d --- /dev/null +++ b/vendor/github.com/golang/snappy/.gitignore @@ -0,0 +1,16 @@ +cmd/snappytool/snappytool +testdata/bench + +# These explicitly listed benchmark data files are for an obsolete version of +# snappy_test.go. +testdata/alice29.txt +testdata/asyoulik.txt +testdata/fireworks.jpeg +testdata/geo.protodata +testdata/html +testdata/html_x_4 +testdata/kppkn.gtb +testdata/lcet10.txt +testdata/paper-100k.pdf +testdata/plrabn12.txt +testdata/urls.10K diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS new file mode 100644 index 0000000..bcfa195 --- /dev/null +++ b/vendor/github.com/golang/snappy/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of Snappy-Go authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. 
+ +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Damian Gryski +Google Inc. +Jan Mercl <0xjnml@gmail.com> +Rodolfo Carvalho +Sebastien Binet diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS new file mode 100644 index 0000000..931ae31 --- /dev/null +++ b/vendor/github.com/golang/snappy/CONTRIBUTORS @@ -0,0 +1,37 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the Snappy-Go repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name + +# Please keep the list sorted. 
+ +Damian Gryski +Jan Mercl <0xjnml@gmail.com> +Kai Backman +Marc-Antoine Ruel +Nigel Tao +Rob Pike +Rodolfo Carvalho +Russ Cox +Sebastien Binet diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE new file mode 100644 index 0000000..6050c10 --- /dev/null +++ b/vendor/github.com/golang/snappy/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README new file mode 100644 index 0000000..cea1287 --- /dev/null +++ b/vendor/github.com/golang/snappy/README @@ -0,0 +1,107 @@ +The Snappy compression format in the Go programming language. + +To download and install from source: +$ go get github.com/golang/snappy + +Unless otherwise noted, the Snappy-Go source files are distributed +under the BSD-style license found in the LICENSE file. + + + +Benchmarks. + +The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten +or so files, the same set used by the C++ Snappy code (github.com/google/snappy +and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ +3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: + +"go test -test.bench=." + +_UFlat0-8 2.19GB/s ± 0% html +_UFlat1-8 1.41GB/s ± 0% urls +_UFlat2-8 23.5GB/s ± 2% jpg +_UFlat3-8 1.91GB/s ± 0% jpg_200 +_UFlat4-8 14.0GB/s ± 1% pdf +_UFlat5-8 1.97GB/s ± 0% html4 +_UFlat6-8 814MB/s ± 0% txt1 +_UFlat7-8 785MB/s ± 0% txt2 +_UFlat8-8 857MB/s ± 0% txt3 +_UFlat9-8 719MB/s ± 1% txt4 +_UFlat10-8 2.84GB/s ± 0% pb +_UFlat11-8 1.05GB/s ± 0% gaviota + +_ZFlat0-8 1.04GB/s ± 0% html +_ZFlat1-8 534MB/s ± 0% urls +_ZFlat2-8 15.7GB/s ± 1% jpg +_ZFlat3-8 740MB/s ± 3% jpg_200 +_ZFlat4-8 9.20GB/s ± 1% pdf +_ZFlat5-8 991MB/s ± 0% html4 +_ZFlat6-8 379MB/s ± 0% txt1 +_ZFlat7-8 352MB/s ± 0% txt2 +_ZFlat8-8 396MB/s ± 1% txt3 +_ZFlat9-8 327MB/s ± 1% txt4 +_ZFlat10-8 1.33GB/s ± 1% pb +_ZFlat11-8 605MB/s ± 1% gaviota + + + +"go test -test.bench=. 
-tags=noasm" + +_UFlat0-8 621MB/s ± 2% html +_UFlat1-8 494MB/s ± 1% urls +_UFlat2-8 23.2GB/s ± 1% jpg +_UFlat3-8 1.12GB/s ± 1% jpg_200 +_UFlat4-8 4.35GB/s ± 1% pdf +_UFlat5-8 609MB/s ± 0% html4 +_UFlat6-8 296MB/s ± 0% txt1 +_UFlat7-8 288MB/s ± 0% txt2 +_UFlat8-8 309MB/s ± 1% txt3 +_UFlat9-8 280MB/s ± 1% txt4 +_UFlat10-8 753MB/s ± 0% pb +_UFlat11-8 400MB/s ± 0% gaviota + +_ZFlat0-8 409MB/s ± 1% html +_ZFlat1-8 250MB/s ± 1% urls +_ZFlat2-8 12.3GB/s ± 1% jpg +_ZFlat3-8 132MB/s ± 0% jpg_200 +_ZFlat4-8 2.92GB/s ± 0% pdf +_ZFlat5-8 405MB/s ± 1% html4 +_ZFlat6-8 179MB/s ± 1% txt1 +_ZFlat7-8 170MB/s ± 1% txt2 +_ZFlat8-8 189MB/s ± 1% txt3 +_ZFlat9-8 164MB/s ± 1% txt4 +_ZFlat10-8 479MB/s ± 1% pb +_ZFlat11-8 270MB/s ± 1% gaviota + + + +For comparison (Go's encoded output is byte-for-byte identical to C++'s), here +are the numbers from C++ Snappy's + +make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log + +BM_UFlat/0 2.4GB/s html +BM_UFlat/1 1.4GB/s urls +BM_UFlat/2 21.8GB/s jpg +BM_UFlat/3 1.5GB/s jpg_200 +BM_UFlat/4 13.3GB/s pdf +BM_UFlat/5 2.1GB/s html4 +BM_UFlat/6 1.0GB/s txt1 +BM_UFlat/7 959.4MB/s txt2 +BM_UFlat/8 1.0GB/s txt3 +BM_UFlat/9 864.5MB/s txt4 +BM_UFlat/10 2.9GB/s pb +BM_UFlat/11 1.2GB/s gaviota + +BM_ZFlat/0 944.3MB/s html (22.31 %) +BM_ZFlat/1 501.6MB/s urls (47.78 %) +BM_ZFlat/2 14.3GB/s jpg (99.95 %) +BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) +BM_ZFlat/4 8.3GB/s pdf (83.30 %) +BM_ZFlat/5 903.5MB/s html4 (22.52 %) +BM_ZFlat/6 336.0MB/s txt1 (57.88 %) +BM_ZFlat/7 312.3MB/s txt2 (61.91 %) +BM_ZFlat/8 353.1MB/s txt3 (54.99 %) +BM_ZFlat/9 289.9MB/s txt4 (66.26 %) +BM_ZFlat/10 1.2GB/s pb (19.68 %) +BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/golang/snappy/cmd/snappytool/main.go b/vendor/github.com/golang/snappy/cmd/snappytool/main.go new file mode 100644 index 0000000..b0f44c3 --- /dev/null +++ b/vendor/github.com/golang/snappy/cmd/snappytool/main.go @@ -0,0 +1,46 @@ +package main + +import ( + "errors" + 
"flag" + "io/ioutil" + "os" + + "github.com/golang/snappy" +) + +var ( + decode = flag.Bool("d", false, "decode") + encode = flag.Bool("e", false, "encode") +) + +func run() error { + flag.Parse() + if *decode == *encode { + return errors.New("exactly one of -d or -e must be given") + } + + in, err := ioutil.ReadAll(os.Stdin) + if err != nil { + return err + } + + out := []byte(nil) + if *decode { + out, err = snappy.Decode(nil, in) + if err != nil { + return err + } + } else { + out = snappy.Encode(nil, in) + } + _, err = os.Stdout.Write(out) + return err +} + +func main() { + if err := run(); err != nil { + os.Stderr.WriteString(err.Error() + "\n") + os.Exit(1) + } +} diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go new file mode 100644 index 0000000..72efb03 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode.go @@ -0,0 +1,237 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. 
+func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. 
+func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + for { + if r.i < r.j { + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil + } + if !r.readFull(r.buf[:4], true) { + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return 0, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). 
+ if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.decoded[:n], false) { + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return 0, r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return 0, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return 0, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return 0, r.err + } + } +} diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go new file mode 100644 index 0000000..fcd192b --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_amd64.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// decode has the same semantics as in decode_other.go. 
+// +//go:noescape +func decode(dst, src []byte) int diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s new file mode 100644 index 0000000..e6179f6 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_amd64.s @@ -0,0 +1,490 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. The register allocation: +// - AX scratch +// - BX scratch +// - CX length or x +// - DX offset +// - SI &src[s] +// - DI &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. +// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. +TEXT ·decode(SB), NOSPLIT, $48-56 + // Initialize SI, DI and R8-R13. + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, DI + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, SI + MOVQ R11, R13 + ADDQ R12, R13 + +loop: + // for s < len(src) + CMPQ SI, R13 + JEQ end + + // CX = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBLZX (SI), CX + MOVL CX, BX + ANDL $3, BX + CMPL BX, $1 + JAE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. 
+ + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + SHRL $2, CX + CMPL CX, $60 + JAE tagLit60Plus + + // case x < 60: + // s++ + INCQ SI + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that CX == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // CX can hold 64 bits, so the increment cannot overflow. + INCQ CX + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // AX = len(dst) - d + // BX = len(src) - s + MOVQ R10, AX + SUBQ DI, AX + MOVQ R13, BX + SUBQ SI, BX + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMPQ CX, $16 + JGT callMemmove + CMPQ AX, $16 + JLT callMemmove + CMPQ BX, $16 + JLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. 
This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(SI), X0 + MOVOU X0, 0(DI) + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMPQ CX, AX + JGT errCorrupt + CMPQ CX, BX + JGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // DI, SI and CX as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, R13 + ADDQ R12, R13 + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. 
+ ADDQ CX, SI + SUBQ $58, SI + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // case x == 60: + CMPL CX, $61 + JEQ tagLit61 + JA tagLit62Plus + + // x = uint32(src[s-1]) + MOVBLZX -1(SI), CX + JMP doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVWLZX -2(SI), CX + JMP doLit + +tagLit62Plus: + CMPL CX, $62 + JA tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVWLZX -3(SI), CX + MOVBLZX -1(SI), BX + SHLL $16, BX + ORL BX, CX + JMP doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVL -4(SI), CX + JMP doLit + +// The code above handles literal tags. +// ---------------------------------------- +// The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADDQ $5, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-5])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVLQZX -4(SI), DX + JMP doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADDQ $3, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-3])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVWQZX -2(SI), DX + JMP doCopy + +tagCopy: + // We have a copy tag. 
We assume that: + // - BX == src[s] & 0x03 + // - CX == src[s] + CMPQ BX, $2 + JEQ tagCopy2 + JA tagCopy4 + + // case tagCopy1: + // s += 2 + ADDQ $2, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + MOVQ CX, DX + ANDQ $0xe0, DX + SHLQ $3, DX + MOVBQZX -1(SI), BX + ORQ BX, DX + + // length = 4 + int(src[s-2])>>2&0x7 + SHRQ $2, CX + ANDQ $7, CX + ADDQ $4, CX + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. + // + // We assume that: + // - CX == length && CX > 0 + // - DX == offset + + // if offset <= 0 { etc } + CMPQ DX, $0 + JLE errCorrupt + + // if d < offset { etc } + MOVQ DI, BX + SUBQ R8, BX + CMPQ BX, DX + JLT errCorrupt + + // if length > len(dst)-d { etc } + MOVQ R10, BX + SUBQ DI, BX + CMPQ CX, BX + JGT errCorrupt + + // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length + // + // Set: + // - R14 = len(dst)-d + // - R15 = &dst[d-offset] + MOVQ R10, R14 + SUBQ DI, R14 + MOVQ DI, R15 + SUBQ DX, R15 + + // !!! Try a faster technique for short (16 or fewer bytes) forward copies. + // + // First, try using two 8-byte load/stores, similar to the doLit technique + // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is + // still OK if offset >= 8. Note that this has to be two 8-byte load/stores + // and not one 16-byte load/store, and the first store has to be before the + // second load, due to the overlap if offset is in the range [8, 16). + // + // if length > 16 || offset < 8 || len(dst)-d < 16 { + // goto slowForwardCopy + // } + // copy 16 bytes + // d += length + CMPQ CX, $16 + JGT slowForwardCopy + CMPQ DX, $8 + JLT slowForwardCopy + CMPQ R14, $16 + JLT slowForwardCopy + MOVQ 0(R15), AX + MOVQ AX, 0(DI) + MOVQ 8(R15), BX + MOVQ BX, 8(DI) + ADDQ CX, DI + JMP loop + +slowForwardCopy: + // !!! 
If the forward copy is longer than 16 bytes, or if offset < 8, we + // can still try 8-byte load stores, provided we can overrun up to 10 extra + // bytes. As above, the overrun will be fixed up by subsequent iterations + // of the outermost loop. + // + // The C++ snappy code calls this technique IncrementalCopyFastPath. Its + // commentary says: + // + // ---- + // + // The main part of this loop is a simple copy of eight bytes at a time + // until we've copied (at least) the requested amount of bytes. However, + // if d and d-offset are less than eight bytes apart (indicating a + // repeating pattern of length < 8), we first need to expand the pattern in + // order to get the correct results. For instance, if the buffer looks like + // this, with the eight-byte and patterns marked as + // intervals: + // + // abxxxxxxxxxxxx + // [------] d-offset + // [------] d + // + // a single eight-byte copy from to will repeat the pattern + // once, after which we can move two bytes without moving : + // + // ababxxxxxxxxxx + // [------] d-offset + // [------] d + // + // and repeat the exercise until the two no longer overlap. + // + // This allows us to do very well in the special case of one single byte + // repeated many times, without taking a big hit for more general cases. + // + // The worst case of extra writing past the end of the match occurs when + // offset == 1 and length == 1; the last copy will read from byte positions + // [0..7] and write to [4..11], whereas it was only supposed to write to + // position 1. Thus, ten excess bytes. + // + // ---- + // + // That "10 byte overrun" worst case is confirmed by Go's + // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy + // and finishSlowForwardCopy algorithm. + // + // if length > len(dst)-d-10 { + // goto verySlowForwardCopy + // } + SUBQ $10, R14 + CMPQ CX, R14 + JGT verySlowForwardCopy + +makeOffsetAtLeast8: + // !!! 
As above, expand the pattern so that offset >= 8 and we can use + // 8-byte load/stores. + // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R15, is unchanged. + // } + CMPQ DX, $8 + JGE fixUpSlowForwardCopy + MOVQ (R15), BX + MOVQ BX, (DI) + SUBQ DX, CX + ADDQ DX, DI + ADDQ DX, DX + JMP makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by DI being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save DI to AX so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVQ DI, AX + ADDQ CX, DI + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + CMPQ CX, $0 + JLE loop + MOVQ (R15), BX + MOVQ BX, (AX) + ADDQ $8, R15 + ADDQ $8, AX + SUBQ $8, CX + JMP finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R15), BX + MOVB BX, (DI) + INCQ R15 + INCQ DI + DECQ CX + JNZ verySlowForwardCopy + JMP loop + +// The code above handles copy tags. +// ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". 
+ // + // if d != len(dst) { etc } + CMPQ DI, R10 + JNE errCorrupt + + // return 0 + MOVQ $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVQ $1, ret+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go new file mode 100644 index 0000000..8c9f204 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_other.go @@ -0,0 +1,101 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike + // the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + for end := d + length; d != end; d++ { + dst[d] = dst[d-offset] + } + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go new file mode 100644 index 0000000..8d393e9 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode.go @@ -0,0 +1,285 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. 
A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. 
+ // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. 
This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. 
+ compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. + w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. +func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go new file mode 100644 index 0000000..150d91b --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_amd64.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// emitLiteral has the same semantics as in encode_other.go. +// +//go:noescape +func emitLiteral(dst, lit []byte) int + +// emitCopy has the same semantics as in encode_other.go. +// +//go:noescape +func emitCopy(dst []byte, offset, length int) int + +// extendMatch has the same semantics as in encode_other.go. +// +//go:noescape +func extendMatch(src []byte, i, j int) int + +// encodeBlock has the same semantics as in encode_other.go. +// +//go:noescape +func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s new file mode 100644 index 0000000..adfd979 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_amd64.s @@ -0,0 +1,730 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a +// Go toolchain regression. See https://github.com/golang/go/issues/15426 and +// https://github.com/golang/snappy/issues/29 +// +// As a workaround, the package was built with a known good assembler, and +// those instructions were disassembled by "objdump -d" to yield the +// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 +// style comments, in AT&T asm syntax. Note that rsp here is a physical +// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). +// The instructions were then encoded as "BYTE $0x.." sequences, which assemble +// fine on Go 1.6. + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". 
+ +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. The register allocation: +// - AX len(lit) +// - BX n +// - DX return value +// - DI &dst[i] +// - R10 &lit[0] +// +// The 24 bytes of stack space is to call runtime·memmove. +// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. +TEXT ·emitLiteral(SB), NOSPLIT, $24-56 + MOVQ dst_base+0(FP), DI + MOVQ lit_base+24(FP), R10 + MOVQ lit_len+32(FP), AX + MOVQ AX, DX + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT oneByte + CMPL BX, $256 + JLT twoBytes + +threeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + ADDQ $3, DX + JMP memmove + +twoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + ADDQ $2, DX + JMP memmove + +oneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + ADDQ $1, DX + +memmove: + MOVQ DX, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - AX length +// - SI &dst[0] +// - DI &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. 
+TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), DI + MOVQ DI, SI + MOVQ offset+24(FP), R11 + MOVQ length+32(FP), AX + +loop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP loop0 + +step1: + // if length > 64 { etc } + CMPL AX, $64 + JLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMPL AX, $12 + JGE step3 + CMPL R11, $2048 + JGE step3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - DX &src[0] +// - SI &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVQ src_base+0(FP), DX + MOVQ src_len+8(FP), R14 + MOVQ i+24(FP), R15 + MOVQ j+32(FP), SI + ADDQ DX, R14 + ADDQ DX, R15 + ADDQ DX, SI + MOVQ R14, R13 + SUBQ $8, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. 
If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA cmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE bsf + ADDQ $8, R15 + ADDQ $8, SI + JMP cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE extendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE extendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - AX . . +// - BX . . +// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). +// - DX 64 &src[0], tableSize +// - SI 72 &src[s] +// - DI 80 &dst[d] +// - R9 88 sLimit +// - R10 . &src[nextEmit] +// - R11 96 prevHash, currHash, nextHash, offset +// - R12 104 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 112 candidate +// +// The second column (56, 64, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. +// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. 
An +// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. +TEXT ·encodeBlock(SB), 0, $32888-56 + MOVQ dst_base+0(FP), DI + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVQ $24, CX + MOVQ $256, DX + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + CMPQ DX, $16384 + JGE varTable + CMPQ DX, R14 + JGE varTable + SUBQ $1, CX + SHLQ $1, DX + JMP calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU + // writes 16 bytes, so we can do only tableSize/8 writes instead of the + // 2048 writes that would zero-initialize all of table's 32768 bytes. + SHRQ $3, DX + LEAQ table-32768(SP), BX + PXOR X0, X0 + +memclr: + MOVOU X0, 0(BX) + ADDQ $16, BX + SUBQ $1, DX + JNZ memclr + + // !!! DX = &src[0] + MOVQ SI, DX + + // sLimit := len(src) - inputMargin + MOVQ R14, R9 + SUBQ $15, R9 + + // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't + // change for the rest of the function. 
+ MOVQ CX, 56(SP) + MOVQ DX, 64(SP) + MOVQ R9, 88(SP) + + // nextEmit := 0 + MOVQ DX, R10 + + // s := 1 + ADDQ $1, SI + + // nextHash := hash(load32(src, s), shift) + MOVL 0(SI), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + +outer: + // for { etc } + + // skip := 32 + MOVQ $32, R12 + + // nextS := s + MOVQ SI, R13 + + // candidate := 0 + MOVQ $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVQ R13, SI + + // bytesBetweenHashLookups := skip >> 5 + MOVQ R12, R14 + SHRQ $5, R14 + + // nextS = s + bytesBetweenHashLookups + ADDQ R14, R13 + + // skip += bytesBetweenHashLookups + ADDQ R14, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVQ R13, AX + SUBQ DX, AX + CMPQ AX, R9 + JA emitRemainder + + // candidate = int(table[nextHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[nextHash] = uint16(s) + MOVQ SI, AX + SUBQ DX, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // nextHash = hash(load32(src, nextS), shift) + MOVL 0(R13), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVL 0(SI), AX + MOVL (DX)(R15*1), BX + CMPL AX, BX + JNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVQ SI, AX + SUBQ R10, AX + CMPQ AX, $16 + JLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. 
+ // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT inlineEmitLiteralOneByte + CMPL BX, $256 + JLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVQ SI, 72(SP) + MOVQ DI, 80(SP) + MOVQ R15, 112(SP) + CALL runtime·memmove(SB) + MOVQ 56(SP), CX + MOVQ 64(SP), DX + MOVQ 72(SP), SI + MOVQ 80(SP), DI + MOVQ 88(SP), R9 + MOVQ 112(SP), R15 + JMP inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB AX, BX + SUBB $1, BX + SHLB $2, BX + MOVB BX, (DI) + ADDQ $1, DI + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(R10), X0 + MOVOU X0, 0(DI) + ADDQ AX, DI + +inner1: + // for { etc } + + // base := s + MOVQ SI, R12 + + // !!! 
offset := base - candidate + MOVQ R12, R11 + SUBQ R15, R11 + SUBQ DX, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVQ src_len+32(FP), R14 + ADDQ DX, R14 + + // !!! R13 = &src[len(src) - 8] + MOVQ R14, R13 + SUBQ $8, R13 + + // !!! R15 = &src[candidate + 4] + ADDQ $4, R15 + ADDQ DX, R15 + + // !!! s += 4 + ADDQ $4, SI + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA inlineExtendMatchCmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE inlineExtendMatchBSF + ADDQ $8, R15 + ADDQ $8, SI + JMP inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + JMP inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE inlineExtendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE inlineExtendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. + // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVQ SI, AX + SUBQ R12, AX + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. 
+ MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + CMPL AX, $64 + JLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + CMPL AX, $12 + JGE inlineEmitCopyStep3 + CMPL R11, $2048 + JGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + JMP inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVQ SI, R10 + + // if s >= sLimit { goto emitRemainder } + MOVQ SI, AX + SUBQ DX, AX + CMPQ AX, R9 + JAE emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. 
+ + // x := load64(src, s-1) + MOVQ -1(SI), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // table[prevHash] = uint16(s-1) + MOVQ SI, AX + SUBQ DX, AX + SUBQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // currHash := hash(uint32(x>>8), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // candidate = int(table[currHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[currHash] = uint16(s) + ADDQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVL (DX)(R15*1), BX + CMPL R14, BX + JEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // s++ + ADDQ $1, SI + + // break out of the inner1 for loop, i.e. continue the outer loop. + JMP outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVQ src_len+32(FP), AX + ADDQ DX, AX + CMPQ R10, AX + JEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVQ DI, 0(SP) + MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ R10, 24(SP) + SUBQ R10, AX + MOVQ AX, 32(SP) + MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. + MOVQ DI, 80(SP) + CALL ·emitLiteral(SB) + MOVQ 80(SP), DI + + // Finish the "d +=" part of "d += emitLiteral(etc)". 
+ ADDQ 48(SP), DI + +encodeBlockEnd: + MOVQ dst_base+0(FP), AX + SUBQ AX, DI + MOVQ DI, d+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go new file mode 100644 index 0000000..dbcae90 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_other.go @@ -0,0 +1,238 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= len(lit) && len(lit) <= 65536 +func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + default: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + } + return i + copy(dst[i:], lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= 65535 +// 4 <= length && length <= 65535 +func emitCopy(dst []byte, offset, length int) int { + i := 0 + // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. 
The + // threshold for this loop is a little higher (at 68 = 64 + 4), and the + // length emitted down below is is a little lower (at 60 = 64 - 4), because + // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed + // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as + // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as + // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a + // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an + // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. + for length >= 68 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[i+0] = 63<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 64 + } + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + dst[i+0] = 59<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 60 + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + return i + 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + return i + 2 +} + +// extendMatch returns the largest k such that k <= len(src) and that +// src[i:i+k-j] and src[j:k] have the same contents. +// +// It assumes that: +// 0 <= i && i < j && j <= len(src) +func extendMatch(src []byte, i, j int) int { + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. 
+// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. 
This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. 
+ // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/golang/snappy/go.mod b/vendor/github.com/golang/snappy/go.mod new file mode 100644 index 0000000..f6406bb --- /dev/null +++ b/vendor/github.com/golang/snappy/go.mod @@ -0,0 +1 @@ +module github.com/golang/snappy diff --git a/vendor/github.com/golang/snappy/golden_test.go b/vendor/github.com/golang/snappy/golden_test.go new file mode 100644 index 0000000..e4496f9 --- /dev/null +++ b/vendor/github.com/golang/snappy/golden_test.go @@ -0,0 +1,1965 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +// extendMatchGoldenTestCases is the i and j arguments, and the returned value, +// for every extendMatch call issued when encoding the +// testdata/Mark.Twain-Tom.Sawyer.txt file. 
It is used to benchmark the +// extendMatch implementation. +// +// It was generated manually by adding some print statements to the (pure Go) +// extendMatch implementation: +// +// func extendMatch(src []byte, i, j int) int { +// i0, j0 := i, j +// for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { +// } +// println("{", i0, ",", j0, ",", j, "},") +// return j +// } +// +// and running "go test -test.run=EncodeGoldenInput -tags=noasm". +var extendMatchGoldenTestCases = []struct { + i, j, want int +}{ + {11, 61, 62}, + {80, 81, 82}, + {86, 87, 101}, + {85, 133, 149}, + {152, 153, 162}, + {133, 168, 193}, + {168, 207, 225}, + {81, 255, 275}, + {278, 279, 283}, + {306, 417, 417}, + {373, 428, 430}, + {389, 444, 447}, + {474, 510, 512}, + {465, 533, 533}, + {47, 547, 547}, + {307, 551, 554}, + {420, 582, 587}, + {309, 604, 604}, + {604, 625, 625}, + {538, 629, 629}, + {328, 640, 640}, + {573, 645, 645}, + {319, 657, 657}, + {30, 664, 664}, + {45, 679, 680}, + {621, 684, 684}, + {376, 700, 700}, + {33, 707, 708}, + {601, 733, 733}, + {334, 744, 745}, + {625, 758, 759}, + {382, 763, 763}, + {550, 769, 771}, + {533, 789, 789}, + {804, 813, 813}, + {342, 841, 842}, + {742, 847, 847}, + {74, 852, 852}, + {810, 864, 864}, + {758, 868, 869}, + {714, 883, 883}, + {582, 889, 891}, + {61, 934, 935}, + {894, 942, 942}, + {939, 949, 949}, + {785, 956, 957}, + {886, 978, 978}, + {792, 998, 998}, + {998, 1005, 1005}, + {572, 1032, 1032}, + {698, 1051, 1053}, + {599, 1067, 1069}, + {1056, 1079, 1079}, + {942, 1089, 1090}, + {831, 1094, 1096}, + {1088, 1100, 1103}, + {732, 1113, 1114}, + {1037, 1118, 1118}, + {872, 1128, 1130}, + {1079, 1140, 1142}, + {332, 1162, 1162}, + {207, 1168, 1186}, + {1189, 1190, 1225}, + {105, 1229, 1230}, + {79, 1256, 1257}, + {1190, 1261, 1283}, + {255, 1306, 1306}, + {1319, 1339, 1358}, + {364, 1370, 1370}, + {955, 1378, 1380}, + {122, 1403, 1403}, + {1325, 1407, 1419}, + {664, 1423, 1424}, + {941, 1461, 1463}, + {867, 1477, 1478}, + {757, 1488, 
1489}, + {1140, 1499, 1499}, + {31, 1506, 1506}, + {1487, 1510, 1512}, + {1089, 1520, 1521}, + {1467, 1525, 1529}, + {1394, 1537, 1537}, + {1499, 1541, 1541}, + {367, 1558, 1558}, + {1475, 1564, 1564}, + {1525, 1568, 1571}, + {1541, 1582, 1583}, + {864, 1587, 1588}, + {704, 1597, 1597}, + {336, 1602, 1602}, + {1383, 1613, 1613}, + {1498, 1617, 1618}, + {1051, 1623, 1625}, + {401, 1643, 1645}, + {1072, 1654, 1655}, + {1067, 1667, 1669}, + {699, 1673, 1674}, + {1587, 1683, 1684}, + {920, 1696, 1696}, + {1505, 1710, 1710}, + {1550, 1723, 1723}, + {996, 1727, 1727}, + {833, 1733, 1734}, + {1638, 1739, 1740}, + {1654, 1744, 1744}, + {753, 1761, 1761}, + {1548, 1773, 1773}, + {1568, 1777, 1780}, + {1683, 1793, 1794}, + {948, 1801, 1801}, + {1666, 1805, 1808}, + {1502, 1814, 1814}, + {1696, 1822, 1822}, + {502, 1836, 1837}, + {917, 1843, 1843}, + {1733, 1854, 1855}, + {970, 1859, 1859}, + {310, 1863, 1863}, + {657, 1872, 1872}, + {1005, 1876, 1876}, + {1662, 1880, 1880}, + {904, 1892, 1892}, + {1427, 1910, 1910}, + {1772, 1929, 1930}, + {1822, 1937, 1940}, + {1858, 1949, 1950}, + {1602, 1956, 1956}, + {1150, 1962, 1962}, + {1504, 1966, 1967}, + {51, 1971, 1971}, + {1605, 1979, 1979}, + {1458, 1983, 1988}, + {1536, 2001, 2006}, + {1373, 2014, 2018}, + {1494, 2025, 2025}, + {1667, 2029, 2031}, + {1592, 2035, 2035}, + {330, 2045, 2045}, + {1376, 2053, 2053}, + {1991, 2058, 2059}, + {1635, 2065, 2065}, + {1992, 2073, 2074}, + {2014, 2080, 2081}, + {1546, 2085, 2087}, + {59, 2099, 2099}, + {1996, 2106, 2106}, + {1836, 2110, 2110}, + {2068, 2114, 2114}, + {1338, 2122, 2122}, + {1562, 2128, 2130}, + {1934, 2134, 2134}, + {2114, 2141, 2142}, + {977, 2149, 2150}, + {956, 2154, 2155}, + {1407, 2162, 2162}, + {1773, 2166, 2166}, + {883, 2171, 2171}, + {623, 2175, 2178}, + {1520, 2191, 2192}, + {1162, 2200, 2200}, + {912, 2204, 2204}, + {733, 2208, 2208}, + {1777, 2212, 2215}, + {1532, 2219, 2219}, + {718, 2223, 2225}, + {2069, 2229, 2229}, + {2207, 2245, 2246}, + {1139, 2264, 2264}, 
+ {677, 2274, 2274}, + {2099, 2279, 2279}, + {1863, 2283, 2283}, + {1966, 2305, 2306}, + {2279, 2313, 2313}, + {1628, 2319, 2319}, + {755, 2329, 2329}, + {1461, 2334, 2334}, + {2117, 2340, 2340}, + {2313, 2349, 2349}, + {1859, 2353, 2353}, + {1048, 2362, 2362}, + {895, 2366, 2366}, + {2278, 2373, 2373}, + {1884, 2377, 2377}, + {1402, 2387, 2392}, + {700, 2398, 2398}, + {1971, 2402, 2402}, + {2009, 2419, 2419}, + {1441, 2426, 2428}, + {2208, 2432, 2432}, + {2038, 2436, 2436}, + {932, 2443, 2443}, + {1759, 2447, 2448}, + {744, 2452, 2452}, + {1875, 2458, 2458}, + {2405, 2468, 2468}, + {1596, 2472, 2473}, + {1953, 2480, 2482}, + {736, 2487, 2487}, + {1913, 2493, 2493}, + {774, 2497, 2497}, + {1484, 2506, 2508}, + {2432, 2512, 2512}, + {752, 2519, 2519}, + {2497, 2523, 2523}, + {2409, 2528, 2529}, + {2122, 2533, 2533}, + {2396, 2537, 2538}, + {2410, 2547, 2548}, + {1093, 2555, 2560}, + {551, 2564, 2565}, + {2268, 2569, 2569}, + {1362, 2580, 2580}, + {1916, 2584, 2585}, + {994, 2589, 2590}, + {1979, 2596, 2596}, + {1041, 2602, 2602}, + {2104, 2614, 2616}, + {2609, 2621, 2628}, + {2329, 2638, 2638}, + {2211, 2657, 2658}, + {2638, 2662, 2667}, + {2578, 2676, 2679}, + {2153, 2685, 2686}, + {2608, 2696, 2697}, + {598, 2712, 2712}, + {2620, 2719, 2720}, + {1888, 2724, 2728}, + {2709, 2732, 2732}, + {1365, 2739, 2739}, + {784, 2747, 2748}, + {424, 2753, 2753}, + {2204, 2759, 2759}, + {812, 2768, 2769}, + {2455, 2773, 2773}, + {1722, 2781, 2781}, + {1917, 2792, 2792}, + {2705, 2799, 2799}, + {2685, 2806, 2807}, + {2742, 2811, 2811}, + {1370, 2818, 2818}, + {2641, 2830, 2830}, + {2512, 2837, 2837}, + {2457, 2841, 2841}, + {2756, 2845, 2845}, + {2719, 2855, 2855}, + {1423, 2859, 2859}, + {2849, 2863, 2865}, + {1474, 2871, 2871}, + {1161, 2875, 2876}, + {2282, 2880, 2881}, + {2746, 2888, 2888}, + {1783, 2893, 2893}, + {2401, 2899, 2900}, + {2632, 2920, 2923}, + {2422, 2928, 2930}, + {2715, 2939, 2939}, + {2162, 2943, 2943}, + {2859, 2947, 2947}, + {1910, 2951, 2951}, + {1431, 
2955, 2956}, + {1439, 2964, 2964}, + {2501, 2968, 2969}, + {2029, 2973, 2976}, + {689, 2983, 2984}, + {1658, 2988, 2988}, + {1031, 2996, 2996}, + {2149, 3001, 3002}, + {25, 3009, 3013}, + {2964, 3023, 3023}, + {953, 3027, 3028}, + {2359, 3036, 3036}, + {3023, 3049, 3049}, + {2880, 3055, 3056}, + {2973, 3076, 3077}, + {2874, 3090, 3090}, + {2871, 3094, 3094}, + {2532, 3100, 3100}, + {2938, 3107, 3108}, + {350, 3115, 3115}, + {2196, 3119, 3121}, + {1133, 3127, 3129}, + {1797, 3134, 3150}, + {3032, 3158, 3158}, + {3016, 3172, 3172}, + {2533, 3179, 3179}, + {3055, 3187, 3188}, + {1384, 3192, 3193}, + {2799, 3199, 3199}, + {2126, 3203, 3207}, + {2334, 3215, 3215}, + {2105, 3220, 3221}, + {3199, 3229, 3229}, + {2891, 3233, 3233}, + {855, 3240, 3240}, + {1852, 3253, 3256}, + {2140, 3263, 3263}, + {1682, 3268, 3270}, + {3243, 3274, 3274}, + {924, 3279, 3279}, + {2212, 3283, 3283}, + {2596, 3287, 3287}, + {2999, 3291, 3291}, + {2353, 3295, 3295}, + {2480, 3302, 3304}, + {1959, 3308, 3311}, + {3000, 3318, 3318}, + {845, 3330, 3330}, + {2283, 3334, 3334}, + {2519, 3342, 3342}, + {3325, 3346, 3348}, + {2397, 3353, 3354}, + {2763, 3358, 3358}, + {3198, 3363, 3364}, + {3211, 3368, 3372}, + {2950, 3376, 3377}, + {3245, 3388, 3391}, + {2264, 3398, 3398}, + {795, 3403, 3403}, + {3287, 3407, 3407}, + {3358, 3411, 3411}, + {3317, 3415, 3415}, + {3232, 3431, 3431}, + {2128, 3435, 3437}, + {3236, 3441, 3441}, + {3398, 3445, 3446}, + {2814, 3450, 3450}, + {3394, 3466, 3466}, + {2425, 3470, 3470}, + {3330, 3476, 3476}, + {1612, 3480, 3480}, + {1004, 3485, 3486}, + {2732, 3490, 3490}, + {1117, 3494, 3495}, + {629, 3501, 3501}, + {3087, 3514, 3514}, + {684, 3518, 3518}, + {3489, 3522, 3524}, + {1760, 3529, 3529}, + {617, 3537, 3537}, + {3431, 3541, 3541}, + {997, 3547, 3547}, + {882, 3552, 3553}, + {2419, 3558, 3558}, + {610, 3562, 3563}, + {1903, 3567, 3569}, + {3005, 3575, 3575}, + {3076, 3585, 3586}, + {3541, 3590, 3590}, + {3490, 3594, 3594}, + {1899, 3599, 3599}, + {3545, 3606, 3606}, 
+ {3290, 3614, 3615}, + {2056, 3619, 3620}, + {3556, 3625, 3625}, + {3294, 3632, 3633}, + {637, 3643, 3644}, + {3609, 3648, 3650}, + {3175, 3658, 3658}, + {3498, 3665, 3665}, + {1597, 3669, 3669}, + {1983, 3673, 3673}, + {3215, 3682, 3682}, + {3544, 3689, 3689}, + {3694, 3698, 3698}, + {3228, 3715, 3716}, + {2594, 3720, 3722}, + {3573, 3726, 3726}, + {2479, 3732, 3735}, + {3191, 3741, 3742}, + {1113, 3746, 3747}, + {2844, 3751, 3751}, + {3445, 3756, 3757}, + {3755, 3766, 3766}, + {3421, 3775, 3780}, + {3593, 3784, 3786}, + {3263, 3796, 3796}, + {3469, 3806, 3806}, + {2602, 3815, 3815}, + {723, 3819, 3821}, + {1608, 3826, 3826}, + {3334, 3830, 3830}, + {2198, 3835, 3835}, + {2635, 3840, 3840}, + {3702, 3852, 3853}, + {3406, 3858, 3859}, + {3681, 3867, 3870}, + {3407, 3880, 3880}, + {340, 3889, 3889}, + {3772, 3893, 3893}, + {593, 3897, 3897}, + {2563, 3914, 3916}, + {2981, 3929, 3929}, + {1835, 3933, 3934}, + {3906, 3951, 3951}, + {1459, 3958, 3958}, + {3889, 3974, 3974}, + {2188, 3982, 3982}, + {3220, 3986, 3987}, + {3585, 3991, 3993}, + {3712, 3997, 4001}, + {2805, 4007, 4007}, + {1879, 4012, 4013}, + {3618, 4018, 4018}, + {1145, 4031, 4032}, + {3901, 4037, 4037}, + {2772, 4046, 4047}, + {2802, 4053, 4054}, + {3299, 4058, 4058}, + {3725, 4066, 4066}, + {2271, 4070, 4070}, + {385, 4075, 4076}, + {3624, 4089, 4090}, + {3745, 4096, 4098}, + {1563, 4102, 4102}, + {4045, 4106, 4111}, + {3696, 4115, 4119}, + {3376, 4125, 4126}, + {1880, 4130, 4130}, + {2048, 4140, 4141}, + {2724, 4149, 4149}, + {1767, 4156, 4156}, + {2601, 4164, 4164}, + {2757, 4168, 4168}, + {3974, 4172, 4172}, + {3914, 4178, 4178}, + {516, 4185, 4185}, + {1032, 4189, 4190}, + {3462, 4197, 4198}, + {3805, 4202, 4203}, + {3910, 4207, 4212}, + {3075, 4221, 4221}, + {3756, 4225, 4226}, + {1872, 4236, 4237}, + {3844, 4241, 4241}, + {3991, 4245, 4249}, + {2203, 4258, 4258}, + {3903, 4267, 4268}, + {705, 4272, 4272}, + {1896, 4276, 4276}, + {1955, 4285, 4288}, + {3746, 4302, 4303}, + {2672, 4311, 4311}, + 
{3969, 4317, 4317}, + {3883, 4322, 4322}, + {1920, 4339, 4340}, + {3527, 4344, 4346}, + {1160, 4358, 4358}, + {3648, 4364, 4366}, + {2711, 4387, 4387}, + {3619, 4391, 4392}, + {1944, 4396, 4396}, + {4369, 4400, 4400}, + {2736, 4404, 4407}, + {2546, 4411, 4412}, + {4390, 4422, 4422}, + {3610, 4426, 4427}, + {4058, 4431, 4431}, + {4374, 4435, 4435}, + {3463, 4445, 4446}, + {1813, 4452, 4452}, + {3669, 4456, 4456}, + {3830, 4460, 4460}, + {421, 4464, 4465}, + {1719, 4471, 4471}, + {3880, 4475, 4475}, + {1834, 4485, 4487}, + {3590, 4491, 4491}, + {442, 4496, 4497}, + {4435, 4501, 4501}, + {3814, 4509, 4509}, + {987, 4513, 4513}, + {4494, 4518, 4521}, + {3218, 4526, 4529}, + {4221, 4537, 4537}, + {2778, 4543, 4545}, + {4422, 4552, 4552}, + {4031, 4558, 4559}, + {4178, 4563, 4563}, + {3726, 4567, 4574}, + {4027, 4578, 4578}, + {4339, 4585, 4587}, + {3796, 4592, 4595}, + {543, 4600, 4613}, + {2855, 4620, 4621}, + {2795, 4627, 4627}, + {3440, 4631, 4632}, + {4279, 4636, 4639}, + {4245, 4643, 4645}, + {4516, 4649, 4650}, + {3133, 4654, 4654}, + {4042, 4658, 4659}, + {3422, 4663, 4663}, + {4046, 4667, 4668}, + {4267, 4672, 4672}, + {4004, 4676, 4677}, + {2490, 4682, 4682}, + {2451, 4697, 4697}, + {3027, 4705, 4705}, + {4028, 4717, 4717}, + {4460, 4721, 4721}, + {2471, 4725, 4727}, + {3090, 4735, 4735}, + {3192, 4739, 4740}, + {3835, 4760, 4760}, + {4540, 4764, 4764}, + {4007, 4772, 4774}, + {619, 4784, 4784}, + {3561, 4789, 4791}, + {3367, 4805, 4805}, + {4490, 4810, 4811}, + {2402, 4815, 4815}, + {3352, 4819, 4822}, + {2773, 4828, 4828}, + {4552, 4832, 4832}, + {2522, 4840, 4841}, + {316, 4847, 4852}, + {4715, 4858, 4858}, + {2959, 4862, 4862}, + {4858, 4868, 4869}, + {2134, 4873, 4873}, + {578, 4878, 4878}, + {4189, 4889, 4890}, + {2229, 4894, 4894}, + {4501, 4898, 4898}, + {2297, 4903, 4903}, + {2933, 4909, 4909}, + {3008, 4913, 4913}, + {3153, 4917, 4917}, + {4819, 4921, 4921}, + {4921, 4932, 4933}, + {4920, 4944, 4945}, + {4814, 4954, 4955}, + {576, 4966, 4966}, + 
{1854, 4970, 4971}, + {1374, 4975, 4976}, + {3307, 4980, 4980}, + {974, 4984, 4988}, + {4721, 4992, 4992}, + {4898, 4996, 4996}, + {4475, 5006, 5006}, + {3819, 5012, 5012}, + {1948, 5019, 5021}, + {4954, 5027, 5029}, + {3740, 5038, 5040}, + {4763, 5044, 5045}, + {1936, 5051, 5051}, + {4844, 5055, 5060}, + {4215, 5069, 5072}, + {1146, 5076, 5076}, + {3845, 5082, 5082}, + {4865, 5090, 5090}, + {4624, 5094, 5094}, + {4815, 5098, 5098}, + {5006, 5105, 5105}, + {4980, 5109, 5109}, + {4795, 5113, 5115}, + {5043, 5119, 5121}, + {4782, 5129, 5129}, + {3826, 5139, 5139}, + {3876, 5156, 5156}, + {3111, 5167, 5171}, + {1470, 5177, 5177}, + {4431, 5181, 5181}, + {546, 5189, 5189}, + {4225, 5193, 5193}, + {1672, 5199, 5201}, + {4207, 5205, 5209}, + {4220, 5216, 5217}, + {4658, 5224, 5225}, + {3295, 5235, 5235}, + {2436, 5239, 5239}, + {2349, 5246, 5246}, + {2175, 5250, 5250}, + {5180, 5257, 5258}, + {3161, 5263, 5263}, + {5105, 5272, 5272}, + {3552, 5282, 5282}, + {4944, 5299, 5300}, + {4130, 5312, 5313}, + {902, 5323, 5323}, + {913, 5327, 5327}, + {2987, 5333, 5334}, + {5150, 5344, 5344}, + {5249, 5348, 5348}, + {1965, 5358, 5359}, + {5330, 5364, 5364}, + {2012, 5373, 5377}, + {712, 5384, 5386}, + {5235, 5390, 5390}, + {5044, 5398, 5399}, + {564, 5406, 5406}, + {39, 5410, 5410}, + {4642, 5422, 5425}, + {4421, 5437, 5438}, + {2347, 5449, 5449}, + {5333, 5453, 5454}, + {4136, 5458, 5459}, + {3793, 5468, 5468}, + {2243, 5480, 5480}, + {4889, 5492, 5493}, + {4295, 5504, 5504}, + {2785, 5511, 5511}, + {2377, 5518, 5518}, + {3662, 5525, 5525}, + {5097, 5529, 5530}, + {4781, 5537, 5538}, + {4697, 5547, 5548}, + {436, 5552, 5553}, + {5542, 5558, 5558}, + {3692, 5562, 5562}, + {2696, 5568, 5569}, + {4620, 5578, 5578}, + {2898, 5590, 5590}, + {5557, 5596, 5618}, + {2797, 5623, 5625}, + {2792, 5629, 5629}, + {5243, 5633, 5633}, + {5348, 5637, 5637}, + {5547, 5643, 5643}, + {4296, 5654, 5655}, + {5568, 5662, 5662}, + {3001, 5670, 5671}, + {3794, 5679, 5679}, + {4006, 5685, 5686}, + {4969, 
5690, 5692}, + {687, 5704, 5704}, + {4563, 5708, 5708}, + {1723, 5738, 5738}, + {649, 5742, 5742}, + {5163, 5748, 5755}, + {3907, 5759, 5759}, + {3074, 5764, 5764}, + {5326, 5771, 5771}, + {2951, 5776, 5776}, + {5181, 5780, 5780}, + {2614, 5785, 5788}, + {4709, 5794, 5794}, + {2784, 5799, 5799}, + {5518, 5803, 5803}, + {4155, 5812, 5815}, + {921, 5819, 5819}, + {5224, 5823, 5824}, + {2853, 5830, 5836}, + {5776, 5840, 5840}, + {2955, 5844, 5845}, + {5745, 5853, 5853}, + {3291, 5857, 5857}, + {2988, 5861, 5861}, + {2647, 5865, 5865}, + {5398, 5869, 5870}, + {1085, 5874, 5875}, + {4906, 5881, 5881}, + {802, 5886, 5886}, + {5119, 5890, 5893}, + {5802, 5899, 5900}, + {3415, 5904, 5904}, + {5629, 5908, 5908}, + {3714, 5912, 5914}, + {5558, 5921, 5921}, + {2710, 5927, 5928}, + {1094, 5932, 5934}, + {2653, 5940, 5941}, + {4735, 5954, 5954}, + {5861, 5958, 5958}, + {1040, 5971, 5971}, + {5514, 5977, 5977}, + {5048, 5981, 5982}, + {5953, 5992, 5993}, + {3751, 5997, 5997}, + {4991, 6001, 6002}, + {5885, 6006, 6007}, + {5529, 6011, 6012}, + {4974, 6019, 6020}, + {5857, 6024, 6024}, + {3483, 6032, 6032}, + {3594, 6036, 6036}, + {1997, 6040, 6040}, + {5997, 6044, 6047}, + {5197, 6051, 6051}, + {1764, 6055, 6055}, + {6050, 6059, 6059}, + {5239, 6063, 6063}, + {5049, 6067, 6067}, + {5957, 6073, 6074}, + {1022, 6078, 6078}, + {3414, 6083, 6084}, + {3809, 6090, 6090}, + {4562, 6095, 6096}, + {5878, 6104, 6104}, + {594, 6108, 6109}, + {3353, 6115, 6116}, + {4992, 6120, 6121}, + {2424, 6125, 6125}, + {4484, 6130, 6130}, + {3900, 6134, 6135}, + {5793, 6139, 6141}, + {3562, 6145, 6145}, + {1438, 6152, 6153}, + {6058, 6157, 6158}, + {4411, 6162, 6163}, + {4590, 6167, 6171}, + {4748, 6175, 6175}, + {5517, 6183, 6184}, + {6095, 6191, 6192}, + {1471, 6203, 6203}, + {2643, 6209, 6210}, + {450, 6220, 6220}, + {5266, 6226, 6226}, + {2576, 6233, 6233}, + {2607, 6239, 6240}, + {5164, 6244, 6251}, + {6054, 6255, 6255}, + {1789, 6260, 6261}, + {5250, 6265, 6265}, + {6062, 6273, 6278}, + {5990, 
6282, 6282}, + {3283, 6286, 6286}, + {5436, 6290, 6290}, + {6059, 6294, 6294}, + {5668, 6298, 6300}, + {3072, 6324, 6329}, + {3132, 6338, 6339}, + {3246, 6343, 6344}, + {28, 6348, 6349}, + {1503, 6353, 6355}, + {6067, 6359, 6359}, + {3384, 6364, 6364}, + {545, 6375, 6376}, + {5803, 6380, 6380}, + {5522, 6384, 6385}, + {5908, 6389, 6389}, + {2796, 6393, 6396}, + {4831, 6403, 6404}, + {6388, 6412, 6412}, + {6005, 6417, 6420}, + {4450, 6430, 6430}, + {4050, 6435, 6435}, + {5372, 6441, 6441}, + {4378, 6447, 6447}, + {6199, 6452, 6452}, + {3026, 6456, 6456}, + {2642, 6460, 6462}, + {6392, 6470, 6470}, + {6459, 6474, 6474}, + {2829, 6487, 6488}, + {2942, 6499, 6504}, + {5069, 6508, 6511}, + {5341, 6515, 6516}, + {5853, 6521, 6525}, + {6104, 6531, 6531}, + {5759, 6535, 6538}, + {4672, 6542, 6543}, + {2443, 6550, 6550}, + {5109, 6554, 6554}, + {6494, 6558, 6560}, + {6006, 6570, 6572}, + {6424, 6576, 6580}, + {4693, 6591, 6592}, + {6439, 6596, 6597}, + {3179, 6601, 6601}, + {5299, 6606, 6607}, + {4148, 6612, 6613}, + {3774, 6617, 6617}, + {3537, 6623, 6624}, + {4975, 6628, 6629}, + {3848, 6636, 6636}, + {856, 6640, 6640}, + {5724, 6645, 6645}, + {6632, 6651, 6651}, + {4630, 6656, 6658}, + {1440, 6662, 6662}, + {4281, 6666, 6667}, + {4302, 6671, 6672}, + {2589, 6676, 6677}, + {5647, 6681, 6687}, + {6082, 6691, 6693}, + {6144, 6698, 6698}, + {6103, 6709, 6710}, + {3710, 6714, 6714}, + {4253, 6718, 6721}, + {2467, 6730, 6730}, + {4778, 6734, 6734}, + {6528, 6738, 6738}, + {4358, 6747, 6747}, + {5889, 6753, 6753}, + {5193, 6757, 6757}, + {5797, 6761, 6761}, + {3858, 6765, 6766}, + {5951, 6776, 6776}, + {6487, 6781, 6782}, + {3282, 6786, 6787}, + {4667, 6797, 6799}, + {1927, 6803, 6806}, + {6583, 6810, 6810}, + {4937, 6814, 6814}, + {6099, 6824, 6824}, + {4415, 6835, 6836}, + {6332, 6840, 6841}, + {5160, 6850, 6850}, + {4764, 6854, 6854}, + {6814, 6858, 6859}, + {3018, 6864, 6864}, + {6293, 6868, 6869}, + {6359, 6877, 6877}, + {3047, 6884, 6886}, + {5262, 6890, 6891}, + {5471, 
6900, 6900}, + {3268, 6910, 6912}, + {1047, 6916, 6916}, + {5904, 6923, 6923}, + {5798, 6933, 6938}, + {4149, 6942, 6942}, + {1821, 6946, 6946}, + {3599, 6952, 6952}, + {6470, 6957, 6957}, + {5562, 6961, 6961}, + {6268, 6965, 6967}, + {6389, 6971, 6971}, + {6596, 6975, 6976}, + {6553, 6980, 6981}, + {6576, 6985, 6989}, + {1375, 6993, 6993}, + {652, 6998, 6998}, + {4876, 7002, 7003}, + {5768, 7011, 7013}, + {3973, 7017, 7017}, + {6802, 7025, 7025}, + {6955, 7034, 7036}, + {6974, 7040, 7040}, + {5944, 7044, 7044}, + {6992, 7048, 7054}, + {6872, 7059, 7059}, + {2943, 7063, 7063}, + {6923, 7067, 7067}, + {5094, 7071, 7071}, + {4873, 7075, 7075}, + {5819, 7079, 7079}, + {5945, 7085, 7085}, + {1540, 7090, 7091}, + {2090, 7095, 7095}, + {5024, 7104, 7105}, + {6900, 7109, 7109}, + {6024, 7113, 7114}, + {6000, 7118, 7120}, + {2187, 7124, 7125}, + {6760, 7129, 7130}, + {5898, 7134, 7136}, + {7032, 7144, 7144}, + {4271, 7148, 7148}, + {3706, 7152, 7152}, + {6970, 7156, 7157}, + {7088, 7161, 7163}, + {2718, 7168, 7169}, + {5674, 7175, 7175}, + {4631, 7182, 7182}, + {7070, 7188, 7189}, + {6220, 7196, 7196}, + {3458, 7201, 7202}, + {2041, 7211, 7212}, + {1454, 7216, 7216}, + {5199, 7225, 7227}, + {3529, 7234, 7234}, + {6890, 7238, 7238}, + {3815, 7242, 7243}, + {5490, 7250, 7253}, + {6554, 7257, 7263}, + {5890, 7267, 7269}, + {6877, 7273, 7273}, + {4877, 7277, 7277}, + {2502, 7285, 7285}, + {1483, 7289, 7295}, + {7210, 7304, 7308}, + {6845, 7313, 7316}, + {7219, 7320, 7320}, + {7001, 7325, 7329}, + {6853, 7333, 7334}, + {6120, 7338, 7338}, + {6606, 7342, 7343}, + {7020, 7348, 7350}, + {3509, 7354, 7354}, + {7133, 7359, 7363}, + {3434, 7371, 7374}, + {2787, 7384, 7384}, + {7044, 7388, 7388}, + {6960, 7394, 7395}, + {6676, 7399, 7400}, + {7161, 7404, 7404}, + {7285, 7417, 7418}, + {4558, 7425, 7426}, + {4828, 7430, 7430}, + {6063, 7436, 7436}, + {3597, 7442, 7442}, + {914, 7446, 7446}, + {7320, 7452, 7454}, + {7267, 7458, 7460}, + {5076, 7464, 7464}, + {7430, 7468, 7469}, + {6273, 
7473, 7474}, + {7440, 7478, 7487}, + {7348, 7491, 7494}, + {1021, 7510, 7510}, + {7473, 7515, 7515}, + {2823, 7519, 7519}, + {6264, 7527, 7527}, + {7302, 7531, 7531}, + {7089, 7535, 7535}, + {7342, 7540, 7541}, + {3688, 7547, 7551}, + {3054, 7558, 7560}, + {4177, 7566, 7567}, + {6691, 7574, 7575}, + {7156, 7585, 7586}, + {7147, 7590, 7592}, + {7407, 7598, 7598}, + {7403, 7602, 7603}, + {6868, 7607, 7607}, + {6636, 7611, 7611}, + {4805, 7617, 7617}, + {5779, 7623, 7623}, + {7063, 7627, 7627}, + {5079, 7632, 7632}, + {7377, 7637, 7637}, + {7337, 7641, 7642}, + {6738, 7655, 7655}, + {7338, 7659, 7659}, + {6541, 7669, 7671}, + {595, 7675, 7675}, + {7658, 7679, 7680}, + {7647, 7685, 7686}, + {2477, 7690, 7690}, + {5823, 7694, 7694}, + {4156, 7699, 7699}, + {5931, 7703, 7706}, + {6854, 7712, 7712}, + {4931, 7718, 7718}, + {6979, 7722, 7722}, + {5085, 7727, 7727}, + {6965, 7732, 7732}, + {7201, 7736, 7737}, + {3639, 7741, 7743}, + {7534, 7749, 7749}, + {4292, 7753, 7753}, + {3427, 7759, 7763}, + {7273, 7767, 7767}, + {940, 7778, 7778}, + {4838, 7782, 7785}, + {4216, 7790, 7792}, + {922, 7800, 7801}, + {7256, 7810, 7811}, + {7789, 7815, 7819}, + {7225, 7823, 7825}, + {7531, 7829, 7829}, + {6997, 7833, 7833}, + {7757, 7837, 7838}, + {4129, 7842, 7842}, + {7333, 7848, 7849}, + {6776, 7855, 7855}, + {7527, 7859, 7859}, + {4370, 7863, 7863}, + {4512, 7868, 7868}, + {5679, 7880, 7880}, + {3162, 7884, 7885}, + {3933, 7892, 7894}, + {7804, 7899, 7902}, + {6363, 7906, 7907}, + {7848, 7911, 7912}, + {5584, 7917, 7921}, + {874, 7926, 7926}, + {3342, 7930, 7930}, + {4507, 7935, 7937}, + {3672, 7943, 7944}, + {7911, 7948, 7949}, + {6402, 7956, 7956}, + {7940, 7960, 7960}, + {7113, 7964, 7964}, + {1073, 7968, 7968}, + {7740, 7974, 7974}, + {7601, 7978, 7982}, + {6797, 7987, 7988}, + {3528, 7994, 7995}, + {5483, 7999, 7999}, + {5717, 8011, 8011}, + {5480, 8017, 8017}, + {7770, 8023, 8030}, + {2452, 8034, 8034}, + {5282, 8047, 8047}, + {7967, 8051, 8051}, + {1128, 8058, 8066}, + {6348, 
8070, 8070}, + {8055, 8077, 8077}, + {7925, 8081, 8086}, + {6810, 8090, 8090}, + {5051, 8101, 8101}, + {4696, 8109, 8110}, + {5129, 8119, 8119}, + {4449, 8123, 8123}, + {7222, 8127, 8127}, + {4649, 8131, 8134}, + {7994, 8138, 8138}, + {5954, 8148, 8148}, + {475, 8152, 8153}, + {7906, 8157, 8157}, + {7458, 8164, 8166}, + {7632, 8171, 8173}, + {3874, 8177, 8183}, + {4391, 8187, 8187}, + {561, 8191, 8191}, + {2417, 8195, 8195}, + {2357, 8204, 8204}, + {2269, 8216, 8218}, + {3968, 8222, 8222}, + {2200, 8226, 8227}, + {3453, 8247, 8247}, + {2439, 8251, 8252}, + {7175, 8257, 8257}, + {976, 8262, 8264}, + {4953, 8273, 8273}, + {4219, 8278, 8278}, + {6, 8285, 8291}, + {5703, 8295, 8296}, + {5272, 8300, 8300}, + {8037, 8304, 8304}, + {8186, 8314, 8314}, + {8304, 8318, 8318}, + {8051, 8326, 8326}, + {8318, 8330, 8330}, + {2671, 8334, 8335}, + {2662, 8339, 8339}, + {8081, 8349, 8350}, + {3328, 8356, 8356}, + {2879, 8360, 8362}, + {8050, 8370, 8371}, + {8330, 8375, 8376}, + {8375, 8386, 8386}, + {4961, 8390, 8390}, + {1017, 8403, 8405}, + {3533, 8416, 8416}, + {4555, 8422, 8422}, + {6445, 8426, 8426}, + {8169, 8432, 8432}, + {990, 8436, 8436}, + {4102, 8440, 8440}, + {7398, 8444, 8446}, + {3480, 8450, 8450}, + {6324, 8462, 8462}, + {7948, 8466, 8467}, + {5950, 8471, 8471}, + {5189, 8476, 8476}, + {4026, 8490, 8490}, + {8374, 8494, 8495}, + {4682, 8501, 8501}, + {7387, 8506, 8506}, + {8164, 8510, 8515}, + {4079, 8524, 8524}, + {8360, 8529, 8531}, + {7446, 8540, 8543}, + {7971, 8547, 8548}, + {4311, 8552, 8552}, + {5204, 8556, 8557}, + {7968, 8562, 8562}, + {7847, 8571, 8573}, + {8547, 8577, 8577}, + {5320, 8581, 8581}, + {8556, 8585, 8586}, + {8504, 8590, 8590}, + {7669, 8602, 8604}, + {5874, 8608, 8609}, + {5828, 8613, 8613}, + {7998, 8617, 8617}, + {8519, 8625, 8625}, + {7250, 8637, 8637}, + {426, 8641, 8641}, + {8436, 8645, 8645}, + {5986, 8649, 8656}, + {8157, 8660, 8660}, + {7182, 8665, 8665}, + {8421, 8675, 8675}, + {8509, 8681, 8681}, + {5137, 8688, 8689}, + {8625, 8694, 
8695}, + {5228, 8701, 8702}, + {6661, 8714, 8714}, + {1010, 8719, 8719}, + {6648, 8723, 8723}, + {3500, 8728, 8728}, + {2442, 8735, 8735}, + {8494, 8740, 8741}, + {8171, 8753, 8755}, + {7242, 8763, 8764}, + {4739, 8768, 8769}, + {7079, 8773, 8773}, + {8386, 8777, 8777}, + {8624, 8781, 8787}, + {661, 8791, 8794}, + {8631, 8801, 8801}, + {7753, 8805, 8805}, + {4783, 8809, 8810}, + {1673, 8814, 8815}, + {6623, 8819, 8819}, + {4404, 8823, 8823}, + {8089, 8827, 8828}, + {8773, 8832, 8832}, + {5394, 8836, 8836}, + {6231, 8841, 8843}, + {1015, 8852, 8853}, + {6873, 8857, 8857}, + {6289, 8865, 8865}, + {8577, 8869, 8869}, + {8114, 8873, 8875}, + {8534, 8883, 8883}, + {3007, 8887, 8888}, + {8827, 8892, 8893}, + {4788, 8897, 8900}, + {5698, 8906, 8907}, + {7690, 8911, 8911}, + {6643, 8919, 8919}, + {7206, 8923, 8924}, + {7866, 8929, 8931}, + {8880, 8942, 8942}, + {8630, 8951, 8952}, + {6027, 8958, 8958}, + {7749, 8966, 8967}, + {4932, 8972, 8973}, + {8892, 8980, 8981}, + {634, 9003, 9003}, + {8109, 9007, 9008}, + {8777, 9012, 9012}, + {3981, 9016, 9017}, + {5723, 9025, 9025}, + {7662, 9034, 9038}, + {8955, 9042, 9042}, + {8070, 9060, 9062}, + {8910, 9066, 9066}, + {5363, 9070, 9071}, + {7699, 9075, 9076}, + {8991, 9081, 9081}, + {6850, 9085, 9085}, + {5811, 9092, 9094}, + {9079, 9098, 9102}, + {6456, 9106, 9106}, + {2259, 9111, 9111}, + {4752, 9116, 9116}, + {9060, 9120, 9123}, + {8090, 9127, 9127}, + {5305, 9131, 9132}, + {8623, 9137, 9137}, + {7417, 9141, 9141}, + {6564, 9148, 9149}, + {9126, 9157, 9158}, + {4285, 9169, 9170}, + {8698, 9174, 9174}, + {8869, 9178, 9178}, + {2572, 9182, 9183}, + {6482, 9188, 9190}, + {9181, 9201, 9201}, + {2968, 9208, 9209}, + {2506, 9213, 9215}, + {9127, 9219, 9219}, + {7910, 9225, 9227}, + {5422, 9235, 9239}, + {8813, 9244, 9246}, + {9178, 9250, 9250}, + {8748, 9255, 9255}, + {7354, 9265, 9265}, + {7767, 9269, 9269}, + {7710, 9281, 9283}, + {8826, 9288, 9290}, + {861, 9295, 9295}, + {4482, 9301, 9301}, + {9264, 9305, 9306}, + {8805, 9310, 
9310}, + {4995, 9314, 9314}, + {6730, 9318, 9318}, + {7457, 9328, 9328}, + {2547, 9335, 9336}, + {6298, 9340, 9343}, + {9305, 9353, 9354}, + {9269, 9358, 9358}, + {6338, 9370, 9370}, + {7289, 9376, 9379}, + {5780, 9383, 9383}, + {7607, 9387, 9387}, + {2065, 9392, 9392}, + {7238, 9396, 9396}, + {8856, 9400, 9400}, + {8069, 9412, 9413}, + {611, 9420, 9420}, + {7071, 9424, 9424}, + {3089, 9430, 9431}, + {7117, 9435, 9438}, + {1976, 9445, 9445}, + {6640, 9449, 9449}, + {5488, 9453, 9453}, + {8739, 9457, 9459}, + {5958, 9466, 9466}, + {7985, 9470, 9470}, + {8735, 9475, 9475}, + {5009, 9479, 9479}, + {8073, 9483, 9484}, + {2328, 9490, 9491}, + {9250, 9495, 9495}, + {4043, 9502, 9502}, + {7712, 9506, 9506}, + {9012, 9510, 9510}, + {9028, 9514, 9515}, + {2190, 9521, 9524}, + {9029, 9528, 9528}, + {9519, 9532, 9532}, + {9495, 9536, 9536}, + {8527, 9540, 9540}, + {2137, 9550, 9550}, + {8419, 9557, 9557}, + {9383, 9561, 9562}, + {8970, 9575, 9578}, + {8911, 9582, 9582}, + {7828, 9595, 9596}, + {6180, 9600, 9600}, + {8738, 9604, 9607}, + {7540, 9611, 9612}, + {9599, 9616, 9618}, + {9187, 9623, 9623}, + {9294, 9628, 9629}, + {4536, 9639, 9639}, + {3867, 9643, 9643}, + {6305, 9648, 9648}, + {1617, 9654, 9657}, + {5762, 9666, 9666}, + {8314, 9670, 9670}, + {9666, 9674, 9675}, + {9506, 9679, 9679}, + {9669, 9685, 9686}, + {9683, 9690, 9690}, + {8763, 9697, 9698}, + {7468, 9702, 9702}, + {460, 9707, 9707}, + {3115, 9712, 9712}, + {9424, 9716, 9717}, + {7359, 9721, 9724}, + {7547, 9728, 9729}, + {7151, 9733, 9738}, + {7627, 9742, 9742}, + {2822, 9747, 9747}, + {8247, 9751, 9753}, + {9550, 9758, 9758}, + {7585, 9762, 9763}, + {1002, 9767, 9767}, + {7168, 9772, 9773}, + {6941, 9777, 9780}, + {9728, 9784, 9786}, + {9770, 9792, 9796}, + {6411, 9801, 9802}, + {3689, 9806, 9808}, + {9575, 9814, 9816}, + {7025, 9820, 9821}, + {2776, 9826, 9826}, + {9806, 9830, 9830}, + {9820, 9834, 9835}, + {9800, 9839, 9847}, + {9834, 9851, 9852}, + {9829, 9856, 9862}, + {1400, 9866, 9866}, + {3197, 9870, 
9871}, + {9851, 9875, 9876}, + {9742, 9883, 9884}, + {3362, 9888, 9889}, + {9883, 9893, 9893}, + {5711, 9899, 9910}, + {7806, 9915, 9915}, + {9120, 9919, 9919}, + {9715, 9925, 9934}, + {2580, 9938, 9938}, + {4907, 9942, 9944}, + {6239, 9953, 9954}, + {6961, 9963, 9963}, + {5295, 9967, 9968}, + {1915, 9972, 9973}, + {3426, 9983, 9985}, + {9875, 9994, 9995}, + {6942, 9999, 9999}, + {6621, 10005, 10005}, + {7589, 10010, 10012}, + {9286, 10020, 10020}, + {838, 10024, 10024}, + {9980, 10028, 10031}, + {9994, 10035, 10041}, + {2702, 10048, 10051}, + {2621, 10059, 10059}, + {10054, 10065, 10065}, + {8612, 10073, 10074}, + {7033, 10078, 10078}, + {916, 10082, 10082}, + {10035, 10086, 10087}, + {8613, 10097, 10097}, + {9919, 10107, 10108}, + {6133, 10114, 10115}, + {10059, 10119, 10119}, + {10065, 10126, 10127}, + {7732, 10131, 10131}, + {7155, 10135, 10136}, + {6728, 10140, 10140}, + {6162, 10144, 10145}, + {4724, 10150, 10150}, + {1665, 10154, 10154}, + {10126, 10163, 10163}, + {9783, 10168, 10168}, + {1715, 10172, 10173}, + {7152, 10177, 10182}, + {8760, 10187, 10187}, + {7829, 10191, 10191}, + {9679, 10196, 10196}, + {9369, 10201, 10201}, + {2928, 10206, 10208}, + {6951, 10214, 10217}, + {5633, 10221, 10221}, + {7199, 10225, 10225}, + {10118, 10230, 10231}, + {9999, 10235, 10236}, + {10045, 10240, 10249}, + {5565, 10256, 10256}, + {9866, 10261, 10261}, + {10163, 10268, 10268}, + {9869, 10272, 10272}, + {9789, 10276, 10283}, + {10235, 10287, 10288}, + {10214, 10298, 10299}, + {6971, 10303, 10303}, + {3346, 10307, 10307}, + {10185, 10311, 10312}, + {9993, 10318, 10320}, + {2779, 10332, 10334}, + {1726, 10338, 10338}, + {741, 10354, 10360}, + {10230, 10372, 10373}, + {10260, 10384, 10385}, + {10131, 10389, 10398}, + {6946, 10406, 10409}, + {10158, 10413, 10420}, + {10123, 10424, 10424}, + {6157, 10428, 10429}, + {4518, 10434, 10434}, + {9893, 10438, 10438}, + {9865, 10442, 10446}, + {7558, 10454, 10454}, + {10434, 10460, 10460}, + {10064, 10466, 10468}, + {2703, 10472, 
10474}, + {9751, 10478, 10479}, + {6714, 10485, 10485}, + {8020, 10490, 10490}, + {10303, 10494, 10494}, + {3521, 10499, 10500}, + {9281, 10513, 10515}, + {6028, 10519, 10523}, + {9387, 10527, 10527}, + {7614, 10531, 10531}, + {3611, 10536, 10536}, + {9162, 10540, 10540}, + {10081, 10546, 10547}, + {10034, 10560, 10562}, + {6726, 10567, 10571}, + {8237, 10575, 10575}, + {10438, 10579, 10583}, + {10140, 10587, 10587}, + {5784, 10592, 10592}, + {9819, 10597, 10600}, + {10567, 10604, 10608}, + {9335, 10613, 10613}, + {8300, 10617, 10617}, + {10575, 10621, 10621}, + {9678, 10625, 10626}, + {9962, 10632, 10633}, + {10535, 10637, 10638}, + {8199, 10642, 10642}, + {10372, 10647, 10648}, + {10637, 10656, 10657}, + {10579, 10667, 10668}, + {10465, 10677, 10680}, + {6702, 10684, 10685}, + {10073, 10691, 10692}, + {4505, 10696, 10697}, + {9042, 10701, 10701}, + {6460, 10705, 10706}, + {10010, 10714, 10716}, + {10656, 10720, 10722}, + {7282, 10727, 10729}, + {2327, 10733, 10733}, + {2491, 10740, 10741}, + {10704, 10748, 10750}, + {6465, 10754, 10754}, + {10647, 10758, 10759}, + {10424, 10763, 10763}, + {10748, 10776, 10776}, + {10546, 10780, 10781}, + {10758, 10785, 10786}, + {10287, 10790, 10797}, + {10785, 10801, 10807}, + {10240, 10811, 10826}, + {9509, 10830, 10830}, + {2579, 10836, 10838}, + {9801, 10843, 10845}, + {7555, 10849, 10850}, + {10776, 10860, 10865}, + {8023, 10869, 10869}, + {10046, 10876, 10884}, + {10253, 10888, 10892}, + {9941, 10897, 10897}, + {7898, 10901, 10905}, + {6725, 10909, 10913}, + {10757, 10921, 10923}, + {10160, 10931, 10931}, + {10916, 10935, 10942}, + {10261, 10946, 10946}, + {10318, 10952, 10954}, + {5911, 10959, 10961}, + {10801, 10965, 10966}, + {10946, 10970, 10977}, + {10592, 10982, 10984}, + {9913, 10988, 10990}, + {8510, 10994, 10996}, + {9419, 11000, 11001}, + {6765, 11006, 11007}, + {10725, 11011, 11011}, + {5537, 11017, 11019}, + {9208, 11024, 11025}, + {5850, 11030, 11030}, + {9610, 11034, 11036}, + {8846, 11041, 11047}, + {9697, 
11051, 11051}, + {1622, 11055, 11058}, + {2370, 11062, 11062}, + {8393, 11067, 11067}, + {9756, 11071, 11071}, + {10172, 11076, 11076}, + {27, 11081, 11081}, + {7357, 11087, 11092}, + {8151, 11104, 11106}, + {6115, 11110, 11110}, + {10667, 11114, 11115}, + {11099, 11121, 11123}, + {10705, 11127, 11127}, + {8938, 11131, 11131}, + {11114, 11135, 11136}, + {1390, 11140, 11141}, + {10964, 11146, 11148}, + {11140, 11152, 11155}, + {9813, 11159, 11166}, + {624, 11171, 11172}, + {3118, 11177, 11179}, + {11029, 11184, 11186}, + {10186, 11190, 11190}, + {10306, 11196, 11196}, + {8665, 11201, 11201}, + {7382, 11205, 11205}, + {1100, 11210, 11210}, + {2337, 11216, 11217}, + {1609, 11221, 11223}, + {5763, 11228, 11229}, + {5220, 11233, 11233}, + {11061, 11241, 11241}, + {10617, 11246, 11246}, + {11190, 11250, 11251}, + {10144, 11255, 11256}, + {11232, 11260, 11260}, + {857, 11264, 11265}, + {10994, 11269, 11271}, + {3879, 11280, 11281}, + {11184, 11287, 11289}, + {9611, 11293, 11295}, + {11250, 11299, 11299}, + {4495, 11304, 11304}, + {7574, 11308, 11309}, + {9814, 11315, 11317}, + {1713, 11321, 11324}, + {1905, 11328, 11328}, + {8745, 11335, 11340}, + {8883, 11351, 11351}, + {8119, 11358, 11358}, + {1842, 11363, 11364}, + {11237, 11368, 11368}, + {8814, 11373, 11374}, + {5684, 11378, 11378}, + {11011, 11382, 11382}, + {6520, 11389, 11389}, + {11183, 11393, 11396}, + {1790, 11404, 11404}, + {9536, 11408, 11408}, + {11298, 11418, 11419}, + {3929, 11425, 11425}, + {5588, 11429, 11429}, + {8476, 11436, 11436}, + {4096, 11440, 11442}, + {11084, 11446, 11454}, + {10603, 11458, 11463}, + {7332, 11472, 11474}, + {7611, 11483, 11486}, + {4836, 11490, 11491}, + {10024, 11495, 11495}, + {4917, 11501, 11506}, + {6486, 11510, 11512}, + {11269, 11516, 11518}, + {3603, 11522, 11525}, + {11126, 11535, 11535}, + {11418, 11539, 11541}, + {11408, 11545, 11545}, + {9021, 11549, 11552}, + {6745, 11557, 11557}, + {5118, 11561, 11564}, + {7590, 11568, 11569}, + {4426, 11573, 11578}, + {9790, 11582, 
11583}, + {6447, 11587, 11587}, + {10229, 11591, 11594}, + {10457, 11598, 11598}, + {10168, 11604, 11604}, + {10543, 11608, 11608}, + {7404, 11612, 11612}, + {11127, 11616, 11616}, + {3337, 11620, 11620}, + {11501, 11624, 11628}, + {4543, 11633, 11635}, + {8449, 11642, 11642}, + {4943, 11646, 11648}, + {10526, 11652, 11654}, + {11620, 11659, 11659}, + {8927, 11664, 11669}, + {532, 11673, 11673}, + {10513, 11677, 11679}, + {10428, 11683, 11683}, + {10999, 11689, 11690}, + {9469, 11695, 11695}, + {3606, 11699, 11699}, + {9560, 11708, 11709}, + {1564, 11714, 11714}, + {10527, 11718, 11718}, + {3071, 11723, 11726}, + {11590, 11731, 11732}, + {6605, 11737, 11737}, + {11624, 11741, 11745}, + {7822, 11749, 11752}, + {5269, 11757, 11758}, + {1339, 11767, 11767}, + {1363, 11771, 11773}, + {3704, 11777, 11777}, + {10952, 11781, 11783}, + {6764, 11793, 11795}, + {8675, 11800, 11800}, + {9963, 11804, 11804}, + {11573, 11808, 11809}, + {9548, 11813, 11813}, + {11591, 11817, 11818}, + {11446, 11822, 11822}, + {9224, 11828, 11828}, + {3158, 11836, 11836}, + {10830, 11840, 11840}, + {7234, 11846, 11846}, + {11299, 11850, 11850}, + {11544, 11854, 11855}, + {11498, 11859, 11859}, + {10993, 11865, 11868}, + {9720, 11872, 11878}, + {10489, 11882, 11890}, + {11712, 11898, 11904}, + {11516, 11908, 11910}, + {11568, 11914, 11915}, + {10177, 11919, 11924}, + {11363, 11928, 11929}, + {10494, 11933, 11933}, + {9870, 11937, 11938}, + {9427, 11942, 11942}, + {11481, 11949, 11949}, + {6030, 11955, 11957}, + {11718, 11961, 11961}, + {10531, 11965, 11983}, + {5126, 11987, 11987}, + {7515, 11991, 11991}, + {10646, 11996, 11997}, + {2947, 12001, 12001}, + {9582, 12009, 12010}, + {6202, 12017, 12018}, + {11714, 12022, 12022}, + {9235, 12033, 12037}, + {9721, 12041, 12044}, + {11932, 12051, 12052}, + {12040, 12056, 12056}, + {12051, 12060, 12060}, + {11601, 12066, 12066}, + {8426, 12070, 12070}, + {4053, 12077, 12077}, + {4262, 12081, 12081}, + {9761, 12086, 12088}, + {11582, 12092, 12093}, + 
{10965, 12097, 12098}, + {11803, 12103, 12104}, + {11933, 12108, 12109}, + {10688, 12117, 12117}, + {12107, 12125, 12126}, + {6774, 12130, 12132}, + {6286, 12137, 12137}, + {9543, 12141, 12141}, + {12097, 12145, 12146}, + {10790, 12150, 12150}, + {10125, 12154, 12156}, + {12125, 12164, 12164}, + {12064, 12168, 12172}, + {10811, 12178, 12188}, + {12092, 12192, 12193}, + {10058, 12197, 12198}, + {11611, 12211, 12212}, + {3459, 12216, 12216}, + {10291, 12225, 12228}, + {12191, 12232, 12234}, + {12145, 12238, 12238}, + {12001, 12242, 12250}, + {3840, 12255, 12255}, + {12216, 12259, 12259}, + {674, 12272, 12272}, + {12141, 12276, 12276}, + {10766, 12280, 12280}, + {11545, 12284, 12284}, + {6496, 12290, 12290}, + {11381, 12294, 12295}, + {603, 12302, 12303}, + {12276, 12308, 12308}, + {11850, 12313, 12314}, + {565, 12319, 12319}, + {9351, 12324, 12324}, + {11822, 12328, 12328}, + {2691, 12333, 12334}, + {11840, 12338, 12338}, + {11070, 12343, 12343}, + {9510, 12347, 12347}, + {11024, 12352, 12353}, + {7173, 12359, 12359}, + {517, 12363, 12363}, + {6311, 12367, 12368}, + {11367, 12372, 12373}, + {12008, 12377, 12377}, + {11372, 12382, 12384}, + {11358, 12391, 12392}, + {11382, 12396, 12396}, + {6882, 12400, 12401}, + {11246, 12405, 12405}, + {8359, 12409, 12412}, + {10154, 12418, 12418}, + {12016, 12425, 12426}, + {8972, 12434, 12435}, + {10478, 12439, 12440}, + {12395, 12449, 12449}, + {11612, 12454, 12454}, + {12347, 12458, 12458}, + {10700, 12466, 12467}, + {3637, 12471, 12476}, + {1042, 12480, 12481}, + {6747, 12488, 12488}, + {12396, 12492, 12493}, + {9420, 12497, 12497}, + {11285, 12501, 12510}, + {4470, 12515, 12515}, + {9374, 12519, 12519}, + {11293, 12528, 12528}, + {2058, 12534, 12535}, + {6521, 12539, 12539}, + {12492, 12543, 12543}, + {3043, 12547, 12547}, + {2982, 12551, 12553}, + {11030, 12557, 12563}, + {7636, 12568, 12568}, + {9639, 12572, 12572}, + {12543, 12576, 12576}, + {5989, 12580, 12583}, + {11051, 12587, 12587}, + {1061, 12592, 12594}, + {12313, 
12599, 12601}, + {11846, 12605, 12605}, + {12576, 12609, 12609}, + {11040, 12618, 12625}, + {12479, 12629, 12629}, + {6903, 12633, 12633}, + {12322, 12639, 12639}, + {12253, 12643, 12645}, + {5594, 12651, 12651}, + {12522, 12655, 12655}, + {11703, 12659, 12659}, + {1377, 12665, 12665}, + {8022, 12669, 12669}, + {12280, 12674, 12674}, + {9023, 12680, 12681}, + {12328, 12685, 12685}, + {3085, 12689, 12693}, + {4700, 12698, 12698}, + {10224, 12702, 12702}, + {8781, 12706, 12706}, + {1651, 12710, 12710}, + {12458, 12714, 12714}, + {12005, 12718, 12721}, + {11908, 12725, 12726}, + {8202, 12733, 12733}, + {11708, 12739, 12740}, + {12599, 12744, 12745}, + {12284, 12749, 12749}, + {5285, 12756, 12756}, + {12055, 12775, 12777}, + {6919, 12782, 12782}, + {12242, 12786, 12786}, + {12009, 12790, 12790}, + {9628, 12794, 12796}, + {11354, 12801, 12802}, + {10225, 12806, 12807}, + {579, 12813, 12813}, + {8935, 12817, 12822}, + {8753, 12827, 12829}, + {11006, 12835, 12835}, + {858, 12841, 12845}, + {476, 12849, 12849}, + {7667, 12854, 12854}, + {12760, 12860, 12871}, + {11677, 12875, 12877}, + {12714, 12881, 12881}, + {12731, 12885, 12890}, + {7108, 12894, 12896}, + {1165, 12900, 12900}, + {4021, 12906, 12906}, + {10829, 12910, 12911}, + {12331, 12915, 12915}, + {8887, 12919, 12921}, + {11639, 12925, 12925}, + {7964, 12929, 12929}, + {12528, 12937, 12937}, + {8148, 12941, 12941}, + {12770, 12948, 12950}, + {12609, 12954, 12954}, + {12685, 12958, 12958}, + {2803, 12962, 12962}, + {9561, 12966, 12966}, + {6671, 12972, 12973}, + {12056, 12977, 12977}, + {6380, 12981, 12981}, + {12048, 12985, 12985}, + {11961, 12989, 12993}, + {3368, 12997, 12999}, + {6634, 13004, 13004}, + {6775, 13009, 13010}, + {12136, 13014, 13019}, + {10341, 13023, 13023}, + {13002, 13027, 13027}, + {10587, 13031, 13031}, + {10307, 13035, 13035}, + {12736, 13039, 13039}, + {12744, 13043, 13044}, + {6175, 13048, 13048}, + {9702, 13053, 13054}, + {662, 13059, 13061}, + {12718, 13065, 13068}, + {12893, 13072, 
13075}, + {8299, 13086, 13091}, + {12604, 13095, 13096}, + {12848, 13100, 13101}, + {12749, 13105, 13105}, + {12526, 13109, 13114}, + {9173, 13122, 13122}, + {12769, 13128, 13128}, + {13038, 13132, 13132}, + {12725, 13136, 13137}, + {12639, 13146, 13146}, + {9711, 13150, 13151}, + {12137, 13155, 13155}, + {13039, 13159, 13159}, + {4681, 13163, 13164}, + {12954, 13168, 13168}, + {13158, 13175, 13176}, + {13105, 13180, 13180}, + {10754, 13184, 13184}, + {13167, 13188, 13188}, + {12658, 13192, 13192}, + {4294, 13199, 13200}, + {11682, 13204, 13205}, + {11695, 13209, 13209}, + {11076, 13214, 13214}, + {12232, 13218, 13218}, + {9399, 13223, 13224}, + {12880, 13228, 13229}, + {13048, 13234, 13234}, + {9701, 13238, 13239}, + {13209, 13243, 13243}, + {3658, 13248, 13248}, + {3698, 13252, 13254}, + {12237, 13260, 13260}, + {8872, 13266, 13266}, + {12957, 13272, 13273}, + {1393, 13281, 13281}, + {2013, 13285, 13288}, + {4244, 13296, 13299}, + {9428, 13303, 13303}, + {12702, 13307, 13307}, + {13078, 13311, 13311}, + {6071, 13315, 13315}, + {3061, 13319, 13319}, + {2051, 13324, 13324}, + {11560, 13328, 13331}, + {6584, 13336, 13336}, + {8482, 13340, 13340}, + {5331, 13344, 13344}, + {4171, 13348, 13348}, + {8501, 13352, 13352}, + {9219, 13356, 13356}, + {9473, 13360, 13363}, + {12881, 13367, 13367}, + {13065, 13371, 13375}, + {2979, 13379, 13384}, + {1518, 13388, 13388}, + {11177, 13392, 13392}, + {9457, 13398, 13398}, + {12293, 13407, 13410}, + {3697, 13414, 13417}, + {10338, 13425, 13425}, + {13367, 13429, 13429}, + {11074, 13433, 13437}, + {4201, 13441, 13443}, + {1812, 13447, 13448}, + {13360, 13452, 13456}, + {13188, 13463, 13463}, + {9732, 13470, 13470}, + {11332, 13477, 13477}, + {9918, 13487, 13487}, + {6337, 13497, 13497}, + {13429, 13501, 13501}, + {11413, 13505, 13505}, + {4685, 13512, 13513}, + {13136, 13517, 13519}, + {7416, 13528, 13530}, + {12929, 13534, 13534}, + {11110, 13539, 13539}, + {11521, 13543, 13543}, + {12825, 13553, 13553}, + {13447, 13557, 13558}, + 
{12299, 13562, 13563}, + {9003, 13570, 13570}, + {12500, 13577, 13577}, + {13501, 13581, 13581}, + {9392, 13586, 13586}, + {12454, 13590, 13590}, + {6189, 13595, 13595}, + {13053, 13599, 13599}, + {11881, 13604, 13604}, + {13159, 13608, 13608}, + {4894, 13612, 13612}, + {13221, 13621, 13621}, + {8950, 13625, 13625}, + {13533, 13629, 13629}, + {9633, 13633, 13633}, + {7892, 13637, 13639}, + {13581, 13643, 13643}, + {13616, 13647, 13649}, + {12794, 13653, 13654}, + {8919, 13659, 13659}, + {9674, 13663, 13663}, + {13577, 13668, 13668}, + {12966, 13672, 13672}, + {12659, 13676, 13683}, + {6124, 13688, 13688}, + {9225, 13693, 13695}, + {11833, 13702, 13702}, + {12904, 13709, 13717}, + {13647, 13721, 13722}, + {11687, 13726, 13727}, + {12434, 13731, 13732}, + {12689, 13736, 13742}, + {13168, 13746, 13746}, + {6151, 13751, 13752}, + {11821, 13756, 13757}, + {6467, 13764, 13764}, + {5730, 13769, 13769}, + {5136, 13780, 13780}, + {724, 13784, 13785}, + {13517, 13789, 13791}, + {640, 13795, 13796}, + {7721, 13800, 13802}, + {11121, 13806, 13807}, + {5791, 13811, 13815}, + {12894, 13819, 13819}, + {11100, 13824, 13824}, + {7011, 13830, 13830}, + {7129, 13834, 13837}, + {13833, 13841, 13841}, + {11276, 13847, 13847}, + {13621, 13853, 13853}, + {13589, 13862, 13863}, + {12989, 13867, 13867}, + {12789, 13871, 13871}, + {1239, 13875, 13875}, + {4675, 13879, 13881}, + {4686, 13885, 13885}, + {707, 13889, 13889}, + {5449, 13897, 13898}, + {13867, 13902, 13903}, + {10613, 13908, 13908}, + {13789, 13912, 13914}, + {4451, 13918, 13919}, + {9200, 13924, 13924}, + {2011, 13930, 13930}, + {11433, 13934, 13936}, + {4695, 13942, 13943}, + {9435, 13948, 13951}, + {13688, 13955, 13957}, + {11694, 13961, 13962}, + {5712, 13966, 13966}, + {5991, 13970, 13972}, + {13477, 13976, 13976}, + {10213, 13987, 13987}, + {11839, 13991, 13993}, + {12272, 13997, 13997}, + {6206, 14001, 14001}, + {13179, 14006, 14007}, + {2939, 14011, 14011}, + {12972, 14016, 14017}, + {13918, 14021, 14022}, + {7436, 
14026, 14027}, + {7678, 14032, 14034}, + {13586, 14040, 14040}, + {13347, 14044, 14044}, + {13109, 14048, 14051}, + {9244, 14055, 14057}, + {13315, 14061, 14061}, + {13276, 14067, 14067}, + {11435, 14073, 14074}, + {13853, 14078, 14078}, + {13452, 14082, 14082}, + {14044, 14087, 14087}, + {4440, 14091, 14095}, + {4479, 14100, 14103}, + {9395, 14107, 14109}, + {6834, 14119, 14119}, + {10458, 14123, 14124}, + {1429, 14129, 14129}, + {8443, 14135, 14135}, + {10365, 14140, 14140}, + {5267, 14145, 14145}, + {11834, 14151, 14153}, +} diff --git a/vendor/github.com/golang/snappy/misc/main.cpp b/vendor/github.com/golang/snappy/misc/main.cpp new file mode 100644 index 0000000..24a3d9a --- /dev/null +++ b/vendor/github.com/golang/snappy/misc/main.cpp @@ -0,0 +1,79 @@ +/* +This is a C version of the cmd/snappytool Go program. + +To build the snappytool binary: +g++ main.cpp /usr/lib/libsnappy.a -o snappytool +or, if you have built the C++ snappy library from source: +g++ main.cpp /path/to/your/snappy/.libs/libsnappy.a -o snappytool +after running "make" from your snappy checkout directory. +*/ + +#include +#include +#include +#include + +#include "snappy.h" + +#define N 1000000 + +char dst[N]; +char src[N]; + +int main(int argc, char** argv) { + // Parse args. + if (argc != 2) { + fprintf(stderr, "exactly one of -d or -e must be given\n"); + return 1; + } + bool decode = strcmp(argv[1], "-d") == 0; + bool encode = strcmp(argv[1], "-e") == 0; + if (decode == encode) { + fprintf(stderr, "exactly one of -d or -e must be given\n"); + return 1; + } + + // Read all of stdin into src[:s]. + size_t s = 0; + while (1) { + if (s == N) { + fprintf(stderr, "input too large\n"); + return 1; + } + ssize_t n = read(0, src+s, N-s); + if (n == 0) { + break; + } + if (n < 0) { + fprintf(stderr, "read error: %s\n", strerror(errno)); + // TODO: handle EAGAIN, EINTR? + return 1; + } + s += n; + } + + // Encode or decode src[:s] to dst[:d], and write to stdout. 
+ size_t d = 0; + if (encode) { + if (N < snappy::MaxCompressedLength(s)) { + fprintf(stderr, "input too large after encoding\n"); + return 1; + } + snappy::RawCompress(src, s, dst, &d); + } else { + if (!snappy::GetUncompressedLength(src, s, &d)) { + fprintf(stderr, "could not get uncompressed length\n"); + return 1; + } + if (N < d) { + fprintf(stderr, "input too large after decoding\n"); + return 1; + } + if (!snappy::RawUncompress(src, s, dst)) { + fprintf(stderr, "input was not valid Snappy-compressed data\n"); + return 1; + } + } + write(1, dst, d); + return 0; +} diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go new file mode 100644 index 0000000..ece692e --- /dev/null +++ b/vendor/github.com/golang/snappy/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snappy // import "github.com/golang/snappy" + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. 
Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". 
+ maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/golang/snappy/snappy_test.go b/vendor/github.com/golang/snappy/snappy_test.go new file mode 100644 index 0000000..2712710 --- /dev/null +++ b/vendor/github.com/golang/snappy/snappy_test.go @@ -0,0 +1,1353 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "bytes" + "encoding/binary" + "flag" + "fmt" + "io" + "io/ioutil" + "math/rand" + "net/http" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "testing" +) + +var ( + download = flag.Bool("download", false, "If true, download any missing files before running benchmarks") + testdataDir = flag.String("testdataDir", "testdata", "Directory containing the test data") + benchdataDir = flag.String("benchdataDir", "testdata/bench", "Directory containing the benchmark data") +) + +// goEncoderShouldMatchCppEncoder is whether to test that the algorithm used by +// Go's encoder matches byte-for-byte what the C++ snappy encoder produces, on +// this GOARCH. 
There is more than one valid encoding of any given input, and +// there is more than one good algorithm along the frontier of trading off +// throughput for output size. Nonetheless, we presume that the C++ encoder's +// algorithm is a good one and has been tested on a wide range of inputs, so +// matching that exactly should mean that the Go encoder's algorithm is also +// good, without needing to gather our own corpus of test data. +// +// The exact algorithm used by the C++ code is potentially endian dependent, as +// it puns a byte pointer to a uint32 pointer to load, hash and compare 4 bytes +// at a time. The Go implementation is endian agnostic, in that its output is +// the same (as little-endian C++ code), regardless of the CPU's endianness. +// +// Thus, when comparing Go's output to C++ output generated beforehand, such as +// the "testdata/pi.txt.rawsnappy" file generated by C++ code on a little- +// endian system, we can run that test regardless of the runtime.GOARCH value. +// +// When comparing Go's output to dynamically generated C++ output, i.e. the +// result of fork/exec'ing a C++ program, we can run that test only on +// little-endian systems, because the C++ output might be different on +// big-endian systems. The runtime package doesn't export endianness per se, +// but we can restrict this match-C++ test to common little-endian systems. 
+const goEncoderShouldMatchCppEncoder = runtime.GOARCH == "386" || runtime.GOARCH == "amd64" || runtime.GOARCH == "arm" + +func TestMaxEncodedLenOfMaxBlockSize(t *testing.T) { + got := maxEncodedLenOfMaxBlockSize + want := MaxEncodedLen(maxBlockSize) + if got != want { + t.Fatalf("got %d, want %d", got, want) + } +} + +func cmp(a, b []byte) error { + if bytes.Equal(a, b) { + return nil + } + if len(a) != len(b) { + return fmt.Errorf("got %d bytes, want %d", len(a), len(b)) + } + for i := range a { + if a[i] != b[i] { + return fmt.Errorf("byte #%d: got 0x%02x, want 0x%02x", i, a[i], b[i]) + } + } + return nil +} + +func roundtrip(b, ebuf, dbuf []byte) error { + d, err := Decode(dbuf, Encode(ebuf, b)) + if err != nil { + return fmt.Errorf("decoding error: %v", err) + } + if err := cmp(d, b); err != nil { + return fmt.Errorf("roundtrip mismatch: %v", err) + } + return nil +} + +func TestEmpty(t *testing.T) { + if err := roundtrip(nil, nil, nil); err != nil { + t.Fatal(err) + } +} + +func TestSmallCopy(t *testing.T) { + for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { + for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { + for i := 0; i < 32; i++ { + s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb" + if err := roundtrip([]byte(s), ebuf, dbuf); err != nil { + t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err) + } + } + } + } +} + +func TestSmallRand(t *testing.T) { + rng := rand.New(rand.NewSource(1)) + for n := 1; n < 20000; n += 23 { + b := make([]byte, n) + for i := range b { + b[i] = uint8(rng.Intn(256)) + } + if err := roundtrip(b, nil, nil); err != nil { + t.Fatal(err) + } + } +} + +func TestSmallRegular(t *testing.T) { + for n := 1; n < 20000; n += 23 { + b := make([]byte, n) + for i := range b { + b[i] = uint8(i%10 + 'a') + } + if err := roundtrip(b, nil, nil); err != nil { + t.Fatal(err) + } + } +} + +func TestInvalidVarint(t *testing.T) { + testCases := []struct { + desc string + input 
string + }{{ + "invalid varint, final byte has continuation bit set", + "\xff", + }, { + "invalid varint, value overflows uint64", + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00", + }, { + // https://github.com/google/snappy/blob/master/format_description.txt + // says that "the stream starts with the uncompressed length [as a + // varint] (up to a maximum of 2^32 - 1)". + "valid varint (as uint64), but value overflows uint32", + "\x80\x80\x80\x80\x10", + }} + + for _, tc := range testCases { + input := []byte(tc.input) + if _, err := DecodedLen(input); err != ErrCorrupt { + t.Errorf("%s: DecodedLen: got %v, want ErrCorrupt", tc.desc, err) + } + if _, err := Decode(nil, input); err != ErrCorrupt { + t.Errorf("%s: Decode: got %v, want ErrCorrupt", tc.desc, err) + } + } +} + +func TestDecode(t *testing.T) { + lit40Bytes := make([]byte, 40) + for i := range lit40Bytes { + lit40Bytes[i] = byte(i) + } + lit40 := string(lit40Bytes) + + testCases := []struct { + desc string + input string + want string + wantErr error + }{{ + `decodedLen=0; valid input`, + "\x00", + "", + nil, + }, { + `decodedLen=3; tagLiteral, 0-byte length; length=3; valid input`, + "\x03" + "\x08\xff\xff\xff", + "\xff\xff\xff", + nil, + }, { + `decodedLen=2; tagLiteral, 0-byte length; length=3; not enough dst bytes`, + "\x02" + "\x08\xff\xff\xff", + "", + ErrCorrupt, + }, { + `decodedLen=3; tagLiteral, 0-byte length; length=3; not enough src bytes`, + "\x03" + "\x08\xff\xff", + "", + ErrCorrupt, + }, { + `decodedLen=40; tagLiteral, 0-byte length; length=40; valid input`, + "\x28" + "\x9c" + lit40, + lit40, + nil, + }, { + `decodedLen=1; tagLiteral, 1-byte length; not enough length bytes`, + "\x01" + "\xf0", + "", + ErrCorrupt, + }, { + `decodedLen=3; tagLiteral, 1-byte length; length=3; valid input`, + "\x03" + "\xf0\x02\xff\xff\xff", + "\xff\xff\xff", + nil, + }, { + `decodedLen=1; tagLiteral, 2-byte length; not enough length bytes`, + "\x01" + "\xf4\x00", + "", + ErrCorrupt, + }, { + `decodedLen=3; 
tagLiteral, 2-byte length; length=3; valid input`, + "\x03" + "\xf4\x02\x00\xff\xff\xff", + "\xff\xff\xff", + nil, + }, { + `decodedLen=1; tagLiteral, 3-byte length; not enough length bytes`, + "\x01" + "\xf8\x00\x00", + "", + ErrCorrupt, + }, { + `decodedLen=3; tagLiteral, 3-byte length; length=3; valid input`, + "\x03" + "\xf8\x02\x00\x00\xff\xff\xff", + "\xff\xff\xff", + nil, + }, { + `decodedLen=1; tagLiteral, 4-byte length; not enough length bytes`, + "\x01" + "\xfc\x00\x00\x00", + "", + ErrCorrupt, + }, { + `decodedLen=1; tagLiteral, 4-byte length; length=3; not enough dst bytes`, + "\x01" + "\xfc\x02\x00\x00\x00\xff\xff\xff", + "", + ErrCorrupt, + }, { + `decodedLen=4; tagLiteral, 4-byte length; length=3; not enough src bytes`, + "\x04" + "\xfc\x02\x00\x00\x00\xff", + "", + ErrCorrupt, + }, { + `decodedLen=3; tagLiteral, 4-byte length; length=3; valid input`, + "\x03" + "\xfc\x02\x00\x00\x00\xff\xff\xff", + "\xff\xff\xff", + nil, + }, { + `decodedLen=4; tagCopy1, 1 extra length|offset byte; not enough extra bytes`, + "\x04" + "\x01", + "", + ErrCorrupt, + }, { + `decodedLen=4; tagCopy2, 2 extra length|offset bytes; not enough extra bytes`, + "\x04" + "\x02\x00", + "", + ErrCorrupt, + }, { + `decodedLen=4; tagCopy4, 4 extra length|offset bytes; not enough extra bytes`, + "\x04" + "\x03\x00\x00\x00", + "", + ErrCorrupt, + }, { + `decodedLen=4; tagLiteral (4 bytes "abcd"); valid input`, + "\x04" + "\x0cabcd", + "abcd", + nil, + }, { + `decodedLen=13; tagLiteral (4 bytes "abcd"); tagCopy1; length=9 offset=4; valid input`, + "\x0d" + "\x0cabcd" + "\x15\x04", + "abcdabcdabcda", + nil, + }, { + `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; valid input`, + "\x08" + "\x0cabcd" + "\x01\x04", + "abcdabcd", + nil, + }, { + `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=2; valid input`, + "\x08" + "\x0cabcd" + "\x01\x02", + "abcdcdcd", + nil, + }, { + `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 
offset=1; valid input`, + "\x08" + "\x0cabcd" + "\x01\x01", + "abcddddd", + nil, + }, { + `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=0; zero offset`, + "\x08" + "\x0cabcd" + "\x01\x00", + "", + ErrCorrupt, + }, { + `decodedLen=9; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; inconsistent dLen`, + "\x09" + "\x0cabcd" + "\x01\x04", + "", + ErrCorrupt, + }, { + `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=5; offset too large`, + "\x08" + "\x0cabcd" + "\x01\x05", + "", + ErrCorrupt, + }, { + `decodedLen=7; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; length too large`, + "\x07" + "\x0cabcd" + "\x01\x04", + "", + ErrCorrupt, + }, { + `decodedLen=6; tagLiteral (4 bytes "abcd"); tagCopy2; length=2 offset=3; valid input`, + "\x06" + "\x0cabcd" + "\x06\x03\x00", + "abcdbc", + nil, + }, { + `decodedLen=6; tagLiteral (4 bytes "abcd"); tagCopy4; length=2 offset=3; valid input`, + "\x06" + "\x0cabcd" + "\x07\x03\x00\x00\x00", + "abcdbc", + nil, + }} + + const ( + // notPresentXxx defines a range of byte values [0xa0, 0xc5) that are + // not present in either the input or the output. It is written to dBuf + // to check that Decode does not write bytes past the end of + // dBuf[:dLen]. + // + // The magic number 37 was chosen because it is prime. A more 'natural' + // number like 32 might lead to a false negative if, for example, a + // byte was incorrectly copied 4*8 bytes later. 
+ notPresentBase = 0xa0 + notPresentLen = 37 + ) + + var dBuf [100]byte +loop: + for i, tc := range testCases { + input := []byte(tc.input) + for _, x := range input { + if notPresentBase <= x && x < notPresentBase+notPresentLen { + t.Errorf("#%d (%s): input shouldn't contain %#02x\ninput: % x", i, tc.desc, x, input) + continue loop + } + } + + dLen, n := binary.Uvarint(input) + if n <= 0 { + t.Errorf("#%d (%s): invalid varint-encoded dLen", i, tc.desc) + continue + } + if dLen > uint64(len(dBuf)) { + t.Errorf("#%d (%s): dLen %d is too large", i, tc.desc, dLen) + continue + } + + for j := range dBuf { + dBuf[j] = byte(notPresentBase + j%notPresentLen) + } + g, gotErr := Decode(dBuf[:], input) + if got := string(g); got != tc.want || gotErr != tc.wantErr { + t.Errorf("#%d (%s):\ngot %q, %v\nwant %q, %v", + i, tc.desc, got, gotErr, tc.want, tc.wantErr) + continue + } + for j, x := range dBuf { + if uint64(j) < dLen { + continue + } + if w := byte(notPresentBase + j%notPresentLen); x != w { + t.Errorf("#%d (%s): Decode overrun: dBuf[%d] was modified: got %#02x, want %#02x\ndBuf: % x", + i, tc.desc, j, x, w, dBuf) + continue loop + } + } + } +} + +func TestDecodeCopy4(t *testing.T) { + dots := strings.Repeat(".", 65536) + + input := strings.Join([]string{ + "\x89\x80\x04", // decodedLen = 65545. + "\x0cpqrs", // 4-byte literal "pqrs". + "\xf4\xff\xff" + dots, // 65536-byte literal dots. + "\x13\x04\x00\x01\x00", // tagCopy4; length=5 offset=65540. + }, "") + + gotBytes, err := Decode(nil, []byte(input)) + if err != nil { + t.Fatal(err) + } + got := string(gotBytes) + want := "pqrs" + dots + "pqrs." + if len(got) != len(want) { + t.Fatalf("got %d bytes, want %d", len(got), len(want)) + } + if got != want { + for i := 0; i < len(got); i++ { + if g, w := got[i], want[i]; g != w { + t.Fatalf("byte #%d: got %#02x, want %#02x", i, g, w) + } + } + } +} + +// TestDecodeLengthOffset tests decoding an encoding of the form literal + +// copy-length-offset + literal. 
For example: "abcdefghijkl" + "efghij" + "AB". +func TestDecodeLengthOffset(t *testing.T) { + const ( + prefix = "abcdefghijklmnopqr" + suffix = "ABCDEFGHIJKLMNOPQR" + + // notPresentXxx defines a range of byte values [0xa0, 0xc5) that are + // not present in either the input or the output. It is written to + // gotBuf to check that Decode does not write bytes past the end of + // gotBuf[:totalLen]. + // + // The magic number 37 was chosen because it is prime. A more 'natural' + // number like 32 might lead to a false negative if, for example, a + // byte was incorrectly copied 4*8 bytes later. + notPresentBase = 0xa0 + notPresentLen = 37 + ) + var gotBuf, wantBuf, inputBuf [128]byte + for length := 1; length <= 18; length++ { + for offset := 1; offset <= 18; offset++ { + loop: + for suffixLen := 0; suffixLen <= 18; suffixLen++ { + totalLen := len(prefix) + length + suffixLen + + inputLen := binary.PutUvarint(inputBuf[:], uint64(totalLen)) + inputBuf[inputLen] = tagLiteral + 4*byte(len(prefix)-1) + inputLen++ + inputLen += copy(inputBuf[inputLen:], prefix) + inputBuf[inputLen+0] = tagCopy2 + 4*byte(length-1) + inputBuf[inputLen+1] = byte(offset) + inputBuf[inputLen+2] = 0x00 + inputLen += 3 + if suffixLen > 0 { + inputBuf[inputLen] = tagLiteral + 4*byte(suffixLen-1) + inputLen++ + inputLen += copy(inputBuf[inputLen:], suffix[:suffixLen]) + } + input := inputBuf[:inputLen] + + for i := range gotBuf { + gotBuf[i] = byte(notPresentBase + i%notPresentLen) + } + got, err := Decode(gotBuf[:], input) + if err != nil { + t.Errorf("length=%d, offset=%d; suffixLen=%d: %v", length, offset, suffixLen, err) + continue + } + + wantLen := 0 + wantLen += copy(wantBuf[wantLen:], prefix) + for i := 0; i < length; i++ { + wantBuf[wantLen] = wantBuf[wantLen-offset] + wantLen++ + } + wantLen += copy(wantBuf[wantLen:], suffix[:suffixLen]) + want := wantBuf[:wantLen] + + for _, x := range input { + if notPresentBase <= x && x < notPresentBase+notPresentLen { + t.Errorf("length=%d, 
offset=%d; suffixLen=%d: input shouldn't contain %#02x\ninput: % x", + length, offset, suffixLen, x, input) + continue loop + } + } + for i, x := range gotBuf { + if i < totalLen { + continue + } + if w := byte(notPresentBase + i%notPresentLen); x != w { + t.Errorf("length=%d, offset=%d; suffixLen=%d; totalLen=%d: "+ + "Decode overrun: gotBuf[%d] was modified: got %#02x, want %#02x\ngotBuf: % x", + length, offset, suffixLen, totalLen, i, x, w, gotBuf) + continue loop + } + } + for _, x := range want { + if notPresentBase <= x && x < notPresentBase+notPresentLen { + t.Errorf("length=%d, offset=%d; suffixLen=%d: want shouldn't contain %#02x\nwant: % x", + length, offset, suffixLen, x, want) + continue loop + } + } + + if !bytes.Equal(got, want) { + t.Errorf("length=%d, offset=%d; suffixLen=%d:\ninput % x\ngot % x\nwant % x", + length, offset, suffixLen, input, got, want) + continue + } + } + } + } +} + +const ( + goldenText = "Mark.Twain-Tom.Sawyer.txt" + goldenCompressed = goldenText + ".rawsnappy" +) + +func TestDecodeGoldenInput(t *testing.T) { + tDir := filepath.FromSlash(*testdataDir) + src, err := ioutil.ReadFile(filepath.Join(tDir, goldenCompressed)) + if err != nil { + t.Fatalf("ReadFile: %v", err) + } + got, err := Decode(nil, src) + if err != nil { + t.Fatalf("Decode: %v", err) + } + want, err := ioutil.ReadFile(filepath.Join(tDir, goldenText)) + if err != nil { + t.Fatalf("ReadFile: %v", err) + } + if err := cmp(got, want); err != nil { + t.Fatal(err) + } +} + +func TestEncodeGoldenInput(t *testing.T) { + tDir := filepath.FromSlash(*testdataDir) + src, err := ioutil.ReadFile(filepath.Join(tDir, goldenText)) + if err != nil { + t.Fatalf("ReadFile: %v", err) + } + got := Encode(nil, src) + want, err := ioutil.ReadFile(filepath.Join(tDir, goldenCompressed)) + if err != nil { + t.Fatalf("ReadFile: %v", err) + } + if err := cmp(got, want); err != nil { + t.Fatal(err) + } +} + +func TestExtendMatchGoldenInput(t *testing.T) { + tDir := 
filepath.FromSlash(*testdataDir) + src, err := ioutil.ReadFile(filepath.Join(tDir, goldenText)) + if err != nil { + t.Fatalf("ReadFile: %v", err) + } + for i, tc := range extendMatchGoldenTestCases { + got := extendMatch(src, tc.i, tc.j) + if got != tc.want { + t.Errorf("test #%d: i, j = %5d, %5d: got %5d (= j + %6d), want %5d (= j + %6d)", + i, tc.i, tc.j, got, got-tc.j, tc.want, tc.want-tc.j) + } + } +} + +func TestExtendMatch(t *testing.T) { + // ref is a simple, reference implementation of extendMatch. + ref := func(src []byte, i, j int) int { + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j + } + + nums := []int{0, 1, 2, 7, 8, 9, 29, 30, 31, 32, 33, 34, 38, 39, 40} + for yIndex := 40; yIndex > 30; yIndex-- { + xxx := bytes.Repeat([]byte("x"), 40) + if yIndex < len(xxx) { + xxx[yIndex] = 'y' + } + for _, i := range nums { + for _, j := range nums { + if i >= j { + continue + } + got := extendMatch(xxx, i, j) + want := ref(xxx, i, j) + if got != want { + t.Errorf("yIndex=%d, i=%d, j=%d: got %d, want %d", yIndex, i, j, got, want) + } + } + } + } +} + +const snappytoolCmdName = "cmd/snappytool/snappytool" + +func skipTestSameEncodingAsCpp() (msg string) { + if !goEncoderShouldMatchCppEncoder { + return fmt.Sprintf("skipping testing that the encoding is byte-for-byte identical to C++: GOARCH=%s", runtime.GOARCH) + } + if _, err := os.Stat(snappytoolCmdName); err != nil { + return fmt.Sprintf("could not find snappytool: %v", err) + } + return "" +} + +func runTestSameEncodingAsCpp(src []byte) error { + got := Encode(nil, src) + + cmd := exec.Command(snappytoolCmdName, "-e") + cmd.Stdin = bytes.NewReader(src) + want, err := cmd.Output() + if err != nil { + return fmt.Errorf("could not run snappytool: %v", err) + } + return cmp(got, want) +} + +func TestSameEncodingAsCppShortCopies(t *testing.T) { + if msg := skipTestSameEncodingAsCpp(); msg != "" { + t.Skip(msg) + } + src := bytes.Repeat([]byte{'a'}, 20) + for i := 0; i <= len(src); i++ { + 
if err := runTestSameEncodingAsCpp(src[:i]); err != nil { + t.Errorf("i=%d: %v", i, err) + } + } +} + +func TestSameEncodingAsCppLongFiles(t *testing.T) { + if msg := skipTestSameEncodingAsCpp(); msg != "" { + t.Skip(msg) + } + bDir := filepath.FromSlash(*benchdataDir) + failed := false + for i, tf := range testFiles { + if err := downloadBenchmarkFiles(t, tf.filename); err != nil { + t.Fatalf("failed to download testdata: %s", err) + } + data := readFile(t, filepath.Join(bDir, tf.filename)) + if n := tf.sizeLimit; 0 < n && n < len(data) { + data = data[:n] + } + if err := runTestSameEncodingAsCpp(data); err != nil { + t.Errorf("i=%d: %v", i, err) + failed = true + } + } + if failed { + t.Errorf("was the snappytool program built against the C++ snappy library version " + + "d53de187 or later, commited on 2016-04-05? See " + + "https://github.com/google/snappy/commit/d53de18799418e113e44444252a39b12a0e4e0cc") + } +} + +// TestSlowForwardCopyOverrun tests the "expand the pattern" algorithm +// described in decode_amd64.s and its claim of a 10 byte overrun worst case. 
+func TestSlowForwardCopyOverrun(t *testing.T) { + const base = 100 + + for length := 1; length < 18; length++ { + for offset := 1; offset < 18; offset++ { + highWaterMark := base + d := base + l := length + o := offset + + // makeOffsetAtLeast8 + for o < 8 { + if end := d + 8; highWaterMark < end { + highWaterMark = end + } + l -= o + d += o + o += o + } + + // fixUpSlowForwardCopy + a := d + d += l + + // finishSlowForwardCopy + for l > 0 { + if end := a + 8; highWaterMark < end { + highWaterMark = end + } + a += 8 + l -= 8 + } + + dWant := base + length + overrun := highWaterMark - dWant + if d != dWant || overrun < 0 || 10 < overrun { + t.Errorf("length=%d, offset=%d: d and overrun: got (%d, %d), want (%d, something in [0, 10])", + length, offset, d, overrun, dWant) + } + } + } +} + +// TestEncodeNoiseThenRepeats encodes input for which the first half is very +// incompressible and the second half is very compressible. The encoded form's +// length should be closer to 50% of the original length than 100%. +func TestEncodeNoiseThenRepeats(t *testing.T) { + for _, origLen := range []int{256 * 1024, 2048 * 1024} { + src := make([]byte, origLen) + rng := rand.New(rand.NewSource(1)) + firstHalf, secondHalf := src[:origLen/2], src[origLen/2:] + for i := range firstHalf { + firstHalf[i] = uint8(rng.Intn(256)) + } + for i := range secondHalf { + secondHalf[i] = uint8(i >> 8) + } + dst := Encode(nil, src) + if got, want := len(dst), origLen*3/4; got >= want { + t.Errorf("origLen=%d: got %d encoded bytes, want less than %d", origLen, got, want) + } + } +} + +func TestFramingFormat(t *testing.T) { + // src is comprised of alternating 1e5-sized sequences of random + // (incompressible) bytes and repeated (compressible) bytes. 1e5 was chosen + // because it is larger than maxBlockSize (64k). 
+ src := make([]byte, 1e6) + rng := rand.New(rand.NewSource(1)) + for i := 0; i < 10; i++ { + if i%2 == 0 { + for j := 0; j < 1e5; j++ { + src[1e5*i+j] = uint8(rng.Intn(256)) + } + } else { + for j := 0; j < 1e5; j++ { + src[1e5*i+j] = uint8(i) + } + } + } + + buf := new(bytes.Buffer) + if _, err := NewWriter(buf).Write(src); err != nil { + t.Fatalf("Write: encoding: %v", err) + } + dst, err := ioutil.ReadAll(NewReader(buf)) + if err != nil { + t.Fatalf("ReadAll: decoding: %v", err) + } + if err := cmp(dst, src); err != nil { + t.Fatal(err) + } +} + +func TestWriterGoldenOutput(t *testing.T) { + buf := new(bytes.Buffer) + w := NewBufferedWriter(buf) + defer w.Close() + w.Write([]byte("abcd")) // Not compressible. + w.Flush() + w.Write(bytes.Repeat([]byte{'A'}, 150)) // Compressible. + w.Flush() + // The next chunk is also compressible, but a naive, greedy encoding of the + // overall length 67 copy as a length 64 copy (the longest expressible as a + // tagCopy1 or tagCopy2) plus a length 3 remainder would be two 3-byte + // tagCopy2 tags (6 bytes), since the minimum length for a tagCopy1 is 4 + // bytes. Instead, we could do it shorter, in 5 bytes: a 3-byte tagCopy2 + // (of length 60) and a 2-byte tagCopy1 (of length 7). + w.Write(bytes.Repeat([]byte{'B'}, 68)) + w.Write([]byte("efC")) // Not compressible. + w.Write(bytes.Repeat([]byte{'C'}, 20)) // Compressible. + w.Write(bytes.Repeat([]byte{'B'}, 20)) // Compressible. + w.Write([]byte("g")) // Not compressible. + w.Flush() + + got := buf.String() + want := strings.Join([]string{ + magicChunk, + "\x01\x08\x00\x00", // Uncompressed chunk, 8 bytes long (including 4 byte checksum). + "\x68\x10\xe6\xb6", // Checksum. + "\x61\x62\x63\x64", // Uncompressed payload: "abcd". + "\x00\x11\x00\x00", // Compressed chunk, 17 bytes long (including 4 byte checksum). + "\x5f\xeb\xf2\x10", // Checksum. + "\x96\x01", // Compressed payload: Uncompressed length (varint encoded): 150. 
+ "\x00\x41", // Compressed payload: tagLiteral, length=1, "A". + "\xfe\x01\x00", // Compressed payload: tagCopy2, length=64, offset=1. + "\xfe\x01\x00", // Compressed payload: tagCopy2, length=64, offset=1. + "\x52\x01\x00", // Compressed payload: tagCopy2, length=21, offset=1. + "\x00\x18\x00\x00", // Compressed chunk, 24 bytes long (including 4 byte checksum). + "\x30\x85\x69\xeb", // Checksum. + "\x70", // Compressed payload: Uncompressed length (varint encoded): 112. + "\x00\x42", // Compressed payload: tagLiteral, length=1, "B". + "\xee\x01\x00", // Compressed payload: tagCopy2, length=60, offset=1. + "\x0d\x01", // Compressed payload: tagCopy1, length=7, offset=1. + "\x08\x65\x66\x43", // Compressed payload: tagLiteral, length=3, "efC". + "\x4e\x01\x00", // Compressed payload: tagCopy2, length=20, offset=1. + "\x4e\x5a\x00", // Compressed payload: tagCopy2, length=20, offset=90. + "\x00\x67", // Compressed payload: tagLiteral, length=1, "g". + }, "") + if got != want { + t.Fatalf("\ngot: % x\nwant: % x", got, want) + } +} + +func TestEmitLiteral(t *testing.T) { + testCases := []struct { + length int + want string + }{ + {1, "\x00"}, + {2, "\x04"}, + {59, "\xe8"}, + {60, "\xec"}, + {61, "\xf0\x3c"}, + {62, "\xf0\x3d"}, + {254, "\xf0\xfd"}, + {255, "\xf0\xfe"}, + {256, "\xf0\xff"}, + {257, "\xf4\x00\x01"}, + {65534, "\xf4\xfd\xff"}, + {65535, "\xf4\xfe\xff"}, + {65536, "\xf4\xff\xff"}, + } + + dst := make([]byte, 70000) + nines := bytes.Repeat([]byte{0x99}, 65536) + for _, tc := range testCases { + lit := nines[:tc.length] + n := emitLiteral(dst, lit) + if !bytes.HasSuffix(dst[:n], lit) { + t.Errorf("length=%d: did not end with that many literal bytes", tc.length) + continue + } + got := string(dst[:n-tc.length]) + if got != tc.want { + t.Errorf("length=%d:\ngot % x\nwant % x", tc.length, got, tc.want) + continue + } + } +} + +func TestEmitCopy(t *testing.T) { + testCases := []struct { + offset int + length int + want string + }{ + {8, 04, "\x01\x08"}, + {8, 
11, "\x1d\x08"}, + {8, 12, "\x2e\x08\x00"}, + {8, 13, "\x32\x08\x00"}, + {8, 59, "\xea\x08\x00"}, + {8, 60, "\xee\x08\x00"}, + {8, 61, "\xf2\x08\x00"}, + {8, 62, "\xf6\x08\x00"}, + {8, 63, "\xfa\x08\x00"}, + {8, 64, "\xfe\x08\x00"}, + {8, 65, "\xee\x08\x00\x05\x08"}, + {8, 66, "\xee\x08\x00\x09\x08"}, + {8, 67, "\xee\x08\x00\x0d\x08"}, + {8, 68, "\xfe\x08\x00\x01\x08"}, + {8, 69, "\xfe\x08\x00\x05\x08"}, + {8, 80, "\xfe\x08\x00\x3e\x08\x00"}, + + {256, 04, "\x21\x00"}, + {256, 11, "\x3d\x00"}, + {256, 12, "\x2e\x00\x01"}, + {256, 13, "\x32\x00\x01"}, + {256, 59, "\xea\x00\x01"}, + {256, 60, "\xee\x00\x01"}, + {256, 61, "\xf2\x00\x01"}, + {256, 62, "\xf6\x00\x01"}, + {256, 63, "\xfa\x00\x01"}, + {256, 64, "\xfe\x00\x01"}, + {256, 65, "\xee\x00\x01\x25\x00"}, + {256, 66, "\xee\x00\x01\x29\x00"}, + {256, 67, "\xee\x00\x01\x2d\x00"}, + {256, 68, "\xfe\x00\x01\x21\x00"}, + {256, 69, "\xfe\x00\x01\x25\x00"}, + {256, 80, "\xfe\x00\x01\x3e\x00\x01"}, + + {2048, 04, "\x0e\x00\x08"}, + {2048, 11, "\x2a\x00\x08"}, + {2048, 12, "\x2e\x00\x08"}, + {2048, 13, "\x32\x00\x08"}, + {2048, 59, "\xea\x00\x08"}, + {2048, 60, "\xee\x00\x08"}, + {2048, 61, "\xf2\x00\x08"}, + {2048, 62, "\xf6\x00\x08"}, + {2048, 63, "\xfa\x00\x08"}, + {2048, 64, "\xfe\x00\x08"}, + {2048, 65, "\xee\x00\x08\x12\x00\x08"}, + {2048, 66, "\xee\x00\x08\x16\x00\x08"}, + {2048, 67, "\xee\x00\x08\x1a\x00\x08"}, + {2048, 68, "\xfe\x00\x08\x0e\x00\x08"}, + {2048, 69, "\xfe\x00\x08\x12\x00\x08"}, + {2048, 80, "\xfe\x00\x08\x3e\x00\x08"}, + } + + dst := make([]byte, 1024) + for _, tc := range testCases { + n := emitCopy(dst, tc.offset, tc.length) + got := string(dst[:n]) + if got != tc.want { + t.Errorf("offset=%d, length=%d:\ngot % x\nwant % x", tc.offset, tc.length, got, tc.want) + } + } +} + +func TestNewBufferedWriter(t *testing.T) { + // Test all 32 possible sub-sequences of these 5 input slices. 
+ // + // Their lengths sum to 400,000, which is over 6 times the Writer ibuf + // capacity: 6 * maxBlockSize is 393,216. + inputs := [][]byte{ + bytes.Repeat([]byte{'a'}, 40000), + bytes.Repeat([]byte{'b'}, 150000), + bytes.Repeat([]byte{'c'}, 60000), + bytes.Repeat([]byte{'d'}, 120000), + bytes.Repeat([]byte{'e'}, 30000), + } +loop: + for i := 0; i < 1< 0; { + i := copy(x, src) + x = x[i:] + } + return dst +} + +func benchWords(b *testing.B, n int, decode bool) { + // Note: the file is OS-language dependent so the resulting values are not + // directly comparable for non-US-English OS installations. + data := expand(readFile(b, "/usr/share/dict/words"), n) + if decode { + benchDecode(b, data) + } else { + benchEncode(b, data) + } +} + +func BenchmarkWordsDecode1e1(b *testing.B) { benchWords(b, 1e1, true) } +func BenchmarkWordsDecode1e2(b *testing.B) { benchWords(b, 1e2, true) } +func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) } +func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) } +func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) } +func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) } +func BenchmarkWordsEncode1e1(b *testing.B) { benchWords(b, 1e1, false) } +func BenchmarkWordsEncode1e2(b *testing.B) { benchWords(b, 1e2, false) } +func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) } +func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) } +func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) } +func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) } + +func BenchmarkRandomEncode(b *testing.B) { + rng := rand.New(rand.NewSource(1)) + data := make([]byte, 1<<20) + for i := range data { + data[i] = uint8(rng.Intn(256)) + } + benchEncode(b, data) +} + +// testFiles' values are copied directly from +// https://raw.githubusercontent.com/google/snappy/master/snappy_unittest.cc +// The label field is unused in snappy-go. 
+var testFiles = []struct { + label string + filename string + sizeLimit int +}{ + {"html", "html", 0}, + {"urls", "urls.10K", 0}, + {"jpg", "fireworks.jpeg", 0}, + {"jpg_200", "fireworks.jpeg", 200}, + {"pdf", "paper-100k.pdf", 0}, + {"html4", "html_x_4", 0}, + {"txt1", "alice29.txt", 0}, + {"txt2", "asyoulik.txt", 0}, + {"txt3", "lcet10.txt", 0}, + {"txt4", "plrabn12.txt", 0}, + {"pb", "geo.protodata", 0}, + {"gaviota", "kppkn.gtb", 0}, +} + +const ( + // The benchmark data files are at this canonical URL. + benchURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/" +) + +func downloadBenchmarkFiles(b testing.TB, basename string) (errRet error) { + bDir := filepath.FromSlash(*benchdataDir) + filename := filepath.Join(bDir, basename) + if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 { + return nil + } + + if !*download { + b.Skipf("test data not found; skipping %s without the -download flag", testOrBenchmark(b)) + } + // Download the official snappy C++ implementation reference test data + // files for benchmarking. 
+ if err := os.MkdirAll(bDir, 0777); err != nil && !os.IsExist(err) { + return fmt.Errorf("failed to create %s: %s", bDir, err) + } + + f, err := os.Create(filename) + if err != nil { + return fmt.Errorf("failed to create %s: %s", filename, err) + } + defer f.Close() + defer func() { + if errRet != nil { + os.Remove(filename) + } + }() + url := benchURL + basename + resp, err := http.Get(url) + if err != nil { + return fmt.Errorf("failed to download %s: %s", url, err) + } + defer resp.Body.Close() + if s := resp.StatusCode; s != http.StatusOK { + return fmt.Errorf("downloading %s: HTTP status code %d (%s)", url, s, http.StatusText(s)) + } + _, err = io.Copy(f, resp.Body) + if err != nil { + return fmt.Errorf("failed to download %s to %s: %s", url, filename, err) + } + return nil +} + +func benchFile(b *testing.B, i int, decode bool) { + if err := downloadBenchmarkFiles(b, testFiles[i].filename); err != nil { + b.Fatalf("failed to download testdata: %s", err) + } + bDir := filepath.FromSlash(*benchdataDir) + data := readFile(b, filepath.Join(bDir, testFiles[i].filename)) + if n := testFiles[i].sizeLimit; 0 < n && n < len(data) { + data = data[:n] + } + if decode { + benchDecode(b, data) + } else { + benchEncode(b, data) + } +} + +// Naming convention is kept similar to what snappy's C++ implementation uses. 
+func Benchmark_UFlat0(b *testing.B) { benchFile(b, 0, true) } +func Benchmark_UFlat1(b *testing.B) { benchFile(b, 1, true) } +func Benchmark_UFlat2(b *testing.B) { benchFile(b, 2, true) } +func Benchmark_UFlat3(b *testing.B) { benchFile(b, 3, true) } +func Benchmark_UFlat4(b *testing.B) { benchFile(b, 4, true) } +func Benchmark_UFlat5(b *testing.B) { benchFile(b, 5, true) } +func Benchmark_UFlat6(b *testing.B) { benchFile(b, 6, true) } +func Benchmark_UFlat7(b *testing.B) { benchFile(b, 7, true) } +func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) } +func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) } +func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) } +func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) } +func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) } +func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) } +func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) } +func Benchmark_ZFlat3(b *testing.B) { benchFile(b, 3, false) } +func Benchmark_ZFlat4(b *testing.B) { benchFile(b, 4, false) } +func Benchmark_ZFlat5(b *testing.B) { benchFile(b, 5, false) } +func Benchmark_ZFlat6(b *testing.B) { benchFile(b, 6, false) } +func Benchmark_ZFlat7(b *testing.B) { benchFile(b, 7, false) } +func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) } +func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) } +func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) } +func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) } + +func BenchmarkExtendMatch(b *testing.B) { + tDir := filepath.FromSlash(*testdataDir) + src, err := ioutil.ReadFile(filepath.Join(tDir, goldenText)) + if err != nil { + b.Fatalf("ReadFile: %v", err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, tc := range extendMatchGoldenTestCases { + extendMatch(src, tc.i, tc.j) + } + } +} diff --git a/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt 
b/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt new file mode 100644 index 0000000..86a1875 --- /dev/null +++ b/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt @@ -0,0 +1,396 @@ +Produced by David Widger. The previous edition was updated by Jose +Menendez. + + + + + + THE ADVENTURES OF TOM SAWYER + BY + MARK TWAIN + (Samuel Langhorne Clemens) + + + + + P R E F A C E + +MOST of the adventures recorded in this book really occurred; one or +two were experiences of my own, the rest those of boys who were +schoolmates of mine. Huck Finn is drawn from life; Tom Sawyer also, but +not from an individual--he is a combination of the characteristics of +three boys whom I knew, and therefore belongs to the composite order of +architecture. + +The odd superstitions touched upon were all prevalent among children +and slaves in the West at the period of this story--that is to say, +thirty or forty years ago. + +Although my book is intended mainly for the entertainment of boys and +girls, I hope it will not be shunned by men and women on that account, +for part of my plan has been to try to pleasantly remind adults of what +they once were themselves, and of how they felt and thought and talked, +and what queer enterprises they sometimes engaged in. + + THE AUTHOR. + +HARTFORD, 1876. + + + + T O M S A W Y E R + + + +CHAPTER I + +"TOM!" + +No answer. + +"TOM!" + +No answer. + +"What's gone with that boy, I wonder? You TOM!" + +No answer. + +The old lady pulled her spectacles down and looked over them about the +room; then she put them up and looked out under them. She seldom or +never looked THROUGH them for so small a thing as a boy; they were her +state pair, the pride of her heart, and were built for "style," not +service--she could have seen through a pair of stove-lids just as well. 
+She looked perplexed for a moment, and then said, not fiercely, but +still loud enough for the furniture to hear: + +"Well, I lay if I get hold of you I'll--" + +She did not finish, for by this time she was bending down and punching +under the bed with the broom, and so she needed breath to punctuate the +punches with. She resurrected nothing but the cat. + +"I never did see the beat of that boy!" + +She went to the open door and stood in it and looked out among the +tomato vines and "jimpson" weeds that constituted the garden. No Tom. +So she lifted up her voice at an angle calculated for distance and +shouted: + +"Y-o-u-u TOM!" + +There was a slight noise behind her and she turned just in time to +seize a small boy by the slack of his roundabout and arrest his flight. + +"There! I might 'a' thought of that closet. What you been doing in +there?" + +"Nothing." + +"Nothing! Look at your hands. And look at your mouth. What IS that +truck?" + +"I don't know, aunt." + +"Well, I know. It's jam--that's what it is. Forty times I've said if +you didn't let that jam alone I'd skin you. Hand me that switch." + +The switch hovered in the air--the peril was desperate-- + +"My! Look behind you, aunt!" + +The old lady whirled round, and snatched her skirts out of danger. The +lad fled on the instant, scrambled up the high board-fence, and +disappeared over it. + +His aunt Polly stood surprised a moment, and then broke into a gentle +laugh. + +"Hang the boy, can't I never learn anything? Ain't he played me tricks +enough like that for me to be looking out for him by this time? But old +fools is the biggest fools there is. Can't learn an old dog new tricks, +as the saying is. But my goodness, he never plays them alike, two days, +and how is a body to know what's coming? He 'pears to know just how +long he can torment me before I get my dander up, and he knows if he +can make out to put me off for a minute or make me laugh, it's all down +again and I can't hit him a lick. 
I ain't doing my duty by that boy, +and that's the Lord's truth, goodness knows. Spare the rod and spile +the child, as the Good Book says. I'm a laying up sin and suffering for +us both, I know. He's full of the Old Scratch, but laws-a-me! he's my +own dead sister's boy, poor thing, and I ain't got the heart to lash +him, somehow. Every time I let him off, my conscience does hurt me so, +and every time I hit him my old heart most breaks. Well-a-well, man +that is born of woman is of few days and full of trouble, as the +Scripture says, and I reckon it's so. He'll play hookey this evening, * +and [* Southwestern for "afternoon"] I'll just be obleeged to make him +work, to-morrow, to punish him. It's mighty hard to make him work +Saturdays, when all the boys is having holiday, but he hates work more +than he hates anything else, and I've GOT to do some of my duty by him, +or I'll be the ruination of the child." + +Tom did play hookey, and he had a very good time. He got back home +barely in season to help Jim, the small colored boy, saw next-day's +wood and split the kindlings before supper--at least he was there in +time to tell his adventures to Jim while Jim did three-fourths of the +work. Tom's younger brother (or rather half-brother) Sid was already +through with his part of the work (picking up chips), for he was a +quiet boy, and had no adventurous, troublesome ways. + +While Tom was eating his supper, and stealing sugar as opportunity +offered, Aunt Polly asked him questions that were full of guile, and +very deep--for she wanted to trap him into damaging revealments. Like +many other simple-hearted souls, it was her pet vanity to believe she +was endowed with a talent for dark and mysterious diplomacy, and she +loved to contemplate her most transparent devices as marvels of low +cunning. Said she: + +"Tom, it was middling warm in school, warn't it?" + +"Yes'm." + +"Powerful warm, warn't it?" + +"Yes'm." + +"Didn't you want to go in a-swimming, Tom?" 
+ +A bit of a scare shot through Tom--a touch of uncomfortable suspicion. +He searched Aunt Polly's face, but it told him nothing. So he said: + +"No'm--well, not very much." + +The old lady reached out her hand and felt Tom's shirt, and said: + +"But you ain't too warm now, though." And it flattered her to reflect +that she had discovered that the shirt was dry without anybody knowing +that that was what she had in her mind. But in spite of her, Tom knew +where the wind lay, now. So he forestalled what might be the next move: + +"Some of us pumped on our heads--mine's damp yet. See?" + +Aunt Polly was vexed to think she had overlooked that bit of +circumstantial evidence, and missed a trick. Then she had a new +inspiration: + +"Tom, you didn't have to undo your shirt collar where I sewed it, to +pump on your head, did you? Unbutton your jacket!" + +The trouble vanished out of Tom's face. He opened his jacket. His +shirt collar was securely sewed. + +"Bother! Well, go 'long with you. I'd made sure you'd played hookey +and been a-swimming. But I forgive ye, Tom. I reckon you're a kind of a +singed cat, as the saying is--better'n you look. THIS time." + +She was half sorry her sagacity had miscarried, and half glad that Tom +had stumbled into obedient conduct for once. + +But Sidney said: + +"Well, now, if I didn't think you sewed his collar with white thread, +but it's black." + +"Why, I did sew it with white! Tom!" + +But Tom did not wait for the rest. As he went out at the door he said: + +"Siddy, I'll lick you for that." + +In a safe place Tom examined two large needles which were thrust into +the lapels of his jacket, and had thread bound about them--one needle +carried white thread and the other black. He said: + +"She'd never noticed if it hadn't been for Sid. Confound it! sometimes +she sews it with white, and sometimes she sews it with black. I wish to +geeminy she'd stick to one or t'other--I can't keep the run of 'em. But +I bet you I'll lam Sid for that. 
I'll learn him!" + +He was not the Model Boy of the village. He knew the model boy very +well though--and loathed him. + +Within two minutes, or even less, he had forgotten all his troubles. +Not because his troubles were one whit less heavy and bitter to him +than a man's are to a man, but because a new and powerful interest bore +them down and drove them out of his mind for the time--just as men's +misfortunes are forgotten in the excitement of new enterprises. This +new interest was a valued novelty in whistling, which he had just +acquired from a negro, and he was suffering to practise it undisturbed. +It consisted in a peculiar bird-like turn, a sort of liquid warble, +produced by touching the tongue to the roof of the mouth at short +intervals in the midst of the music--the reader probably remembers how +to do it, if he has ever been a boy. Diligence and attention soon gave +him the knack of it, and he strode down the street with his mouth full +of harmony and his soul full of gratitude. He felt much as an +astronomer feels who has discovered a new planet--no doubt, as far as +strong, deep, unalloyed pleasure is concerned, the advantage was with +the boy, not the astronomer. + +The summer evenings were long. It was not dark, yet. Presently Tom +checked his whistle. A stranger was before him--a boy a shade larger +than himself. A new-comer of any age or either sex was an impressive +curiosity in the poor little shabby village of St. Petersburg. This boy +was well dressed, too--well dressed on a week-day. This was simply +astounding. His cap was a dainty thing, his close-buttoned blue cloth +roundabout was new and natty, and so were his pantaloons. He had shoes +on--and it was only Friday. He even wore a necktie, a bright bit of +ribbon. He had a citified air about him that ate into Tom's vitals. The +more Tom stared at the splendid marvel, the higher he turned up his +nose at his finery and the shabbier and shabbier his own outfit seemed +to him to grow. 
Neither boy spoke. If one moved, the other moved--but +only sidewise, in a circle; they kept face to face and eye to eye all +the time. Finally Tom said: + +"I can lick you!" + +"I'd like to see you try it." + +"Well, I can do it." + +"No you can't, either." + +"Yes I can." + +"No you can't." + +"I can." + +"You can't." + +"Can!" + +"Can't!" + +An uncomfortable pause. Then Tom said: + +"What's your name?" + +"'Tisn't any of your business, maybe." + +"Well I 'low I'll MAKE it my business." + +"Well why don't you?" + +"If you say much, I will." + +"Much--much--MUCH. There now." + +"Oh, you think you're mighty smart, DON'T you? I could lick you with +one hand tied behind me, if I wanted to." + +"Well why don't you DO it? You SAY you can do it." + +"Well I WILL, if you fool with me." + +"Oh yes--I've seen whole families in the same fix." + +"Smarty! You think you're SOME, now, DON'T you? Oh, what a hat!" + +"You can lump that hat if you don't like it. I dare you to knock it +off--and anybody that'll take a dare will suck eggs." + +"You're a liar!" + +"You're another." + +"You're a fighting liar and dasn't take it up." + +"Aw--take a walk!" + +"Say--if you give me much more of your sass I'll take and bounce a +rock off'n your head." + +"Oh, of COURSE you will." + +"Well I WILL." + +"Well why don't you DO it then? What do you keep SAYING you will for? +Why don't you DO it? It's because you're afraid." + +"I AIN'T afraid." + +"You are." + +"I ain't." + +"You are." + +Another pause, and more eying and sidling around each other. Presently +they were shoulder to shoulder. Tom said: + +"Get away from here!" + +"Go away yourself!" + +"I won't." + +"I won't either." + +So they stood, each with a foot placed at an angle as a brace, and +both shoving with might and main, and glowering at each other with +hate. But neither could get an advantage. 
After struggling till both +were hot and flushed, each relaxed his strain with watchful caution, +and Tom said: + +"You're a coward and a pup. I'll tell my big brother on you, and he +can thrash you with his little finger, and I'll make him do it, too." + +"What do I care for your big brother? I've got a brother that's bigger +than he is--and what's more, he can throw him over that fence, too." +[Both brothers were imaginary.] + +"That's a lie." + +"YOUR saying so don't make it so." + +Tom drew a line in the dust with his big toe, and said: + +"I dare you to step over that, and I'll lick you till you can't stand +up. Anybody that'll take a dare will steal sheep." + +The new boy stepped over promptly, and said: + +"Now you said you'd do it, now let's see you do it." + +"Don't you crowd me now; you better look out." + +"Well, you SAID you'd do it--why don't you do it?" + +"By jingo! for two cents I WILL do it." + +The new boy took two broad coppers out of his pocket and held them out +with derision. Tom struck them to the ground. In an instant both boys +were rolling and tumbling in the dirt, gripped together like cats; and +for the space of a minute they tugged and tore at each other's hair and +clothes, punched and scratched each other's nose, and covered +themselves with dust and glory. Presently the confusion took form, and +through the fog of battle Tom appeared, seated astride the new boy, and +pounding him with his fists. "Holler 'nuff!" said he. + +The boy only struggled to free himself. He was crying--mainly from rage. + +"Holler 'nuff!"--and the pounding went on. + +At last the stranger got out a smothered "'Nuff!" and Tom let him up +and said: + +"Now that'll learn you. Better look out who you're fooling with next +time." + +The new boy went off brushing the dust from his clothes, sobbing, +snuffling, and occasionally looking back and shaking his head and +threatening what he would do to Tom the "next time he caught him out." 
+To which Tom responded with jeers, and started off in high feather, and +as soon as his back was turned the new boy snatched up a stone, threw +it and hit him between the shoulders and then turned tail and ran like +an antelope. Tom chased the traitor home, and thus found out where he +lived. He then held a position at the gate for some time, daring the +enemy to come outside, but the enemy only made faces at him through the +window and declined. At last the enemy's mother appeared, and called +Tom a bad, vicious, vulgar child, and ordered him away. So he went +away; but he said he "'lowed" to "lay" for that boy. + +He got home pretty late that night, and when he climbed cautiously in +at the window, he uncovered an ambuscade, in the person of his aunt; +and when she saw the state his clothes were in her resolution to turn +his Saturday holiday into captivity at hard labor became adamantine in +its firmness. diff --git a/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt.rawsnappy b/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt.rawsnappy new file mode 100644 index 0000000..9c56d98 Binary files /dev/null and b/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt.rawsnappy differ diff --git a/vendor/github.com/satori/go.uuid/.travis.yml b/vendor/github.com/satori/go.uuid/.travis.yml deleted file mode 100644 index 20dd53b..0000000 --- a/vendor/github.com/satori/go.uuid/.travis.yml +++ /dev/null @@ -1,23 +0,0 @@ -language: go -sudo: false -go: - - 1.2 - - 1.3 - - 1.4 - - 1.5 - - 1.6 - - 1.7 - - 1.8 - - 1.9 - - tip -matrix: - allow_failures: - - go: tip - fast_finish: true -before_install: - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover -script: - - $HOME/gopath/bin/goveralls -service=travis-ci -notifications: - email: false diff --git a/vendor/github.com/satori/go.uuid/LICENSE b/vendor/github.com/satori/go.uuid/LICENSE deleted file mode 100644 index 926d549..0000000 --- 
a/vendor/github.com/satori/go.uuid/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (C) 2013-2018 by Maxim Bublis - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/satori/go.uuid/README.md b/vendor/github.com/satori/go.uuid/README.md deleted file mode 100644 index 7b1a722..0000000 --- a/vendor/github.com/satori/go.uuid/README.md +++ /dev/null @@ -1,65 +0,0 @@ -# UUID package for Go language - -[![Build Status](https://travis-ci.org/satori/go.uuid.png?branch=master)](https://travis-ci.org/satori/go.uuid) -[![Coverage Status](https://coveralls.io/repos/github/satori/go.uuid/badge.svg?branch=master)](https://coveralls.io/github/satori/go.uuid) -[![GoDoc](http://godoc.org/github.com/satori/go.uuid?status.png)](http://godoc.org/github.com/satori/go.uuid) - -This package provides pure Go implementation of Universally Unique Identifier (UUID). Supported both creation and parsing of UUIDs. - -With 100% test coverage and benchmarks out of box. 
- -Supported versions: -* Version 1, based on timestamp and MAC address (RFC 4122) -* Version 2, based on timestamp, MAC address and POSIX UID/GID (DCE 1.1) -* Version 3, based on MD5 hashing (RFC 4122) -* Version 4, based on random numbers (RFC 4122) -* Version 5, based on SHA-1 hashing (RFC 4122) - -## Installation - -Use the `go` command: - - $ go get github.com/satori/go.uuid - -## Requirements - -UUID package requires Go >= 1.2. - -## Example - -```go -package main - -import ( - "fmt" - "github.com/satori/go.uuid" -) - -func main() { - // Creating UUID Version 4 - u1 := uuid.NewV4() - fmt.Printf("UUIDv4: %s\n", u1) - - // Parsing UUID from string input - u2, err := uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8") - if err != nil { - fmt.Printf("Something gone wrong: %s", err) - } - fmt.Printf("Successfully parsed: %s", u2) -} -``` - -## Documentation - -[Documentation](http://godoc.org/github.com/satori/go.uuid) is hosted at GoDoc project. - -## Links -* [RFC 4122](http://tools.ietf.org/html/rfc4122) -* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01) - -## Copyright - -Copyright (C) 2013-2018 by Maxim Bublis . - -UUID package released under MIT License. -See [LICENSE](https://github.com/satori/go.uuid/blob/master/LICENSE) for details. 
diff --git a/vendor/github.com/satori/go.uuid/codec.go b/vendor/github.com/satori/go.uuid/codec.go deleted file mode 100644 index 656892c..0000000 --- a/vendor/github.com/satori/go.uuid/codec.go +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright (C) 2013-2018 by Maxim Bublis -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package uuid - -import ( - "bytes" - "encoding/hex" - "fmt" -) - -// FromBytes returns UUID converted from raw byte slice input. -// It will return error if the slice isn't 16 bytes long. -func FromBytes(input []byte) (u UUID, err error) { - err = u.UnmarshalBinary(input) - return -} - -// FromBytesOrNil returns UUID converted from raw byte slice input. -// Same behavior as FromBytes, but returns a Nil UUID on error. -func FromBytesOrNil(input []byte) UUID { - uuid, err := FromBytes(input) - if err != nil { - return Nil - } - return uuid -} - -// FromString returns UUID parsed from string input. 
-// Input is expected in a form accepted by UnmarshalText. -func FromString(input string) (u UUID, err error) { - err = u.UnmarshalText([]byte(input)) - return -} - -// FromStringOrNil returns UUID parsed from string input. -// Same behavior as FromString, but returns a Nil UUID on error. -func FromStringOrNil(input string) UUID { - uuid, err := FromString(input) - if err != nil { - return Nil - } - return uuid -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The encoding is the same as returned by String. -func (u UUID) MarshalText() (text []byte, err error) { - text = []byte(u.String()) - return -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// Following formats are supported: -// "6ba7b810-9dad-11d1-80b4-00c04fd430c8", -// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", -// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" -// "6ba7b8109dad11d180b400c04fd430c8" -// ABNF for supported UUID text representation follows: -// uuid := canonical | hashlike | braced | urn -// plain := canonical | hashlike -// canonical := 4hexoct '-' 2hexoct '-' 2hexoct '-' 6hexoct -// hashlike := 12hexoct -// braced := '{' plain '}' -// urn := URN ':' UUID-NID ':' plain -// URN := 'urn' -// UUID-NID := 'uuid' -// 12hexoct := 6hexoct 6hexoct -// 6hexoct := 4hexoct 2hexoct -// 4hexoct := 2hexoct 2hexoct -// 2hexoct := hexoct hexoct -// hexoct := hexdig hexdig -// hexdig := '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | -// 'a' | 'b' | 'c' | 'd' | 'e' | 'f' | -// 'A' | 'B' | 'C' | 'D' | 'E' | 'F' -func (u *UUID) UnmarshalText(text []byte) (err error) { - switch len(text) { - case 32: - return u.decodeHashLike(text) - case 36: - return u.decodeCanonical(text) - case 38: - return u.decodeBraced(text) - case 41: - fallthrough - case 45: - return u.decodeURN(text) - default: - return fmt.Errorf("uuid: incorrect UUID length: %s", text) - } -} - -// decodeCanonical decodes UUID string in format -// "6ba7b810-9dad-11d1-80b4-00c04fd430c8". 
-func (u *UUID) decodeCanonical(t []byte) (err error) { - if t[8] != '-' || t[13] != '-' || t[18] != '-' || t[23] != '-' { - return fmt.Errorf("uuid: incorrect UUID format %s", t) - } - - src := t[:] - dst := u[:] - - for i, byteGroup := range byteGroups { - if i > 0 { - src = src[1:] // skip dash - } - _, err = hex.Decode(dst[:byteGroup/2], src[:byteGroup]) - if err != nil { - return - } - src = src[byteGroup:] - dst = dst[byteGroup/2:] - } - - return -} - -// decodeHashLike decodes UUID string in format -// "6ba7b8109dad11d180b400c04fd430c8". -func (u *UUID) decodeHashLike(t []byte) (err error) { - src := t[:] - dst := u[:] - - if _, err = hex.Decode(dst, src); err != nil { - return err - } - return -} - -// decodeBraced decodes UUID string in format -// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" or in format -// "{6ba7b8109dad11d180b400c04fd430c8}". -func (u *UUID) decodeBraced(t []byte) (err error) { - l := len(t) - - if t[0] != '{' || t[l-1] != '}' { - return fmt.Errorf("uuid: incorrect UUID format %s", t) - } - - return u.decodePlain(t[1 : l-1]) -} - -// decodeURN decodes UUID string in format -// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in format -// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8". -func (u *UUID) decodeURN(t []byte) (err error) { - total := len(t) - - urn_uuid_prefix := t[:9] - - if !bytes.Equal(urn_uuid_prefix, urnPrefix) { - return fmt.Errorf("uuid: incorrect UUID format: %s", t) - } - - return u.decodePlain(t[9:total]) -} - -// decodePlain decodes UUID string in canonical format -// "6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in hash-like format -// "6ba7b8109dad11d180b400c04fd430c8". -func (u *UUID) decodePlain(t []byte) (err error) { - switch len(t) { - case 32: - return u.decodeHashLike(t) - case 36: - return u.decodeCanonical(t) - default: - return fmt.Errorf("uuid: incorrrect UUID length: %s", t) - } -} - -// MarshalBinary implements the encoding.BinaryMarshaler interface. 
-func (u UUID) MarshalBinary() (data []byte, err error) { - data = u.Bytes() - return -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. -// It will return error if the slice isn't 16 bytes long. -func (u *UUID) UnmarshalBinary(data []byte) (err error) { - if len(data) != Size { - err = fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data)) - return - } - copy(u[:], data) - - return -} diff --git a/vendor/github.com/satori/go.uuid/codec_test.go b/vendor/github.com/satori/go.uuid/codec_test.go deleted file mode 100644 index 101ec52..0000000 --- a/vendor/github.com/satori/go.uuid/codec_test.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright (C) 2013-2018 by Maxim Bublis -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package uuid - -import ( - "bytes" - - . 
"gopkg.in/check.v1" -) - -type codecTestSuite struct{} - -var _ = Suite(&codecTestSuite{}) - -func (s *codecTestSuite) TestFromBytes(c *C) { - u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - - u1, err := FromBytes(b1) - c.Assert(err, IsNil) - c.Assert(u1, Equals, u) - - b2 := []byte{} - _, err = FromBytes(b2) - c.Assert(err, NotNil) -} - -func (s *codecTestSuite) BenchmarkFromBytes(c *C) { - bytes := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - for i := 0; i < c.N; i++ { - FromBytes(bytes) - } -} - -func (s *codecTestSuite) TestMarshalBinary(c *C) { - u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - - b2, err := u.MarshalBinary() - c.Assert(err, IsNil) - c.Assert(bytes.Equal(b1, b2), Equals, true) -} - -func (s *codecTestSuite) BenchmarkMarshalBinary(c *C) { - u := NewV4() - for i := 0; i < c.N; i++ { - u.MarshalBinary() - } -} - -func (s *codecTestSuite) TestUnmarshalBinary(c *C) { - u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - - u1 := UUID{} - err := u1.UnmarshalBinary(b1) - c.Assert(err, IsNil) - c.Assert(u1, Equals, u) - - b2 := []byte{} - u2 := UUID{} - err = u2.UnmarshalBinary(b2) - c.Assert(err, NotNil) -} - -func (s *codecTestSuite) TestFromString(c *C) { - u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - - s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" - s2 := "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" - s3 := 
"urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" - s4 := "6ba7b8109dad11d180b400c04fd430c8" - s5 := "urn:uuid:6ba7b8109dad11d180b400c04fd430c8" - - _, err := FromString("") - c.Assert(err, NotNil) - - u1, err := FromString(s1) - c.Assert(err, IsNil) - c.Assert(u1, Equals, u) - - u2, err := FromString(s2) - c.Assert(err, IsNil) - c.Assert(u2, Equals, u) - - u3, err := FromString(s3) - c.Assert(err, IsNil) - c.Assert(u3, Equals, u) - - u4, err := FromString(s4) - c.Assert(err, IsNil) - c.Assert(u4, Equals, u) - - u5, err := FromString(s5) - c.Assert(err, IsNil) - c.Assert(u5, Equals, u) -} - -func (s *codecTestSuite) BenchmarkFromString(c *C) { - str := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" - for i := 0; i < c.N; i++ { - FromString(str) - } -} - -func (s *codecTestSuite) BenchmarkFromStringUrn(c *C) { - str := "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" - for i := 0; i < c.N; i++ { - FromString(str) - } -} - -func (s *codecTestSuite) BenchmarkFromStringWithBrackets(c *C) { - str := "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" - for i := 0; i < c.N; i++ { - FromString(str) - } -} - -func (s *codecTestSuite) TestFromStringShort(c *C) { - // Invalid 35-character UUID string - s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c" - - for i := len(s1); i >= 0; i-- { - _, err := FromString(s1[:i]) - c.Assert(err, NotNil) - } -} - -func (s *codecTestSuite) TestFromStringLong(c *C) { - // Invalid 37+ character UUID string - strings := []string{ - "6ba7b810-9dad-11d1-80b4-00c04fd430c8=", - "6ba7b810-9dad-11d1-80b4-00c04fd430c8}", - "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}f", - "6ba7b810-9dad-11d1-80b4-00c04fd430c800c04fd430c8", - } - - for _, str := range strings { - _, err := FromString(str) - c.Assert(err, NotNil) - } -} - -func (s *codecTestSuite) TestFromStringInvalid(c *C) { - // Invalid UUID string formats - strings := []string{ - "6ba7b8109dad11d180b400c04fd430c86ba7b8109dad11d180b400c04fd430c8", - "urn:uuid:{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", - 
"uuid:urn:6ba7b810-9dad-11d1-80b4-00c04fd430c8", - "uuid:urn:6ba7b8109dad11d180b400c04fd430c8", - "6ba7b8109-dad-11d1-80b4-00c04fd430c8", - "6ba7b810-9dad1-1d1-80b4-00c04fd430c8", - "6ba7b810-9dad-11d18-0b4-00c04fd430c8", - "6ba7b810-9dad-11d1-80b40-0c04fd430c8", - "6ba7b810+9dad+11d1+80b4+00c04fd430c8", - "(6ba7b810-9dad-11d1-80b4-00c04fd430c8}", - "{6ba7b810-9dad-11d1-80b4-00c04fd430c8>", - "zba7b810-9dad-11d1-80b4-00c04fd430c8", - "6ba7b810-9dad11d180b400c04fd430c8", - "6ba7b8109dad-11d180b400c04fd430c8", - "6ba7b8109dad11d1-80b400c04fd430c8", - "6ba7b8109dad11d180b4-00c04fd430c8", - } - - for _, str := range strings { - _, err := FromString(str) - c.Assert(err, NotNil) - } -} - -func (s *codecTestSuite) TestFromStringOrNil(c *C) { - u := FromStringOrNil("") - c.Assert(u, Equals, Nil) -} - -func (s *codecTestSuite) TestFromBytesOrNil(c *C) { - b := []byte{} - u := FromBytesOrNil(b) - c.Assert(u, Equals, Nil) -} - -func (s *codecTestSuite) TestMarshalText(c *C) { - u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8") - - b2, err := u.MarshalText() - c.Assert(err, IsNil) - c.Assert(bytes.Equal(b1, b2), Equals, true) -} - -func (s *codecTestSuite) BenchmarkMarshalText(c *C) { - u := NewV4() - for i := 0; i < c.N; i++ { - u.MarshalText() - } -} - -func (s *codecTestSuite) TestUnmarshalText(c *C) { - u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8") - - u1 := UUID{} - err := u1.UnmarshalText(b1) - c.Assert(err, IsNil) - c.Assert(u1, Equals, u) - - b2 := []byte("") - u2 := UUID{} - err = u2.UnmarshalText(b2) - c.Assert(err, NotNil) -} - -func (s *codecTestSuite) BenchmarkUnmarshalText(c *C) { - bytes := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8") - u := UUID{} - for i := 0; i < c.N; i++ { - u.UnmarshalText(bytes) - } -} - -var sink string - -func (s 
*codecTestSuite) BenchmarkMarshalToString(c *C) { - u := NewV4() - for i := 0; i < c.N; i++ { - sink = u.String() - } -} diff --git a/vendor/github.com/satori/go.uuid/generator.go b/vendor/github.com/satori/go.uuid/generator.go deleted file mode 100644 index 3f2f1da..0000000 --- a/vendor/github.com/satori/go.uuid/generator.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright (C) 2013-2018 by Maxim Bublis -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package uuid - -import ( - "crypto/md5" - "crypto/rand" - "crypto/sha1" - "encoding/binary" - "hash" - "net" - "os" - "sync" - "time" -) - -// Difference in 100-nanosecond intervals between -// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970). 
-const epochStart = 122192928000000000 - -var ( - global = newDefaultGenerator() - - epochFunc = unixTimeFunc - posixUID = uint32(os.Getuid()) - posixGID = uint32(os.Getgid()) -) - -// NewV1 returns UUID based on current timestamp and MAC address. -func NewV1() UUID { - return global.NewV1() -} - -// NewV2 returns DCE Security UUID based on POSIX UID/GID. -func NewV2(domain byte) UUID { - return global.NewV2(domain) -} - -// NewV3 returns UUID based on MD5 hash of namespace UUID and name. -func NewV3(ns UUID, name string) UUID { - return global.NewV3(ns, name) -} - -// NewV4 returns random generated UUID. -func NewV4() UUID { - return global.NewV4() -} - -// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name. -func NewV5(ns UUID, name string) UUID { - return global.NewV5(ns, name) -} - -// Generator provides interface for generating UUIDs. -type Generator interface { - NewV1() UUID - NewV2(domain byte) UUID - NewV3(ns UUID, name string) UUID - NewV4() UUID - NewV5(ns UUID, name string) UUID -} - -// Default generator implementation. -type generator struct { - storageOnce sync.Once - storageMutex sync.Mutex - - lastTime uint64 - clockSequence uint16 - hardwareAddr [6]byte -} - -func newDefaultGenerator() Generator { - return &generator{} -} - -// NewV1 returns UUID based on current timestamp and MAC address. -func (g *generator) NewV1() UUID { - u := UUID{} - - timeNow, clockSeq, hardwareAddr := g.getStorage() - - binary.BigEndian.PutUint32(u[0:], uint32(timeNow)) - binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) - binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) - binary.BigEndian.PutUint16(u[8:], clockSeq) - - copy(u[10:], hardwareAddr) - - u.SetVersion(V1) - u.SetVariant(VariantRFC4122) - - return u -} - -// NewV2 returns DCE Security UUID based on POSIX UID/GID. 
-func (g *generator) NewV2(domain byte) UUID { - u := UUID{} - - timeNow, clockSeq, hardwareAddr := g.getStorage() - - switch domain { - case DomainPerson: - binary.BigEndian.PutUint32(u[0:], posixUID) - case DomainGroup: - binary.BigEndian.PutUint32(u[0:], posixGID) - } - - binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) - binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) - binary.BigEndian.PutUint16(u[8:], clockSeq) - u[9] = domain - - copy(u[10:], hardwareAddr) - - u.SetVersion(V2) - u.SetVariant(VariantRFC4122) - - return u -} - -// NewV3 returns UUID based on MD5 hash of namespace UUID and name. -func (g *generator) NewV3(ns UUID, name string) UUID { - u := newFromHash(md5.New(), ns, name) - u.SetVersion(V3) - u.SetVariant(VariantRFC4122) - - return u -} - -// NewV4 returns random generated UUID. -func (g *generator) NewV4() UUID { - u := UUID{} - g.safeRandom(u[:]) - u.SetVersion(V4) - u.SetVariant(VariantRFC4122) - - return u -} - -// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name. 
-func (g *generator) NewV5(ns UUID, name string) UUID { - u := newFromHash(sha1.New(), ns, name) - u.SetVersion(V5) - u.SetVariant(VariantRFC4122) - - return u -} - -func (g *generator) initStorage() { - g.initClockSequence() - g.initHardwareAddr() -} - -func (g *generator) initClockSequence() { - buf := make([]byte, 2) - g.safeRandom(buf) - g.clockSequence = binary.BigEndian.Uint16(buf) -} - -func (g *generator) initHardwareAddr() { - interfaces, err := net.Interfaces() - if err == nil { - for _, iface := range interfaces { - if len(iface.HardwareAddr) >= 6 { - copy(g.hardwareAddr[:], iface.HardwareAddr) - return - } - } - } - - // Initialize hardwareAddr randomly in case - // of real network interfaces absence - g.safeRandom(g.hardwareAddr[:]) - - // Set multicast bit as recommended in RFC 4122 - g.hardwareAddr[0] |= 0x01 -} - -func (g *generator) safeRandom(dest []byte) { - if _, err := rand.Read(dest); err != nil { - panic(err) - } -} - -// Returns UUID v1/v2 storage state. -// Returns epoch timestamp, clock sequence, and hardware address. -func (g *generator) getStorage() (uint64, uint16, []byte) { - g.storageOnce.Do(g.initStorage) - - g.storageMutex.Lock() - defer g.storageMutex.Unlock() - - timeNow := epochFunc() - // Clock changed backwards since last UUID generation. - // Should increase clock sequence. - if timeNow <= g.lastTime { - g.clockSequence++ - } - g.lastTime = timeNow - - return timeNow, g.clockSequence, g.hardwareAddr[:] -} - -// Returns difference in 100-nanosecond intervals between -// UUID epoch (October 15, 1582) and current time. -// This is default epoch calculation function. -func unixTimeFunc() uint64 { - return epochStart + uint64(time.Now().UnixNano()/100) -} - -// Returns UUID based on hashing of namespace UUID and name. 
-func newFromHash(h hash.Hash, ns UUID, name string) UUID { - u := UUID{} - h.Write(ns[:]) - h.Write([]byte(name)) - copy(u[:], h.Sum(nil)) - - return u -} diff --git a/vendor/github.com/satori/go.uuid/generator_test.go b/vendor/github.com/satori/go.uuid/generator_test.go deleted file mode 100644 index cd69e2e..0000000 --- a/vendor/github.com/satori/go.uuid/generator_test.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright (C) 2013-2018 by Maxim Bublis -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package uuid - -import ( - . 
"gopkg.in/check.v1" -) - -type genTestSuite struct{} - -var _ = Suite(&genTestSuite{}) - -func (s *genTestSuite) TestNewV1(c *C) { - u := NewV1() - c.Assert(u.Version(), Equals, V1) - c.Assert(u.Variant(), Equals, VariantRFC4122) - - u1 := NewV1() - u2 := NewV1() - c.Assert(u1, Not(Equals), u2) - - oldFunc := epochFunc - epochFunc = func() uint64 { return 0 } - - u3 := NewV1() - u4 := NewV1() - c.Assert(u3, Not(Equals), u4) - - epochFunc = oldFunc -} - -func (s *genTestSuite) BenchmarkNewV1(c *C) { - for i := 0; i < c.N; i++ { - NewV1() - } -} - -func (s *genTestSuite) TestNewV2(c *C) { - u1 := NewV2(DomainPerson) - c.Assert(u1.Version(), Equals, V2) - c.Assert(u1.Variant(), Equals, VariantRFC4122) - - u2 := NewV2(DomainGroup) - c.Assert(u2.Version(), Equals, V2) - c.Assert(u2.Variant(), Equals, VariantRFC4122) -} - -func (s *genTestSuite) BenchmarkNewV2(c *C) { - for i := 0; i < c.N; i++ { - NewV2(DomainPerson) - } -} - -func (s *genTestSuite) TestNewV3(c *C) { - u := NewV3(NamespaceDNS, "www.example.com") - c.Assert(u.Version(), Equals, V3) - c.Assert(u.Variant(), Equals, VariantRFC4122) - c.Assert(u.String(), Equals, "5df41881-3aed-3515-88a7-2f4a814cf09e") - - u = NewV3(NamespaceDNS, "python.org") - c.Assert(u.String(), Equals, "6fa459ea-ee8a-3ca4-894e-db77e160355e") - - u1 := NewV3(NamespaceDNS, "golang.org") - u2 := NewV3(NamespaceDNS, "golang.org") - c.Assert(u1, Equals, u2) - - u3 := NewV3(NamespaceDNS, "example.com") - c.Assert(u1, Not(Equals), u3) - - u4 := NewV3(NamespaceURL, "golang.org") - c.Assert(u1, Not(Equals), u4) -} - -func (s *genTestSuite) BenchmarkNewV3(c *C) { - for i := 0; i < c.N; i++ { - NewV3(NamespaceDNS, "www.example.com") - } -} - -func (s *genTestSuite) TestNewV4(c *C) { - u := NewV4() - c.Assert(u.Version(), Equals, V4) - c.Assert(u.Variant(), Equals, VariantRFC4122) -} - -func (s *genTestSuite) BenchmarkNewV4(c *C) { - for i := 0; i < c.N; i++ { - NewV4() - } -} - -func (s *genTestSuite) TestNewV5(c *C) { - u := NewV5(NamespaceDNS, 
"www.example.com") - c.Assert(u.Version(), Equals, V5) - c.Assert(u.Variant(), Equals, VariantRFC4122) - - u = NewV5(NamespaceDNS, "python.org") - c.Assert(u.String(), Equals, "886313e1-3b8a-5372-9b90-0c9aee199e5d") - - u1 := NewV5(NamespaceDNS, "golang.org") - u2 := NewV5(NamespaceDNS, "golang.org") - c.Assert(u1, Equals, u2) - - u3 := NewV5(NamespaceDNS, "example.com") - c.Assert(u1, Not(Equals), u3) - - u4 := NewV5(NamespaceURL, "golang.org") - c.Assert(u1, Not(Equals), u4) -} - -func (s *genTestSuite) BenchmarkNewV5(c *C) { - for i := 0; i < c.N; i++ { - NewV5(NamespaceDNS, "www.example.com") - } -} diff --git a/vendor/github.com/satori/go.uuid/sql.go b/vendor/github.com/satori/go.uuid/sql.go deleted file mode 100644 index 56759d3..0000000 --- a/vendor/github.com/satori/go.uuid/sql.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright (C) 2013-2018 by Maxim Bublis -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- -package uuid - -import ( - "database/sql/driver" - "fmt" -) - -// Value implements the driver.Valuer interface. -func (u UUID) Value() (driver.Value, error) { - return u.String(), nil -} - -// Scan implements the sql.Scanner interface. -// A 16-byte slice is handled by UnmarshalBinary, while -// a longer byte slice or a string is handled by UnmarshalText. -func (u *UUID) Scan(src interface{}) error { - switch src := src.(type) { - case []byte: - if len(src) == Size { - return u.UnmarshalBinary(src) - } - return u.UnmarshalText(src) - - case string: - return u.UnmarshalText([]byte(src)) - } - - return fmt.Errorf("uuid: cannot convert %T to UUID", src) -} - -// NullUUID can be used with the standard sql package to represent a -// UUID value that can be NULL in the database -type NullUUID struct { - UUID UUID - Valid bool -} - -// Value implements the driver.Valuer interface. -func (u NullUUID) Value() (driver.Value, error) { - if !u.Valid { - return nil, nil - } - // Delegate to UUID Value function - return u.UUID.Value() -} - -// Scan implements the sql.Scanner interface. 
-func (u *NullUUID) Scan(src interface{}) error { - if src == nil { - u.UUID, u.Valid = Nil, false - return nil - } - - // Delegate to UUID Scan function - u.Valid = true - return u.UUID.Scan(src) -} diff --git a/vendor/github.com/satori/go.uuid/sql_test.go b/vendor/github.com/satori/go.uuid/sql_test.go deleted file mode 100644 index 74255f5..0000000 --- a/vendor/github.com/satori/go.uuid/sql_test.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright (C) 2013-2018 by Maxim Bublis -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package uuid - -import ( - . 
"gopkg.in/check.v1" -) - -type sqlTestSuite struct{} - -var _ = Suite(&sqlTestSuite{}) - -func (s *sqlTestSuite) TestValue(c *C) { - u, err := FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8") - c.Assert(err, IsNil) - - val, err := u.Value() - c.Assert(err, IsNil) - c.Assert(val, Equals, u.String()) -} - -func (s *sqlTestSuite) TestValueNil(c *C) { - u := UUID{} - - val, err := u.Value() - c.Assert(err, IsNil) - c.Assert(val, Equals, Nil.String()) -} - -func (s *sqlTestSuite) TestNullUUIDValueNil(c *C) { - u := NullUUID{} - - val, err := u.Value() - c.Assert(err, IsNil) - c.Assert(val, IsNil) -} - -func (s *sqlTestSuite) TestScanBinary(c *C) { - u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - - u1 := UUID{} - err := u1.Scan(b1) - c.Assert(err, IsNil) - c.Assert(u, Equals, u1) - - b2 := []byte{} - u2 := UUID{} - - err = u2.Scan(b2) - c.Assert(err, NotNil) -} - -func (s *sqlTestSuite) TestScanString(c *C) { - u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" - - u1 := UUID{} - err := u1.Scan(s1) - c.Assert(err, IsNil) - c.Assert(u, Equals, u1) - - s2 := "" - u2 := UUID{} - - err = u2.Scan(s2) - c.Assert(err, NotNil) -} - -func (s *sqlTestSuite) TestScanText(c *C) { - u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8") - - u1 := UUID{} - err := u1.Scan(b1) - c.Assert(err, IsNil) - c.Assert(u, Equals, u1) - - b2 := []byte("") - u2 := UUID{} - err = u2.Scan(b2) - c.Assert(err, NotNil) -} - -func (s *sqlTestSuite) TestScanUnsupported(c *C) { - u := UUID{} - - err := u.Scan(true) - c.Assert(err, NotNil) -} - -func (s *sqlTestSuite) TestScanNil(c *C) { - u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 
0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - - err := u.Scan(nil) - c.Assert(err, NotNil) -} - -func (s *sqlTestSuite) TestNullUUIDScanValid(c *C) { - u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" - - u1 := NullUUID{} - err := u1.Scan(s1) - c.Assert(err, IsNil) - c.Assert(u1.Valid, Equals, true) - c.Assert(u1.UUID, Equals, u) -} - -func (s *sqlTestSuite) TestNullUUIDScanNil(c *C) { - u := NullUUID{UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}, true} - - err := u.Scan(nil) - c.Assert(err, IsNil) - c.Assert(u.Valid, Equals, false) - c.Assert(u.UUID, Equals, Nil) -} diff --git a/vendor/github.com/satori/go.uuid/uuid.go b/vendor/github.com/satori/go.uuid/uuid.go deleted file mode 100644 index a2b8e2c..0000000 --- a/vendor/github.com/satori/go.uuid/uuid.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright (C) 2013-2018 by Maxim Bublis -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -// Package uuid provides implementation of Universally Unique Identifier (UUID). -// Supported versions are 1, 3, 4 and 5 (as specified in RFC 4122) and -// version 2 (as specified in DCE 1.1). -package uuid - -import ( - "bytes" - "encoding/hex" -) - -// Size of a UUID in bytes. -const Size = 16 - -// UUID representation compliant with specification -// described in RFC 4122. -type UUID [Size]byte - -// UUID versions -const ( - _ byte = iota - V1 - V2 - V3 - V4 - V5 -) - -// UUID layout variants. -const ( - VariantNCS byte = iota - VariantRFC4122 - VariantMicrosoft - VariantFuture -) - -// UUID DCE domains. -const ( - DomainPerson = iota - DomainGroup - DomainOrg -) - -// String parse helpers. -var ( - urnPrefix = []byte("urn:uuid:") - byteGroups = []int{8, 4, 4, 4, 12} -) - -// Nil is special form of UUID that is specified to have all -// 128 bits set to zero. -var Nil = UUID{} - -// Predefined namespace UUIDs. -var ( - NamespaceDNS = Must(FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) - NamespaceURL = Must(FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) - NamespaceOID = Must(FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) - NamespaceX500 = Must(FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) -) - -// Equal returns true if u1 and u2 equals, otherwise returns false. -func Equal(u1 UUID, u2 UUID) bool { - return bytes.Equal(u1[:], u2[:]) -} - -// Version returns algorithm version used to generate UUID. -func (u UUID) Version() byte { - return u[6] >> 4 -} - -// Variant returns UUID layout variant. 
-func (u UUID) Variant() byte { - switch { - case (u[8] >> 7) == 0x00: - return VariantNCS - case (u[8] >> 6) == 0x02: - return VariantRFC4122 - case (u[8] >> 5) == 0x06: - return VariantMicrosoft - case (u[8] >> 5) == 0x07: - fallthrough - default: - return VariantFuture - } -} - -// Bytes returns bytes slice representation of UUID. -func (u UUID) Bytes() []byte { - return u[:] -} - -// Returns canonical string representation of UUID: -// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. -func (u UUID) String() string { - buf := make([]byte, 36) - - hex.Encode(buf[0:8], u[0:4]) - buf[8] = '-' - hex.Encode(buf[9:13], u[4:6]) - buf[13] = '-' - hex.Encode(buf[14:18], u[6:8]) - buf[18] = '-' - hex.Encode(buf[19:23], u[8:10]) - buf[23] = '-' - hex.Encode(buf[24:], u[10:]) - - return string(buf) -} - -// SetVersion sets version bits. -func (u *UUID) SetVersion(v byte) { - u[6] = (u[6] & 0x0f) | (v << 4) -} - -// SetVariant sets variant bits. -func (u *UUID) SetVariant(v byte) { - switch v { - case VariantNCS: - u[8] = (u[8]&(0xff>>1) | (0x00 << 7)) - case VariantRFC4122: - u[8] = (u[8]&(0xff>>2) | (0x02 << 6)) - case VariantMicrosoft: - u[8] = (u[8]&(0xff>>3) | (0x06 << 5)) - case VariantFuture: - fallthrough - default: - u[8] = (u[8]&(0xff>>3) | (0x07 << 5)) - } -} - -// Must is a helper that wraps a call to a function returning (UUID, error) -// and panics if the error is non-nil. 
It is intended for use in variable -// initializations such as -// var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000")); -func Must(u UUID, err error) UUID { - if err != nil { - panic(err) - } - return u -} diff --git a/vendor/github.com/satori/go.uuid/uuid_test.go b/vendor/github.com/satori/go.uuid/uuid_test.go deleted file mode 100644 index beb336d..0000000 --- a/vendor/github.com/satori/go.uuid/uuid_test.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright (C) 2013-2018 by Maxim Bublis -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package uuid - -import ( - "bytes" - "testing" - - . "gopkg.in/check.v1" -) - -// Hook up gocheck into the "go test" runner. 
-func TestUUID(t *testing.T) { TestingT(t) } - -type testSuite struct{} - -var _ = Suite(&testSuite{}) - -func (s *testSuite) TestBytes(c *C) { - u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - - bytes1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - - c.Assert(bytes.Equal(u.Bytes(), bytes1), Equals, true) -} - -func (s *testSuite) TestString(c *C) { - c.Assert(NamespaceDNS.String(), Equals, "6ba7b810-9dad-11d1-80b4-00c04fd430c8") -} - -func (s *testSuite) TestEqual(c *C) { - c.Assert(Equal(NamespaceDNS, NamespaceDNS), Equals, true) - c.Assert(Equal(NamespaceDNS, NamespaceURL), Equals, false) -} - -func (s *testSuite) TestVersion(c *C) { - u := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} - c.Assert(u.Version(), Equals, V1) -} - -func (s *testSuite) TestSetVersion(c *C) { - u := UUID{} - u.SetVersion(4) - c.Assert(u.Version(), Equals, V4) -} - -func (s *testSuite) TestVariant(c *C) { - u1 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} - c.Assert(u1.Variant(), Equals, VariantNCS) - - u2 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} - c.Assert(u2.Variant(), Equals, VariantRFC4122) - - u3 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} - c.Assert(u3.Variant(), Equals, VariantMicrosoft) - - u4 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} - c.Assert(u4.Variant(), Equals, VariantFuture) -} - -func (s *testSuite) TestSetVariant(c *C) { - u := UUID{} - u.SetVariant(VariantNCS) - c.Assert(u.Variant(), Equals, VariantNCS) - u.SetVariant(VariantRFC4122) - c.Assert(u.Variant(), Equals, VariantRFC4122) - u.SetVariant(VariantMicrosoft) - c.Assert(u.Variant(), Equals, 
VariantMicrosoft) - u.SetVariant(VariantFuture) - c.Assert(u.Variant(), Equals, VariantFuture) -} diff --git a/vendor/github.com/streadway/amqp/.gitignore b/vendor/github.com/streadway/amqp/.gitignore new file mode 100644 index 0000000..667fb50 --- /dev/null +++ b/vendor/github.com/streadway/amqp/.gitignore @@ -0,0 +1,12 @@ +certs/* +spec/spec +examples/simple-consumer/simple-consumer +examples/simple-producer/simple-producer + +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/**/usage.statistics.xml +.idea/**/dictionaries +.idea/**/shelf + +.idea/**/contentModel.xml diff --git a/vendor/github.com/streadway/amqp/.travis.yml b/vendor/github.com/streadway/amqp/.travis.yml new file mode 100644 index 0000000..2d22a7a --- /dev/null +++ b/vendor/github.com/streadway/amqp/.travis.yml @@ -0,0 +1,19 @@ +language: go + +go: + - 1.10.x + - 1.11.x + - 1.12.x + +services: + - rabbitmq + +env: + - GO111MODULE=on AMQP_URL=amqp://guest:guest@127.0.0.1:5672/ + +before_install: + - go get -v golang.org/x/lint/golint + +script: + - ./pre-commit + - go test -cpu=1,2 -v -tags integration ./... diff --git a/vendor/github.com/streadway/amqp/CONTRIBUTING.md b/vendor/github.com/streadway/amqp/CONTRIBUTING.md new file mode 100644 index 0000000..c87f3d7 --- /dev/null +++ b/vendor/github.com/streadway/amqp/CONTRIBUTING.md @@ -0,0 +1,35 @@ +## Prequisites + +1. Go: [https://golang.org/dl/](https://golang.org/dl/) +1. Golint `go get -u -v github.com/golang/lint/golint` + +## Contributing + +The workflow is pretty standard: + +1. Fork github.com/streadway/amqp +1. Add the pre-commit hook: `ln -s ../../pre-commit .git/hooks/pre-commit` +1. Create your feature branch (`git checkout -b my-new-feature`) +1. Run integration tests (see below) +1. **Implement tests** +1. Implement fixs +1. Commit your changes (`git commit -am 'Add some feature'`) +1. Push to a branch (`git push -u origin my-new-feature`) +1. 
Submit a pull request + +## Running Tests + +The test suite assumes that: + + * A RabbitMQ node is running on localhost with all defaults: [https://www.rabbitmq.com/download.html](https://www.rabbitmq.com/download.html) + * `AMQP_URL` is exported to `amqp://guest:guest@127.0.0.1:5672/` + +### Integration Tests + +After starting a local RabbitMQ, run integration tests with the following: + + env AMQP_URL=amqp://guest:guest@127.0.0.1:5672/ go test -v -cpu 2 -tags integration -race + +All integration tests should use the `integrationConnection(...)` test +helpers defined in `integration_test.go` to setup the integration environment +and logging. diff --git a/vendor/github.com/streadway/amqp/LICENSE b/vendor/github.com/streadway/amqp/LICENSE new file mode 100644 index 0000000..07b8968 --- /dev/null +++ b/vendor/github.com/streadway/amqp/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2012-2019, Sean Treadway, SoundCloud Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + +Redistributions in binary form must reproduce the above copyright notice, this +list of conditions and the following disclaimer in the documentation and/or +other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/streadway/amqp/README.md b/vendor/github.com/streadway/amqp/README.md new file mode 100644 index 0000000..287830b --- /dev/null +++ b/vendor/github.com/streadway/amqp/README.md @@ -0,0 +1,93 @@ +[![Build Status](https://api.travis-ci.org/streadway/amqp.svg)](http://travis-ci.org/streadway/amqp) [![GoDoc](https://godoc.org/github.com/streadway/amqp?status.svg)](http://godoc.org/github.com/streadway/amqp) + +# Go RabbitMQ Client Library + +This is an AMQP 0.9.1 client with RabbitMQ extensions in Go. + +## Project Maturity + +This project has been used in production systems for many years. It is reasonably mature +and feature complete, and as of November 2016 has [a team of maintainers](https://github.com/streadway/amqp/issues/215). + +Future API changes are unlikely but possible. They will be discussed on [Github +issues](https://github.com/streadway/amqp/issues) along with any bugs or +enhancements. + +## Supported Go Versions + +This library supports two most recent Go release series, currently 1.10 and 1.11. + + +## Supported RabbitMQ Versions + +This project supports RabbitMQ versions starting with `2.0` but primarily tested +against reasonably recent `3.x` releases. Some features and behaviours may be +server version-specific. + +## Goals + +Provide a functional interface that closely represents the AMQP 0.9.1 model +targeted to RabbitMQ as a server. 
This includes the minimum necessary to +interact the semantics of the protocol. + +## Non-goals + +Things not intended to be supported. + + * Auto reconnect and re-synchronization of client and server topologies. + * Reconnection would require understanding the error paths when the + topology cannot be declared on reconnect. This would require a new set + of types and code paths that are best suited at the call-site of this + package. AMQP has a dynamic topology that needs all peers to agree. If + this doesn't happen, the behavior is undefined. Instead of producing a + possible interface with undefined behavior, this package is designed to + be simple for the caller to implement the necessary connection-time + topology declaration so that reconnection is trivial and encapsulated in + the caller's application code. + * AMQP Protocol negotiation for forward or backward compatibility. + * 0.9.1 is stable and widely deployed. Versions 0.10 and 1.0 are divergent + specifications that change the semantics and wire format of the protocol. + We will accept patches for other protocol support but have no plans for + implementation ourselves. + * Anything other than PLAIN and EXTERNAL authentication mechanisms. + * Keeping the mechanisms interface modular makes it possible to extend + outside of this package. If other mechanisms prove to be popular, then + we would accept patches to include them in this package. + +## Usage + +See the 'examples' subdirectory for simple producers and consumers executables. +If you have a use-case in mind which isn't well-represented by the examples, +please file an issue. + +## Documentation + +Use [Godoc documentation](http://godoc.org/github.com/streadway/amqp) for +reference and usage. + +[RabbitMQ tutorials in +Go](https://github.com/rabbitmq/rabbitmq-tutorials/tree/master/go) are also +available. + +## Contributing + +Pull requests are very much welcomed. 
Create your pull request on a non-master +branch, make sure a test or example is included that covers your change and +your commits represent coherent changes that include a reason for the change. + +To run the integration tests, make sure you have RabbitMQ running on any host, +export the environment variable `AMQP_URL=amqp://host/` and run `go test -tags +integration`. TravisCI will also run the integration tests. + +Thanks to the [community of contributors](https://github.com/streadway/amqp/graphs/contributors). + +## External packages + + * [Google App Engine Dialer support](https://github.com/soundtrackyourbrand/gaeamqp) + * [RabbitMQ examples in Go](https://github.com/rabbitmq/rabbitmq-tutorials/tree/master/go) + +## License + +BSD 2 clause - see LICENSE for more details. + + diff --git a/vendor/github.com/streadway/amqp/_examples/pubsub/pubsub.go b/vendor/github.com/streadway/amqp/_examples/pubsub/pubsub.go new file mode 100644 index 0000000..edeb470 --- /dev/null +++ b/vendor/github.com/streadway/amqp/_examples/pubsub/pubsub.go @@ -0,0 +1,234 @@ +// Command pubsub is an example of a fanout exchange with dynamic reliable +// membership, reading from stdin, writing to stdout. +// +// This example shows how to implement reconnect logic independent from a +// publish/subscribe loop with bridges to application types. + +package main + +import ( + "bufio" + "crypto/sha1" + "flag" + "fmt" + "io" + "log" + "os" + + "github.com/streadway/amqp" + "golang.org/x/net/context" +) + +var url = flag.String("url", "amqp:///", "AMQP url for both the publisher and subscriber") + +// exchange binds the publishers to the subscribers +const exchange = "pubsub" + +// message is the application type for a message. This can contain identity, +// or a reference to the recevier chan for further demuxing. 
+type message []byte + +// session composes an amqp.Connection with an amqp.Channel +type session struct { + *amqp.Connection + *amqp.Channel +} + +// Close tears the connection down, taking the channel with it. +func (s session) Close() error { + if s.Connection == nil { + return nil + } + return s.Connection.Close() +} + +// redial continually connects to the URL, exiting the program when no longer possible +func redial(ctx context.Context, url string) chan chan session { + sessions := make(chan chan session) + + go func() { + sess := make(chan session) + defer close(sessions) + + for { + select { + case sessions <- sess: + case <-ctx.Done(): + log.Println("shutting down session factory") + return + } + + conn, err := amqp.Dial(url) + if err != nil { + log.Fatalf("cannot (re)dial: %v: %q", err, url) + } + + ch, err := conn.Channel() + if err != nil { + log.Fatalf("cannot create channel: %v", err) + } + + if err := ch.ExchangeDeclare(exchange, "fanout", false, true, false, false, nil); err != nil { + log.Fatalf("cannot declare fanout exchange: %v", err) + } + + select { + case sess <- session{conn, ch}: + case <-ctx.Done(): + log.Println("shutting down new session") + return + } + } + }() + + return sessions +} + +// publish publishes messages to a reconnecting session to a fanout exchange. +// It receives from the application specific source of messages. 
+func publish(sessions chan chan session, messages <-chan message) { + for session := range sessions { + var ( + running bool + reading = messages + pending = make(chan message, 1) + confirm = make(chan amqp.Confirmation, 1) + ) + + pub := <-session + + // publisher confirms for this channel/connection + if err := pub.Confirm(false); err != nil { + log.Printf("publisher confirms not supported") + close(confirm) // confirms not supported, simulate by always nacking + } else { + pub.NotifyPublish(confirm) + } + + log.Printf("publishing...") + + Publish: + for { + var body message + select { + case confirmed, ok := <-confirm: + if !ok { + break Publish + } + if !confirmed.Ack { + log.Printf("nack message %d, body: %q", confirmed.DeliveryTag, string(body)) + } + reading = messages + + case body = <-pending: + routingKey := "ignored for fanout exchanges, application dependent for other exchanges" + err := pub.Publish(exchange, routingKey, false, false, amqp.Publishing{ + Body: body, + }) + // Retry failed delivery on the next session + if err != nil { + pending <- body + pub.Close() + break Publish + } + + case body, running = <-reading: + // all messages consumed + if !running { + return + } + // work on pending delivery until ack'd + pending <- body + reading = nil + } + } + } +} + +// identity returns the same host/process unique string for the lifetime of +// this process so that subscriber reconnections reuse the same queue name. +func identity() string { + hostname, err := os.Hostname() + h := sha1.New() + fmt.Fprint(h, hostname) + fmt.Fprint(h, err) + fmt.Fprint(h, os.Getpid()) + return fmt.Sprintf("%x", h.Sum(nil)) +} + +// subscribe consumes deliveries from an exclusive queue from a fanout exchange and sends to the application specific messages chan. 
+func subscribe(sessions chan chan session, messages chan<- message) { + queue := identity() + + for session := range sessions { + sub := <-session + + if _, err := sub.QueueDeclare(queue, false, true, true, false, nil); err != nil { + log.Printf("cannot consume from exclusive queue: %q, %v", queue, err) + return + } + + routingKey := "application specific routing key for fancy toplogies" + if err := sub.QueueBind(queue, routingKey, exchange, false, nil); err != nil { + log.Printf("cannot consume without a binding to exchange: %q, %v", exchange, err) + return + } + + deliveries, err := sub.Consume(queue, "", false, true, false, false, nil) + if err != nil { + log.Printf("cannot consume from: %q, %v", queue, err) + return + } + + log.Printf("subscribed...") + + for msg := range deliveries { + messages <- message(msg.Body) + sub.Ack(msg.DeliveryTag, false) + } + } +} + +// read is this application's translation to the message format, scanning from +// stdin. +func read(r io.Reader) <-chan message { + lines := make(chan message) + go func() { + defer close(lines) + scan := bufio.NewScanner(r) + for scan.Scan() { + lines <- message(scan.Bytes()) + } + }() + return lines +} + +// write is this application's subscriber of application messages, printing to +// stdout. 
+func write(w io.Writer) chan<- message { + lines := make(chan message) + go func() { + for line := range lines { + fmt.Fprintln(w, string(line)) + } + }() + return lines +} + +func main() { + flag.Parse() + + ctx, done := context.WithCancel(context.Background()) + + go func() { + publish(redial(ctx, *url), read(os.Stdin)) + done() + }() + + go func() { + subscribe(redial(ctx, *url), write(os.Stdout)) + done() + }() + + <-ctx.Done() +} diff --git a/vendor/github.com/streadway/amqp/_examples/simple-consumer/consumer.go b/vendor/github.com/streadway/amqp/_examples/simple-consumer/consumer.go new file mode 100644 index 0000000..03d30de --- /dev/null +++ b/vendor/github.com/streadway/amqp/_examples/simple-consumer/consumer.go @@ -0,0 +1,169 @@ +// This example declares a durable Exchange, an ephemeral (auto-delete) Queue, +// binds the Queue to the Exchange with a binding key, and consumes every +// message published to that Exchange with that routing key. +// +package main + +import ( + "flag" + "fmt" + "github.com/streadway/amqp" + "log" + "time" +) + +var ( + uri = flag.String("uri", "amqp://guest:guest@localhost:5672/", "AMQP URI") + exchange = flag.String("exchange", "test-exchange", "Durable, non-auto-deleted AMQP exchange name") + exchangeType = flag.String("exchange-type", "direct", "Exchange type - direct|fanout|topic|x-custom") + queue = flag.String("queue", "test-queue", "Ephemeral AMQP queue name") + bindingKey = flag.String("key", "test-key", "AMQP binding key") + consumerTag = flag.String("consumer-tag", "simple-consumer", "AMQP consumer tag (should not be blank)") + lifetime = flag.Duration("lifetime", 5*time.Second, "lifetime of process before shutdown (0s=infinite)") +) + +func init() { + flag.Parse() +} + +func main() { + c, err := NewConsumer(*uri, *exchange, *exchangeType, *queue, *bindingKey, *consumerTag) + if err != nil { + log.Fatalf("%s", err) + } + + if *lifetime > 0 { + log.Printf("running for %s", *lifetime) + time.Sleep(*lifetime) + } else 
{ + log.Printf("running forever") + select {} + } + + log.Printf("shutting down") + + if err := c.Shutdown(); err != nil { + log.Fatalf("error during shutdown: %s", err) + } +} + +type Consumer struct { + conn *amqp.Connection + channel *amqp.Channel + tag string + done chan error +} + +func NewConsumer(amqpURI, exchange, exchangeType, queueName, key, ctag string) (*Consumer, error) { + c := &Consumer{ + conn: nil, + channel: nil, + tag: ctag, + done: make(chan error), + } + + var err error + + log.Printf("dialing %q", amqpURI) + c.conn, err = amqp.Dial(amqpURI) + if err != nil { + return nil, fmt.Errorf("Dial: %s", err) + } + + go func() { + fmt.Printf("closing: %s", <-c.conn.NotifyClose(make(chan *amqp.Error))) + }() + + log.Printf("got Connection, getting Channel") + c.channel, err = c.conn.Channel() + if err != nil { + return nil, fmt.Errorf("Channel: %s", err) + } + + log.Printf("got Channel, declaring Exchange (%q)", exchange) + if err = c.channel.ExchangeDeclare( + exchange, // name of the exchange + exchangeType, // type + true, // durable + false, // delete when complete + false, // internal + false, // noWait + nil, // arguments + ); err != nil { + return nil, fmt.Errorf("Exchange Declare: %s", err) + } + + log.Printf("declared Exchange, declaring Queue %q", queueName) + queue, err := c.channel.QueueDeclare( + queueName, // name of the queue + true, // durable + false, // delete when unused + false, // exclusive + false, // noWait + nil, // arguments + ) + if err != nil { + return nil, fmt.Errorf("Queue Declare: %s", err) + } + + log.Printf("declared Queue (%q %d messages, %d consumers), binding to Exchange (key %q)", + queue.Name, queue.Messages, queue.Consumers, key) + + if err = c.channel.QueueBind( + queue.Name, // name of the queue + key, // bindingKey + exchange, // sourceExchange + false, // noWait + nil, // arguments + ); err != nil { + return nil, fmt.Errorf("Queue Bind: %s", err) + } + + log.Printf("Queue bound to Exchange, starting Consume 
(consumer tag %q)", c.tag) + deliveries, err := c.channel.Consume( + queue.Name, // name + c.tag, // consumerTag, + false, // noAck + false, // exclusive + false, // noLocal + false, // noWait + nil, // arguments + ) + if err != nil { + return nil, fmt.Errorf("Queue Consume: %s", err) + } + + go handle(deliveries, c.done) + + return c, nil +} + +func (c *Consumer) Shutdown() error { + // will close() the deliveries channel + if err := c.channel.Cancel(c.tag, true); err != nil { + return fmt.Errorf("Consumer cancel failed: %s", err) + } + + if err := c.conn.Close(); err != nil { + return fmt.Errorf("AMQP connection close error: %s", err) + } + + defer log.Printf("AMQP shutdown OK") + + // wait for handle() to exit + return <-c.done +} + +func handle(deliveries <-chan amqp.Delivery, done chan error) { + for d := range deliveries { + log.Printf( + "got %dB delivery: [%v] %q", + len(d.Body), + d.DeliveryTag, + d.Body, + ) + d.Ack(false) + } + log.Printf("handle: deliveries channel closed") + done <- nil +} diff --git a/vendor/github.com/streadway/amqp/_examples/simple-producer/producer.go b/vendor/github.com/streadway/amqp/_examples/simple-producer/producer.go new file mode 100644 index 0000000..1998683 --- /dev/null +++ b/vendor/github.com/streadway/amqp/_examples/simple-producer/producer.go @@ -0,0 +1,112 @@ +// This example declares a durable Exchange, and publishes a single message to +// that Exchange with a given routing key. 
+// +package main + +import ( + "flag" + "fmt" + "log" + + "github.com/streadway/amqp" +) + +var ( + uri = flag.String("uri", "amqp://guest:guest@localhost:5672/", "AMQP URI") + exchangeName = flag.String("exchange", "test-exchange", "Durable AMQP exchange name") + exchangeType = flag.String("exchange-type", "direct", "Exchange type - direct|fanout|topic|x-custom") + routingKey = flag.String("key", "test-key", "AMQP routing key") + body = flag.String("body", "foobar", "Body of message") + reliable = flag.Bool("reliable", true, "Wait for the publisher confirmation before exiting") +) + +func init() { + flag.Parse() +} + +func main() { + if err := publish(*uri, *exchangeName, *exchangeType, *routingKey, *body, *reliable); err != nil { + log.Fatalf("%s", err) + } + log.Printf("published %dB OK", len(*body)) +} + +func publish(amqpURI, exchange, exchangeType, routingKey, body string, reliable bool) error { + + // This function dials, connects, declares, publishes, and tears down, + // all in one go. In a real service, you probably want to maintain a + // long-lived connection as state, and publish against that. + + log.Printf("dialing %q", amqpURI) + connection, err := amqp.Dial(amqpURI) + if err != nil { + return fmt.Errorf("Dial: %s", err) + } + defer connection.Close() + + log.Printf("got Connection, getting Channel") + channel, err := connection.Channel() + if err != nil { + return fmt.Errorf("Channel: %s", err) + } + + log.Printf("got Channel, declaring %q Exchange (%q)", exchangeType, exchange) + if err := channel.ExchangeDeclare( + exchange, // name + exchangeType, // type + true, // durable + false, // auto-deleted + false, // internal + false, // noWait + nil, // arguments + ); err != nil { + return fmt.Errorf("Exchange Declare: %s", err) + } + + // Reliable publisher confirms require confirm.select support from the + // connection. 
+ if reliable { + log.Printf("enabling publishing confirms.") + if err := channel.Confirm(false); err != nil { + return fmt.Errorf("Channel could not be put into confirm mode: %s", err) + } + + confirms := channel.NotifyPublish(make(chan amqp.Confirmation, 1)) + + defer confirmOne(confirms) + } + + log.Printf("declared Exchange, publishing %dB body (%q)", len(body), body) + if err = channel.Publish( + exchange, // publish to an exchange + routingKey, // routing to 0 or more queues + false, // mandatory + false, // immediate + amqp.Publishing{ + Headers: amqp.Table{}, + ContentType: "text/plain", + ContentEncoding: "", + Body: []byte(body), + DeliveryMode: amqp.Transient, // 1=non-persistent, 2=persistent + Priority: 0, // 0-9 + // a bunch of application/implementation-specific fields + }, + ); err != nil { + return fmt.Errorf("Exchange Publish: %s", err) + } + + return nil +} + +// One would typically keep a channel of publishings, a sequence number, and a +// set of unacknowledged sequence numbers and loop until the publishing channel +// is closed. +func confirmOne(confirms <-chan amqp.Confirmation) { + log.Printf("waiting for confirmation of one publishing") + + if confirmed := <-confirms; confirmed.Ack { + log.Printf("confirmed delivery with delivery tag: %d", confirmed.DeliveryTag) + } else { + log.Printf("failed delivery of delivery tag: %d", confirmed.DeliveryTag) + } +} diff --git a/vendor/github.com/streadway/amqp/allocator.go b/vendor/github.com/streadway/amqp/allocator.go new file mode 100644 index 0000000..53620e7 --- /dev/null +++ b/vendor/github.com/streadway/amqp/allocator.go @@ -0,0 +1,106 @@ +package amqp + +import ( + "bytes" + "fmt" + "math/big" +) + +const ( + free = 0 + allocated = 1 +) + +// allocator maintains a bitset of allocated numbers. +type allocator struct { + pool *big.Int + last int + low int + high int +} + +// NewAllocator reserves and frees integers out of a range between low and +// high. 
+// +// O(N) worst case space used, where N is maximum allocated, divided by +// sizeof(big.Word) +func newAllocator(low, high int) *allocator { + return &allocator{ + pool: big.NewInt(0), + last: low, + low: low, + high: high, + } +} + +// String returns a string describing the contents of the allocator like +// "allocator[low..high] reserved..until" +// +// O(N) where N is high-low +func (a allocator) String() string { + b := &bytes.Buffer{} + fmt.Fprintf(b, "allocator[%d..%d]", a.low, a.high) + + for low := a.low; low <= a.high; low++ { + high := low + for a.reserved(high) && high <= a.high { + high++ + } + + if high > low+1 { + fmt.Fprintf(b, " %d..%d", low, high-1) + } else if high > low { + fmt.Fprintf(b, " %d", high-1) + } + + low = high + } + return b.String() +} + +// Next reserves and returns the next available number out of the range between +// low and high. If no number is available, false is returned. +// +// O(N) worst case runtime where N is allocated, but usually O(1) due to a +// rolling index into the oldest allocation. +func (a *allocator) next() (int, bool) { + wrapped := a.last + + // Find trailing bit + for ; a.last <= a.high; a.last++ { + if a.reserve(a.last) { + return a.last, true + } + } + + // Find preceding free'd pool + a.last = a.low + + for ; a.last < wrapped; a.last++ { + if a.reserve(a.last) { + return a.last, true + } + } + + return 0, false +} + +// reserve claims the bit if it is not already claimed, returning true if +// successfully claimed. 
+func (a *allocator) reserve(n int) bool { + if a.reserved(n) { + return false + } + a.pool.SetBit(a.pool, n-a.low, allocated) + return true +} + +// reserved returns true if the integer has been allocated +func (a *allocator) reserved(n int) bool { + return a.pool.Bit(n-a.low) == allocated +} + +// release frees the use of the number for another allocation +func (a *allocator) release(n int) { + a.pool.SetBit(a.pool, n-a.low, free) +} diff --git a/vendor/github.com/streadway/amqp/allocator_test.go b/vendor/github.com/streadway/amqp/allocator_test.go new file mode 100644 index 0000000..1a4e7de --- /dev/null +++ b/vendor/github.com/streadway/amqp/allocator_test.go @@ -0,0 +1,94 @@ +package amqp + +import ( + "math/rand" + "testing" +) + +func TestAllocatorFirstShouldBeTheLow(t *testing.T) { + n, ok := newAllocator(1, 2).next() + if !ok { + t.Fatalf("expected to allocate between 1 and 2") + } + + if want, got := 1, n; want != got { + t.Fatalf("expected to first allocation to be 1") + } +} + +func TestAllocatorShouldBeBoundByHigh(t *testing.T) { + a := newAllocator(1, 2) + + if n, ok := a.next(); n != 1 || !ok { + t.Fatalf("expected to allocate between 1 and 2, got %d, %v", n, ok) + } + if n, ok := a.next(); n != 2 || !ok { + t.Fatalf("expected to allocate between 1 and 2, got %d, %v", n, ok) + } + if _, ok := a.next(); ok { + t.Fatalf("expected not to allocate outside of 1 and 2") + } +} + +func TestAllocatorStringShouldIncludeAllocatedRanges(t *testing.T) { + a := newAllocator(1, 10) + a.reserve(1) + a.reserve(2) + a.reserve(3) + a.reserve(5) + a.reserve(6) + a.reserve(8) + a.reserve(10) + + if want, got := "allocator[1..10] 1..3 5..6 8 10", a.String(); want != got { + t.Fatalf("expected String of %q, got %q", want, got) + } +} + +func TestAllocatorShouldReuseReleased(t *testing.T) { + a := newAllocator(1, 2) + + first, _ := a.next() + if want, got := 1, first; want != got { + t.Fatalf("expected allocation to be %d, got: %d", want, got) + } + + second, _ := a.next() 
+ if want, got := 2, second; want != got { + t.Fatalf("expected allocation to be %d, got: %d", want, got) + } + + a.release(first) + + third, _ := a.next() + if want, got := first, third; want != got { + t.Fatalf("expected third allocation to be %d, got: %d", want, got) + } + + _, ok := a.next() + if want, got := false, ok; want != got { + t.Fatalf("expected fourth allocation to saturate the pool") + } +} + +func TestAllocatorReleasesKeepUpWithAllocationsForAllSizes(t *testing.T) { + if testing.Short() { + t.Skip() + } + + const runs = 5 + const max = 13 + + for lim := 1; lim < 2<= lim { // fills the allocator + a.release(int(rand.Int63n(int64(lim)))) + } + if _, ok := a.next(); !ok { + t.Fatalf("expected %d runs of random release of size %d not to fail on allocation %d", runs, lim, i) + } + } + } +} diff --git a/vendor/github.com/streadway/amqp/auth.go b/vendor/github.com/streadway/amqp/auth.go new file mode 100644 index 0000000..435c94b --- /dev/null +++ b/vendor/github.com/streadway/amqp/auth.go @@ -0,0 +1,62 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "fmt" +) + +// Authentication interface provides a means for different SASL authentication +// mechanisms to be used during connection tuning. +type Authentication interface { + Mechanism() string + Response() string +} + +// PlainAuth is a similar to Basic Auth in HTTP. +type PlainAuth struct { + Username string + Password string +} + +// Mechanism returns "PLAIN" +func (auth *PlainAuth) Mechanism() string { + return "PLAIN" +} + +// Response returns the null character delimited encoding for the SASL PLAIN Mechanism. 
+func (auth *PlainAuth) Response() string { + return fmt.Sprintf("\000%s\000%s", auth.Username, auth.Password) +} + +// AMQPlainAuth is similar to PlainAuth +type AMQPlainAuth struct { + Username string + Password string +} + +// Mechanism returns "AMQPLAIN" +func (auth *AMQPlainAuth) Mechanism() string { + return "AMQPLAIN" +} + +// Response returns the null character delimited encoding for the SASL PLAIN Mechanism. +func (auth *AMQPlainAuth) Response() string { + return fmt.Sprintf("LOGIN:%sPASSWORD:%s", auth.Username, auth.Password) +} + +// Finds the first mechanism preferred by the client that the server supports. +func pickSASLMechanism(client []Authentication, serverMechanisms []string) (auth Authentication, ok bool) { + for _, auth = range client { + for _, mech := range serverMechanisms { + if auth.Mechanism() == mech { + return auth, true + } + } + } + + return +} diff --git a/vendor/github.com/streadway/amqp/certs.sh b/vendor/github.com/streadway/amqp/certs.sh new file mode 100755 index 0000000..834f422 --- /dev/null +++ b/vendor/github.com/streadway/amqp/certs.sh @@ -0,0 +1,159 @@ +#!/bin/sh +# +# Creates the CA, server and client certs to be used by tls_test.go +# http://www.rabbitmq.com/ssl.html +# +# Copy stdout into the const section of tls_test.go or use for RabbitMQ +# +root=$PWD/certs + +if [ -f $root/ca/serial ]; then + echo >&2 "Previous installation found" + echo >&2 "Remove $root/ca and rerun to overwrite" + exit 1 +fi + +mkdir -p $root/ca/private +mkdir -p $root/ca/certs +mkdir -p $root/server +mkdir -p $root/client + +cd $root/ca + +chmod 700 private +touch index.txt +echo 'unique_subject = no' > index.txt.attr +echo '01' > serial +echo >openssl.cnf ' +[ ca ] +default_ca = testca + +[ testca ] +dir = . 
+certificate = $dir/cacert.pem +database = $dir/index.txt +new_certs_dir = $dir/certs +private_key = $dir/private/cakey.pem +serial = $dir/serial + +default_crl_days = 7 +default_days = 3650 +default_md = sha1 + +policy = testca_policy +x509_extensions = certificate_extensions + +[ testca_policy ] +commonName = supplied +stateOrProvinceName = optional +countryName = optional +emailAddress = optional +organizationName = optional +organizationalUnitName = optional + +[ certificate_extensions ] +basicConstraints = CA:false + +[ req ] +default_bits = 2048 +default_keyfile = ./private/cakey.pem +default_md = sha1 +prompt = yes +distinguished_name = root_ca_distinguished_name +x509_extensions = root_ca_extensions + +[ root_ca_distinguished_name ] +commonName = hostname + +[ root_ca_extensions ] +basicConstraints = CA:true +keyUsage = keyCertSign, cRLSign + +[ client_ca_extensions ] +basicConstraints = CA:false +keyUsage = digitalSignature +extendedKeyUsage = 1.3.6.1.5.5.7.3.2 + +[ server_ca_extensions ] +basicConstraints = CA:false +keyUsage = keyEncipherment +extendedKeyUsage = 1.3.6.1.5.5.7.3.1 +subjectAltName = @alt_names + +[ alt_names ] +IP.1 = 127.0.0.1 +' + +openssl req \ + -x509 \ + -nodes \ + -config openssl.cnf \ + -newkey rsa:2048 \ + -days 3650 \ + -subj "/CN=MyTestCA/" \ + -out cacert.pem \ + -outform PEM + +openssl x509 \ + -in cacert.pem \ + -out cacert.cer \ + -outform DER + +openssl genrsa -out $root/server/key.pem 2048 +openssl genrsa -out $root/client/key.pem 2048 + +openssl req \ + -new \ + -nodes \ + -config openssl.cnf \ + -subj "/CN=127.0.0.1/O=server/" \ + -key $root/server/key.pem \ + -out $root/server/req.pem \ + -outform PEM + +openssl req \ + -new \ + -nodes \ + -config openssl.cnf \ + -subj "/CN=127.0.0.1/O=client/" \ + -key $root/client/key.pem \ + -out $root/client/req.pem \ + -outform PEM + +openssl ca \ + -config openssl.cnf \ + -in $root/server/req.pem \ + -out $root/server/cert.pem \ + -notext \ + -batch \ + -extensions 
server_ca_extensions + +openssl ca \ + -config openssl.cnf \ + -in $root/client/req.pem \ + -out $root/client/cert.pem \ + -notext \ + -batch \ + -extensions client_ca_extensions + +cat <<-END +const caCert = \` +`cat $root/ca/cacert.pem` +\` + +const serverCert = \` +`cat $root/server/cert.pem` +\` + +const serverKey = \` +`cat $root/server/key.pem` +\` + +const clientCert = \` +`cat $root/client/cert.pem` +\` + +const clientKey = \` +`cat $root/client/key.pem` +\` +END diff --git a/vendor/github.com/streadway/amqp/channel.go b/vendor/github.com/streadway/amqp/channel.go new file mode 100644 index 0000000..3898ed7 --- /dev/null +++ b/vendor/github.com/streadway/amqp/channel.go @@ -0,0 +1,1590 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "reflect" + "sync" + "sync/atomic" +) + +// 0 1 3 7 size+7 size+8 +// +------+---------+-------------+ +------------+ +-----------+ +// | type | channel | size | | payload | | frame-end | +// +------+---------+-------------+ +------------+ +-----------+ +// octet short long size octets octet +const frameHeaderSize = 1 + 2 + 4 + 1 + +/* +Channel represents an AMQP channel. Used as a context for valid message +exchange. Errors on methods with this Channel as a receiver means this channel +should be discarded and a new channel established. + +*/ +type Channel struct { + destructor sync.Once + m sync.Mutex // struct field mutex + confirmM sync.Mutex // publisher confirms state mutex + notifyM sync.RWMutex + + connection *Connection + + rpc chan message + consumers *consumers + + id uint16 + + // closed is set to 1 when the channel has been closed - see Channel.send() + closed int32 + + // true when we will never notify again + noNotify bool + + // Channel and Connection exceptions will be broadcast on these listeners. 
+ closes []chan *Error + + // Listeners for active=true flow control. When true is sent to a listener, + // publishing should pause until false is sent to listeners. + flows []chan bool + + // Listeners for returned publishings for unroutable messages on mandatory + // publishings or undeliverable messages on immediate publishings. + returns []chan Return + + // Listeners for when the server notifies the client that + // a consumer has been cancelled. + cancels []chan string + + // Allocated when in confirm mode in order to track publish counter and order confirms + confirms *confirms + confirming bool + + // Selects on any errors from shutdown during RPC + errors chan *Error + + // State machine that manages frame order, must only be mutated by the connection + recv func(*Channel, frame) error + + // Current state for frame re-assembly, only mutated from recv + message messageWithContent + header *headerFrame + body []byte +} + +// Constructs a new channel with the given framing rules +func newChannel(c *Connection, id uint16) *Channel { + return &Channel{ + connection: c, + id: id, + rpc: make(chan message), + consumers: makeConsumers(), + confirms: newConfirms(), + recv: (*Channel).recvMethod, + errors: make(chan *Error, 1), + } +} + +// shutdown is called by Connection after the channel has been removed from the +// connection registry. 
+func (ch *Channel) shutdown(e *Error) { + ch.destructor.Do(func() { + ch.m.Lock() + defer ch.m.Unlock() + + // Grab an exclusive lock for the notify channels + ch.notifyM.Lock() + defer ch.notifyM.Unlock() + + // Broadcast abnormal shutdown + if e != nil { + for _, c := range ch.closes { + c <- e + } + } + + // Signal that from now on, Channel.send() should call + // Channel.sendClosed() + atomic.StoreInt32(&ch.closed, 1) + + // Notify RPC if we're selecting + if e != nil { + ch.errors <- e + } + + ch.consumers.close() + + for _, c := range ch.closes { + close(c) + } + + for _, c := range ch.flows { + close(c) + } + + for _, c := range ch.returns { + close(c) + } + + for _, c := range ch.cancels { + close(c) + } + + // Set the slices to nil to prevent the dispatch() range from sending on + // the now closed channels after we release the notifyM mutex + ch.flows = nil + ch.closes = nil + ch.returns = nil + ch.cancels = nil + + if ch.confirms != nil { + ch.confirms.Close() + } + + close(ch.errors) + ch.noNotify = true + }) +} + +// send calls Channel.sendOpen() during normal operation. +// +// After the channel has been closed, send calls Channel.sendClosed(), ensuring +// only 'channel.close' is sent to the server. +func (ch *Channel) send(msg message) (err error) { + // If the channel is closed, use Channel.sendClosed() + if atomic.LoadInt32(&ch.closed) == 1 { + return ch.sendClosed(msg) + } + + return ch.sendOpen(msg) +} + +func (ch *Channel) open() error { + return ch.call(&channelOpen{}, &channelOpenOk{}) +} + +// Performs a request/response call for when the message is not NoWait and is +// specified as Synchronous. 
+func (ch *Channel) call(req message, res ...message) error { + if err := ch.send(req); err != nil { + return err + } + + if req.wait() { + select { + case e, ok := <-ch.errors: + if ok { + return e + } + return ErrClosed + + case msg := <-ch.rpc: + if msg != nil { + for _, try := range res { + if reflect.TypeOf(msg) == reflect.TypeOf(try) { + // *res = *msg + vres := reflect.ValueOf(try).Elem() + vmsg := reflect.ValueOf(msg).Elem() + vres.Set(vmsg) + return nil + } + } + return ErrCommandInvalid + } + // RPC channel has been closed without an error, likely due to a hard + // error on the Connection. This indicates we have already been + // shutdown and if were waiting, will have returned from the errors chan. + return ErrClosed + } + } + + return nil +} + +func (ch *Channel) sendClosed(msg message) (err error) { + // After a 'channel.close' is sent or received the only valid response is + // channel.close-ok + if _, ok := msg.(*channelCloseOk); ok { + return ch.connection.send(&methodFrame{ + ChannelId: ch.id, + Method: msg, + }) + } + + return ErrClosed +} + +func (ch *Channel) sendOpen(msg message) (err error) { + if content, ok := msg.(messageWithContent); ok { + props, body := content.getContent() + class, _ := content.id() + + // catch client max frame size==0 and server max frame size==0 + // set size to length of what we're trying to publish + var size int + if ch.connection.Config.FrameSize > 0 { + size = ch.connection.Config.FrameSize - frameHeaderSize + } else { + size = len(body) + } + + if err = ch.connection.send(&methodFrame{ + ChannelId: ch.id, + Method: content, + }); err != nil { + return + } + + if err = ch.connection.send(&headerFrame{ + ChannelId: ch.id, + ClassId: class, + Size: uint64(len(body)), + Properties: props, + }); err != nil { + return + } + + // chunk body into size (max frame size - frame header size) + for i, j := 0, size; i < len(body); i, j = j, j+size { + if j > len(body) { + j = len(body) + } + + if err = 
ch.connection.send(&bodyFrame{ + ChannelId: ch.id, + Body: body[i:j], + }); err != nil { + return + } + } + } else { + err = ch.connection.send(&methodFrame{ + ChannelId: ch.id, + Method: msg, + }) + } + + return +} + +// Eventually called via the state machine from the connection's reader +// goroutine, so assumes serialized access. +func (ch *Channel) dispatch(msg message) { + switch m := msg.(type) { + case *channelClose: + // lock before sending connection.close-ok + // to avoid unexpected interleaving with basic.publish frames if + // publishing is happening concurrently + ch.m.Lock() + ch.send(&channelCloseOk{}) + ch.m.Unlock() + ch.connection.closeChannel(ch, newError(m.ReplyCode, m.ReplyText)) + + case *channelFlow: + ch.notifyM.RLock() + for _, c := range ch.flows { + c <- m.Active + } + ch.notifyM.RUnlock() + ch.send(&channelFlowOk{Active: m.Active}) + + case *basicCancel: + ch.notifyM.RLock() + for _, c := range ch.cancels { + c <- m.ConsumerTag + } + ch.notifyM.RUnlock() + ch.consumers.cancel(m.ConsumerTag) + + case *basicReturn: + ret := newReturn(*m) + ch.notifyM.RLock() + for _, c := range ch.returns { + c <- *ret + } + ch.notifyM.RUnlock() + + case *basicAck: + if ch.confirming { + if m.Multiple { + ch.confirms.Multiple(Confirmation{m.DeliveryTag, true}) + } else { + ch.confirms.One(Confirmation{m.DeliveryTag, true}) + } + } + + case *basicNack: + if ch.confirming { + if m.Multiple { + ch.confirms.Multiple(Confirmation{m.DeliveryTag, false}) + } else { + ch.confirms.One(Confirmation{m.DeliveryTag, false}) + } + } + + case *basicDeliver: + ch.consumers.send(m.ConsumerTag, newDelivery(ch, m)) + // TODO log failed consumer and close channel, this can happen when + // deliveries are in flight and a no-wait cancel has happened + + default: + ch.rpc <- msg + } +} + +func (ch *Channel) transition(f func(*Channel, frame) error) error { + ch.recv = f + return nil +} + +func (ch *Channel) recvMethod(f frame) error { + switch frame := f.(type) { + case 
*methodFrame: + if msg, ok := frame.Method.(messageWithContent); ok { + ch.body = make([]byte, 0) + ch.message = msg + return ch.transition((*Channel).recvHeader) + } + + ch.dispatch(frame.Method) // termination state + return ch.transition((*Channel).recvMethod) + + case *headerFrame: + // drop + return ch.transition((*Channel).recvMethod) + + case *bodyFrame: + // drop + return ch.transition((*Channel).recvMethod) + } + + panic("unexpected frame type") +} + +func (ch *Channel) recvHeader(f frame) error { + switch frame := f.(type) { + case *methodFrame: + // interrupt content and handle method + return ch.recvMethod(f) + + case *headerFrame: + // start collecting if we expect body frames + ch.header = frame + + if frame.Size == 0 { + ch.message.setContent(ch.header.Properties, ch.body) + ch.dispatch(ch.message) // termination state + return ch.transition((*Channel).recvMethod) + } + return ch.transition((*Channel).recvContent) + + case *bodyFrame: + // drop and reset + return ch.transition((*Channel).recvMethod) + } + + panic("unexpected frame type") +} + +// state after method + header and before the length +// defined by the header has been reached +func (ch *Channel) recvContent(f frame) error { + switch frame := f.(type) { + case *methodFrame: + // interrupt content and handle method + return ch.recvMethod(f) + + case *headerFrame: + // drop and reset + return ch.transition((*Channel).recvMethod) + + case *bodyFrame: + ch.body = append(ch.body, frame.Body...) + + if uint64(len(ch.body)) >= ch.header.Size { + ch.message.setContent(ch.header.Properties, ch.body) + ch.dispatch(ch.message) // termination state + return ch.transition((*Channel).recvMethod) + } + + return ch.transition((*Channel).recvContent) + } + + panic("unexpected frame type") +} + +/* +Close initiate a clean channel closure by sending a close message with the error +code set to '200'. + +It is safe to call this method multiple times. 
+ +*/ +func (ch *Channel) Close() error { + defer ch.connection.closeChannel(ch, nil) + return ch.call( + &channelClose{ReplyCode: replySuccess}, + &channelCloseOk{}, + ) +} + +/* +NotifyClose registers a listener for when the server sends a channel or +connection exception in the form of a Connection.Close or Channel.Close method. +Connection exceptions will be broadcast to all open channels and all channels +will be closed, where channel exceptions will only be broadcast to listeners to +this channel. + +The chan provided will be closed when the Channel is closed and on a +graceful close, no error will be sent. + +*/ +func (ch *Channel) NotifyClose(c chan *Error) chan *Error { + ch.notifyM.Lock() + defer ch.notifyM.Unlock() + + if ch.noNotify { + close(c) + } else { + ch.closes = append(ch.closes, c) + } + + return c +} + +/* +NotifyFlow registers a listener for basic.flow methods sent by the server. +When `false` is sent on one of the listener channels, all publishers should +pause until a `true` is sent. + +The server may ask the producer to pause or restart the flow of Publishings +sent by on a channel. This is a simple flow-control mechanism that a server can +use to avoid overflowing its queues or otherwise finding itself receiving more +messages than it can process. Note that this method is not intended for window +control. It does not affect contents returned by basic.get-ok methods. + +When a new channel is opened, it is active (flow is active). Some +applications assume that channels are inactive until started. To emulate +this behavior a client MAY open the channel, then pause it. + +Publishers should respond to a flow messages as rapidly as possible and the +server may disconnect over producing channels that do not respect these +messages. + +basic.flow-ok methods will always be returned to the server regardless of +the number of listeners there are. + +To control the flow of deliveries from the server, use the Channel.Flow() +method instead. 
+ +Note: RabbitMQ will rather use TCP pushback on the network connection instead +of sending basic.flow. This means that if a single channel is producing too +much on the same connection, all channels using that connection will suffer, +including acknowledgments from deliveries. Use different Connections if you +desire to interleave consumers and producers in the same process to avoid your +basic.ack messages from getting rate limited with your basic.publish messages. + +*/ +func (ch *Channel) NotifyFlow(c chan bool) chan bool { + ch.notifyM.Lock() + defer ch.notifyM.Unlock() + + if ch.noNotify { + close(c) + } else { + ch.flows = append(ch.flows, c) + } + + return c +} + +/* +NotifyReturn registers a listener for basic.return methods. These can be sent +from the server when a publish is undeliverable either from the mandatory or +immediate flags. + +A return struct has a copy of the Publishing along with some error +information about why the publishing failed. + +*/ +func (ch *Channel) NotifyReturn(c chan Return) chan Return { + ch.notifyM.Lock() + defer ch.notifyM.Unlock() + + if ch.noNotify { + close(c) + } else { + ch.returns = append(ch.returns, c) + } + + return c +} + +/* +NotifyCancel registers a listener for basic.cancel methods. These can be sent +from the server when a queue is deleted or when consuming from a mirrored queue +where the master has just failed (and was moved to another node). + +The subscription tag is returned to the listener. + +*/ +func (ch *Channel) NotifyCancel(c chan string) chan string { + ch.notifyM.Lock() + defer ch.notifyM.Unlock() + + if ch.noNotify { + close(c) + } else { + ch.cancels = append(ch.cancels, c) + } + + return c +} + +/* +NotifyConfirm calls NotifyPublish and starts a goroutine sending +ordered Ack and Nack DeliveryTag to the respective channels. + +For strict ordering, use NotifyPublish instead. 
+*/ +func (ch *Channel) NotifyConfirm(ack, nack chan uint64) (chan uint64, chan uint64) { + confirms := ch.NotifyPublish(make(chan Confirmation, len(ack)+len(nack))) + + go func() { + for c := range confirms { + if c.Ack { + ack <- c.DeliveryTag + } else { + nack <- c.DeliveryTag + } + } + close(ack) + if nack != ack { + close(nack) + } + }() + + return ack, nack +} + +/* +NotifyPublish registers a listener for reliable publishing. Receives from this +chan for every publish after Channel.Confirm will be in order starting with +DeliveryTag 1. + +There will be one and only one Confirmation Publishing starting with the +delivery tag of 1 and progressing sequentially until the total number of +Publishings have been seen by the server. + +Acknowledgments will be received in the order of delivery from the +NotifyPublish channels even if the server acknowledges them out of order. + +The listener chan will be closed when the Channel is closed. + +The capacity of the chan Confirmation must be at least as large as the +number of outstanding publishings. Not having enough buffered chans will +create a deadlock if you attempt to perform other operations on the Connection +or Channel while confirms are in-flight. + +It's advisable to wait for all Confirmations to arrive before calling +Channel.Close() or Connection.Close(). + +*/ +func (ch *Channel) NotifyPublish(confirm chan Confirmation) chan Confirmation { + ch.notifyM.Lock() + defer ch.notifyM.Unlock() + + if ch.noNotify { + close(confirm) + } else { + ch.confirms.Listen(confirm) + } + + return confirm + +} + +/* +Qos controls how many messages or how many bytes the server will try to keep on +the network for consumers before receiving delivery acks. The intent of Qos is +to make sure the network buffers stay full between the server and client. + +With a prefetch count greater than zero, the server will deliver that many +messages to consumers before acknowledgments are received. 
The server ignores +this option when consumers are started with noAck because no acknowledgments +are expected or sent. + +With a prefetch size greater than zero, the server will try to keep at least +that many bytes of deliveries flushed to the network before receiving +acknowledgments from the consumers. This option is ignored when consumers are +started with noAck. + +When global is true, these Qos settings apply to all existing and future +consumers on all channels on the same connection. When false, the Channel.Qos +settings will apply to all existing and future consumers on this channel. + +Please see the RabbitMQ Consumer Prefetch documentation for an explanation of +how the global flag is implemented in RabbitMQ, as it differs from the +AMQP 0.9.1 specification in that global Qos settings are limited in scope to +channels, not connections (https://www.rabbitmq.com/consumer-prefetch.html). + +To get round-robin behavior between consumers consuming from the same queue on +different connections, set the prefetch count to 1, and the next available +message on the server will be delivered to the next available consumer. + +If your consumer work time is reasonably consistent and not much greater +than two times your network round trip time, you will see significant +throughput improvements starting with a prefetch count of 2 or slightly +greater as described by benchmarks on RabbitMQ. + +http://www.rabbitmq.com/blog/2012/04/25/rabbitmq-performance-measurements-part-2/ +*/ +func (ch *Channel) Qos(prefetchCount, prefetchSize int, global bool) error { + return ch.call( + &basicQos{ + PrefetchCount: uint16(prefetchCount), + PrefetchSize: uint32(prefetchSize), + Global: global, + }, + &basicQosOk{}, + ) +} + +/* +Cancel stops deliveries to the consumer chan established in Channel.Consume and +identified by consumer. + +Only use this method to cleanly stop receiving deliveries from the server and +cleanly shut down the consumer chan identified by this tag. 
Using this method +and waiting for remaining messages to flush from the consumer chan will ensure +all messages received on the network will be delivered to the receiver of your +consumer chan. + +Continue consuming from the chan Delivery provided by Channel.Consume until the +chan closes. + +When noWait is true, do not wait for the server to acknowledge the cancel. +Only use this when you are certain there are no deliveries in flight that +require an acknowledgment, otherwise they will arrive and be dropped in the +client without an ack, and will not be redelivered to other consumers. + +*/ +func (ch *Channel) Cancel(consumer string, noWait bool) error { + req := &basicCancel{ + ConsumerTag: consumer, + NoWait: noWait, + } + res := &basicCancelOk{} + + if err := ch.call(req, res); err != nil { + return err + } + + if req.wait() { + ch.consumers.cancel(res.ConsumerTag) + } else { + // Potentially could drop deliveries in flight + ch.consumers.cancel(consumer) + } + + return nil +} + +/* +QueueDeclare declares a queue to hold messages and deliver to consumers. +Declaring creates a queue if it doesn't already exist, or ensures that an +existing queue matches the same parameters. + +Every queue declared gets a default binding to the empty exchange "" which has +the type "direct" with the routing key matching the queue's name. With this +default binding, it is possible to publish messages that route directly to +this queue by publishing to "" with the routing key of the queue name. + + QueueDeclare("alerts", true, false, false, false, nil) + Publish("", "alerts", false, false, Publishing{Body: []byte("...")}) + + Delivery Exchange Key Queue + ----------------------------------------------- + key: alerts -> "" -> alerts -> alerts + +The queue name may be empty, in which case the server will generate a unique name +which will be returned in the Name field of Queue struct. 
+ +Durable and Non-Auto-Deleted queues will survive server restarts and remain +when there are no remaining consumers or bindings. Persistent publishings will +be restored in this queue on server restart. These queues are only able to be +bound to durable exchanges. + +Non-Durable and Auto-Deleted queues will not be redeclared on server restart +and will be deleted by the server after a short time when the last consumer is +canceled or the last consumer's channel is closed. Queues with this lifetime +can also be deleted normally with QueueDelete. These durable queues can only +be bound to non-durable exchanges. + +Non-Durable and Non-Auto-Deleted queues will remain declared as long as the +server is running regardless of how many consumers. This lifetime is useful +for temporary topologies that may have long delays between consumer activity. +These queues can only be bound to non-durable exchanges. + +Durable and Auto-Deleted queues will be restored on server restart, but without +active consumers will not survive and be removed. This Lifetime is unlikely +to be useful. + +Exclusive queues are only accessible by the connection that declares them and +will be deleted when the connection closes. Channels on other connections +will receive an error when attempting to declare, bind, consume, purge or +delete a queue with the same name. + +When noWait is true, the queue will assume to be declared on the server. A +channel exception will arrive if the conditions are met for existing queues +or attempting to modify an existing queue from a different connection. + +When the error return value is not nil, you can assume the queue could not be +declared with these parameters, and the channel will be closed. 
+ +*/ +func (ch *Channel) QueueDeclare(name string, durable, autoDelete, exclusive, noWait bool, args Table) (Queue, error) { + if err := args.Validate(); err != nil { + return Queue{}, err + } + + req := &queueDeclare{ + Queue: name, + Passive: false, + Durable: durable, + AutoDelete: autoDelete, + Exclusive: exclusive, + NoWait: noWait, + Arguments: args, + } + res := &queueDeclareOk{} + + if err := ch.call(req, res); err != nil { + return Queue{}, err + } + + if req.wait() { + return Queue{ + Name: res.Queue, + Messages: int(res.MessageCount), + Consumers: int(res.ConsumerCount), + }, nil + } + + return Queue{Name: name}, nil +} + +/* + +QueueDeclarePassive is functionally and parametrically equivalent to +QueueDeclare, except that it sets the "passive" attribute to true. A passive +queue is assumed by RabbitMQ to already exist, and attempting to connect to a +non-existent queue will cause RabbitMQ to throw an exception. This function +can be used to test for the existence of a queue. + +*/ +func (ch *Channel) QueueDeclarePassive(name string, durable, autoDelete, exclusive, noWait bool, args Table) (Queue, error) { + if err := args.Validate(); err != nil { + return Queue{}, err + } + + req := &queueDeclare{ + Queue: name, + Passive: true, + Durable: durable, + AutoDelete: autoDelete, + Exclusive: exclusive, + NoWait: noWait, + Arguments: args, + } + res := &queueDeclareOk{} + + if err := ch.call(req, res); err != nil { + return Queue{}, err + } + + if req.wait() { + return Queue{ + Name: res.Queue, + Messages: int(res.MessageCount), + Consumers: int(res.ConsumerCount), + }, nil + } + + return Queue{Name: name}, nil +} + +/* +QueueInspect passively declares a queue by name to inspect the current message +count and consumer count. + +Use this method to check how many messages ready for delivery reside in the queue, +how many consumers are receiving deliveries, and whether a queue by this +name already exists. 
+ +If the queue by this name exists, use Channel.QueueDeclare check if it is +declared with specific parameters. + +If a queue by this name does not exist, an error will be returned and the +channel will be closed. + +*/ +func (ch *Channel) QueueInspect(name string) (Queue, error) { + req := &queueDeclare{ + Queue: name, + Passive: true, + } + res := &queueDeclareOk{} + + err := ch.call(req, res) + + state := Queue{ + Name: name, + Messages: int(res.MessageCount), + Consumers: int(res.ConsumerCount), + } + + return state, err +} + +/* +QueueBind binds an exchange to a queue so that publishings to the exchange will +be routed to the queue when the publishing routing key matches the binding +routing key. + + QueueBind("pagers", "alert", "log", false, nil) + QueueBind("emails", "info", "log", false, nil) + + Delivery Exchange Key Queue + ----------------------------------------------- + key: alert --> log ----> alert --> pagers + key: info ---> log ----> info ---> emails + key: debug --> log (none) (dropped) + +If a binding with the same key and arguments already exists between the +exchange and queue, the attempt to rebind will be ignored and the existing +binding will be retained. + +In the case that multiple bindings may cause the message to be routed to the +same queue, the server will only route the publishing once. This is possible +with topic exchanges. + + QueueBind("pagers", "alert", "amq.topic", false, nil) + QueueBind("emails", "info", "amq.topic", false, nil) + QueueBind("emails", "#", "amq.topic", false, nil) // match everything + + Delivery Exchange Key Queue + ----------------------------------------------- + key: alert --> amq.topic ----> alert --> pagers + key: info ---> amq.topic ----> # ------> emails + \---> info ---/ + key: debug --> amq.topic ----> # ------> emails + +It is only possible to bind a durable queue to a durable exchange regardless of +whether the queue or exchange is auto-deleted. 
Bindings between durable queues +and exchanges will also be restored on server restart. + +If the binding could not complete, an error will be returned and the channel +will be closed. + +When noWait is false and the queue could not be bound, the channel will be +closed with an error. + +*/ +func (ch *Channel) QueueBind(name, key, exchange string, noWait bool, args Table) error { + if err := args.Validate(); err != nil { + return err + } + + return ch.call( + &queueBind{ + Queue: name, + Exchange: exchange, + RoutingKey: key, + NoWait: noWait, + Arguments: args, + }, + &queueBindOk{}, + ) +} + +/* +QueueUnbind removes a binding between an exchange and queue matching the key and +arguments. + +It is possible to send and empty string for the exchange name which means to +unbind the queue from the default exchange. + +*/ +func (ch *Channel) QueueUnbind(name, key, exchange string, args Table) error { + if err := args.Validate(); err != nil { + return err + } + + return ch.call( + &queueUnbind{ + Queue: name, + Exchange: exchange, + RoutingKey: key, + Arguments: args, + }, + &queueUnbindOk{}, + ) +} + +/* +QueuePurge removes all messages from the named queue which are not waiting to +be acknowledged. Messages that have been delivered but have not yet been +acknowledged will not be removed. + +When successful, returns the number of messages purged. + +If noWait is true, do not wait for the server response and the number of +messages purged will not be meaningful. +*/ +func (ch *Channel) QueuePurge(name string, noWait bool) (int, error) { + req := &queuePurge{ + Queue: name, + NoWait: noWait, + } + res := &queuePurgeOk{} + + err := ch.call(req, res) + + return int(res.MessageCount), err +} + +/* +QueueDelete removes the queue from the server including all bindings then +purges the messages based on server configuration, returning the number of +messages purged. + +When ifUnused is true, the queue will not be deleted if there are any +consumers on the queue. 
If there are consumers, an error will be returned and +the channel will be closed. + +When ifEmpty is true, the queue will not be deleted if there are any messages +remaining on the queue. If there are messages, an error will be returned and +the channel will be closed. + +When noWait is true, the queue will be deleted without waiting for a response +from the server. The purged message count will not be meaningful. If the queue +could not be deleted, a channel exception will be raised and the channel will +be closed. + +*/ +func (ch *Channel) QueueDelete(name string, ifUnused, ifEmpty, noWait bool) (int, error) { + req := &queueDelete{ + Queue: name, + IfUnused: ifUnused, + IfEmpty: ifEmpty, + NoWait: noWait, + } + res := &queueDeleteOk{} + + err := ch.call(req, res) + + return int(res.MessageCount), err +} + +/* +Consume immediately starts delivering queued messages. + +Begin receiving on the returned chan Delivery before any other operation on the +Connection or Channel. + +Continues deliveries to the returned chan Delivery until Channel.Cancel, +Connection.Close, Channel.Close, or an AMQP exception occurs. Consumers must +range over the chan to ensure all deliveries are received. Unreceived +deliveries will block all methods on the same connection. + +All deliveries in AMQP must be acknowledged. It is expected of the consumer to +call Delivery.Ack after it has successfully processed the delivery. If the +consumer is cancelled or the channel or connection is closed any unacknowledged +deliveries will be requeued at the end of the same queue. + +The consumer is identified by a string that is unique and scoped for all +consumers on this channel. If you wish to eventually cancel the consumer, use +the same non-empty identifier in Channel.Cancel. An empty string will cause +the library to generate a unique identity. 
The consumer identity will be +included in every Delivery in the ConsumerTag field + +When autoAck (also known as noAck) is true, the server will acknowledge +deliveries to this consumer prior to writing the delivery to the network. When +autoAck is true, the consumer should not call Delivery.Ack. Automatically +acknowledging deliveries means that some deliveries may get lost if the +consumer is unable to process them after the server delivers them. +See http://www.rabbitmq.com/confirms.html for more details. + +When exclusive is true, the server will ensure that this is the sole consumer +from this queue. When exclusive is false, the server will fairly distribute +deliveries across multiple consumers. + +The noLocal flag is not supported by RabbitMQ. + +It's advisable to use separate connections for +Channel.Publish and Channel.Consume so not to have TCP pushback on publishing +affect the ability to consume messages, so this parameter is here mostly for +completeness. + +When noWait is true, do not wait for the server to confirm the request and +immediately begin deliveries. If it is not possible to consume, a channel +exception will be raised and the channel will be closed. + +Optional arguments can be provided that have specific semantics for the queue +or server. + +Inflight messages, limited by Channel.Qos will be buffered until received from +the returned chan. + +When the Channel or Connection is closed, all buffered and inflight messages will +be dropped. + +When the consumer tag is cancelled, all inflight messages will be delivered until +the returned chan is closed. + +*/ +func (ch *Channel) Consume(queue, consumer string, autoAck, exclusive, noLocal, noWait bool, args Table) (<-chan Delivery, error) { + // When we return from ch.call, there may be a delivery already for the + // consumer that hasn't been added to the consumer hash yet. Because of + // this, we never rely on the server picking a consumer tag for us. 
+ + if err := args.Validate(); err != nil { + return nil, err + } + + if consumer == "" { + consumer = uniqueConsumerTag() + } + + req := &basicConsume{ + Queue: queue, + ConsumerTag: consumer, + NoLocal: noLocal, + NoAck: autoAck, + Exclusive: exclusive, + NoWait: noWait, + Arguments: args, + } + res := &basicConsumeOk{} + + deliveries := make(chan Delivery) + + ch.consumers.add(consumer, deliveries) + + if err := ch.call(req, res); err != nil { + ch.consumers.cancel(consumer) + return nil, err + } + + return (<-chan Delivery)(deliveries), nil +} + +/* +ExchangeDeclare declares an exchange on the server. If the exchange does not +already exist, the server will create it. If the exchange exists, the server +verifies that it is of the provided type, durability and auto-delete flags. + +Errors returned from this method will close the channel. + +Exchange names starting with "amq." are reserved for pre-declared and +standardized exchanges. The client MAY declare an exchange starting with +"amq." if the passive option is set, or the exchange already exists. Names can +consist of a non-empty sequence of letters, digits, hyphen, underscore, +period, or colon. + +Each exchange belongs to one of a set of exchange kinds/types implemented by +the server. The exchange types define the functionality of the exchange - i.e. +how messages are routed through it. Once an exchange is declared, its type +cannot be changed. The common types are "direct", "fanout", "topic" and +"headers". + +Durable and Non-Auto-Deleted exchanges will survive server restarts and remain +declared when there are no remaining bindings. This is the best lifetime for +long-lived exchange configurations like stable routes and default exchanges. + +Non-Durable and Auto-Deleted exchanges will be deleted when there are no +remaining bindings and not restored on server restart. 
This lifetime is +useful for temporary topologies that should not pollute the virtual host on +failure or after the consumers have completed. + +Non-Durable and Non-Auto-deleted exchanges will remain as long as the server is +running including when there are no remaining bindings. This is useful for +temporary topologies that may have long delays between bindings. + +Durable and Auto-Deleted exchanges will survive server restarts and will be +removed before and after server restarts when there are no remaining bindings. +These exchanges are useful for robust temporary topologies or when you require +binding durable queues to auto-deleted exchanges. + +Note: RabbitMQ declares the default exchange types like 'amq.fanout' as +durable, so queues that bind to these pre-declared exchanges must also be +durable. + +Exchanges declared as `internal` do not accept accept publishings. Internal +exchanges are useful when you wish to implement inter-exchange topologies +that should not be exposed to users of the broker. + +When noWait is true, declare without waiting for a confirmation from the server. +The channel may be closed as a result of an error. Add a NotifyClose listener +to respond to any exceptions. + +Optional amqp.Table of arguments that are specific to the server's implementation of +the exchange can be sent for exchange types that require extra parameters. +*/ +func (ch *Channel) ExchangeDeclare(name, kind string, durable, autoDelete, internal, noWait bool, args Table) error { + if err := args.Validate(); err != nil { + return err + } + + return ch.call( + &exchangeDeclare{ + Exchange: name, + Type: kind, + Passive: false, + Durable: durable, + AutoDelete: autoDelete, + Internal: internal, + NoWait: noWait, + Arguments: args, + }, + &exchangeDeclareOk{}, + ) +} + +/* + +ExchangeDeclarePassive is functionally and parametrically equivalent to +ExchangeDeclare, except that it sets the "passive" attribute to true. 
A passive +exchange is assumed by RabbitMQ to already exist, and attempting to connect to a +non-existent exchange will cause RabbitMQ to throw an exception. This function +can be used to detect the existence of an exchange. + +*/ +func (ch *Channel) ExchangeDeclarePassive(name, kind string, durable, autoDelete, internal, noWait bool, args Table) error { + if err := args.Validate(); err != nil { + return err + } + + return ch.call( + &exchangeDeclare{ + Exchange: name, + Type: kind, + Passive: true, + Durable: durable, + AutoDelete: autoDelete, + Internal: internal, + NoWait: noWait, + Arguments: args, + }, + &exchangeDeclareOk{}, + ) +} + +/* +ExchangeDelete removes the named exchange from the server. When an exchange is +deleted all queue bindings on the exchange are also deleted. If this exchange +does not exist, the channel will be closed with an error. + +When ifUnused is true, the server will only delete the exchange if it has no queue +bindings. If the exchange has queue bindings the server does not delete it +but close the channel with an exception instead. Set this to true if you are +not the sole owner of the exchange. + +When noWait is true, do not wait for a server confirmation that the exchange has +been deleted. Failing to delete the channel could close the channel. Add a +NotifyClose listener to respond to these channel exceptions. +*/ +func (ch *Channel) ExchangeDelete(name string, ifUnused, noWait bool) error { + return ch.call( + &exchangeDelete{ + Exchange: name, + IfUnused: ifUnused, + NoWait: noWait, + }, + &exchangeDeleteOk{}, + ) +} + +/* +ExchangeBind binds an exchange to another exchange to create inter-exchange +routing topologies on the server. This can decouple the private topology and +routing exchanges from exchanges intended solely for publishing endpoints. + +Binding two exchanges with identical arguments will not create duplicate +bindings. + +Binding one exchange to another with multiple bindings will only deliver a +message once. 
For example if you bind your exchange to `amq.fanout` with two +different binding keys, only a single message will be delivered to your +exchange even though multiple bindings will match. + +Given a message delivered to the source exchange, the message will be forwarded +to the destination exchange when the routing key is matched. + + ExchangeBind("sell", "MSFT", "trade", false, nil) + ExchangeBind("buy", "AAPL", "trade", false, nil) + + Delivery Source Key Destination + example exchange exchange + ----------------------------------------------- + key: AAPL --> trade ----> MSFT sell + \---> AAPL --> buy + +When noWait is true, do not wait for the server to confirm the binding. If any +error occurs the channel will be closed. Add a listener to NotifyClose to +handle these errors. + +Optional arguments specific to the exchanges bound can also be specified. +*/ +func (ch *Channel) ExchangeBind(destination, key, source string, noWait bool, args Table) error { + if err := args.Validate(); err != nil { + return err + } + + return ch.call( + &exchangeBind{ + Destination: destination, + Source: source, + RoutingKey: key, + NoWait: noWait, + Arguments: args, + }, + &exchangeBindOk{}, + ) +} + +/* +ExchangeUnbind unbinds the destination exchange from the source exchange on the +server by removing the routing key between them. This is the inverse of +ExchangeBind. If the binding does not currently exist, an error will be +returned. + +When noWait is true, do not wait for the server to confirm the deletion of the +binding. If any error occurs the channel will be closed. Add a listener to +NotifyClose to handle these errors. + +Optional arguments that are specific to the type of exchanges bound can also be +provided. These must match the same arguments specified in ExchangeBind to +identify the binding. 
+*/ +func (ch *Channel) ExchangeUnbind(destination, key, source string, noWait bool, args Table) error { + if err := args.Validate(); err != nil { + return err + } + + return ch.call( + &exchangeUnbind{ + Destination: destination, + Source: source, + RoutingKey: key, + NoWait: noWait, + Arguments: args, + }, + &exchangeUnbindOk{}, + ) +} + +/* +Publish sends a Publishing from the client to an exchange on the server. + +When you want a single message to be delivered to a single queue, you can +publish to the default exchange with the routingKey of the queue name. This is +because every declared queue gets an implicit route to the default exchange. + +Since publishings are asynchronous, any undeliverable message will get returned +by the server. Add a listener with Channel.NotifyReturn to handle any +undeliverable message when calling publish with either the mandatory or +immediate parameters as true. + +Publishings can be undeliverable when the mandatory flag is true and no queue is +bound that matches the routing key, or when the immediate flag is true and no +consumer on the matched queue is ready to accept the delivery. + +This can return an error when the channel, connection or socket is closed. The +error or lack of an error does not indicate whether the server has received this +publishing. + +It is possible for publishing to not reach the broker if the underlying socket +is shut down without pending publishing packets being flushed from the kernel +buffers. The easy way of making it probable that all publishings reach the +server is to always call Connection.Close before terminating your publishing +application. The way to ensure that all publishings reach the server is to add +a listener to Channel.NotifyPublish and put the channel in confirm mode with +Channel.Confirm. Publishing delivery tags and their corresponding +confirmations start at 1. Exit when all publishings are confirmed. 
+ +When Publish does not return an error and the channel is in confirm mode, the +internal counter for DeliveryTags with the first confirmation starts at 1. + +*/ +func (ch *Channel) Publish(exchange, key string, mandatory, immediate bool, msg Publishing) error { + if err := msg.Headers.Validate(); err != nil { + return err + } + + ch.m.Lock() + defer ch.m.Unlock() + + if err := ch.send(&basicPublish{ + Exchange: exchange, + RoutingKey: key, + Mandatory: mandatory, + Immediate: immediate, + Body: msg.Body, + Properties: properties{ + Headers: msg.Headers, + ContentType: msg.ContentType, + ContentEncoding: msg.ContentEncoding, + DeliveryMode: msg.DeliveryMode, + Priority: msg.Priority, + CorrelationId: msg.CorrelationId, + ReplyTo: msg.ReplyTo, + Expiration: msg.Expiration, + MessageId: msg.MessageId, + Timestamp: msg.Timestamp, + Type: msg.Type, + UserId: msg.UserId, + AppId: msg.AppId, + }, + }); err != nil { + return err + } + + if ch.confirming { + ch.confirms.Publish() + } + + return nil +} + +/* +Get synchronously receives a single Delivery from the head of a queue from the +server to the client. In almost all cases, using Channel.Consume will be +preferred. + +If there was a delivery waiting on the queue and that delivery was received, the +second return value will be true. If there was no delivery waiting or an error +occurred, the ok bool will be false. + +All deliveries must be acknowledged including those from Channel.Get. Call +Delivery.Ack on the returned delivery when you have fully processed this +delivery. + +When autoAck is true, the server will automatically acknowledge this message so +you don't have to. But if you are unable to fully process this message before +the channel or connection is closed, the message will not get requeued. 
+ +*/ +func (ch *Channel) Get(queue string, autoAck bool) (msg Delivery, ok bool, err error) { + req := &basicGet{Queue: queue, NoAck: autoAck} + res := &basicGetOk{} + empty := &basicGetEmpty{} + + if err := ch.call(req, res, empty); err != nil { + return Delivery{}, false, err + } + + if res.DeliveryTag > 0 { + return *(newDelivery(ch, res)), true, nil + } + + return Delivery{}, false, nil +} + +/* +Tx puts the channel into transaction mode on the server. All publishings and +acknowledgments following this method will be atomically committed or rolled +back for a single queue. Call either Channel.TxCommit or Channel.TxRollback to +leave a this transaction and immediately start a new transaction. + +The atomicity across multiple queues is not defined as queue declarations and +bindings are not included in the transaction. + +The behavior of publishings that are delivered as mandatory or immediate while +the channel is in a transaction is not defined. + +Once a channel has been put into transaction mode, it cannot be taken out of +transaction mode. Use a different channel for non-transactional semantics. + +*/ +func (ch *Channel) Tx() error { + return ch.call( + &txSelect{}, + &txSelectOk{}, + ) +} + +/* +TxCommit atomically commits all publishings and acknowledgments for a single +queue and immediately start a new transaction. + +Calling this method without having called Channel.Tx is an error. + +*/ +func (ch *Channel) TxCommit() error { + return ch.call( + &txCommit{}, + &txCommitOk{}, + ) +} + +/* +TxRollback atomically rolls back all publishings and acknowledgments for a +single queue and immediately start a new transaction. + +Calling this method without having called Channel.Tx is an error. + +*/ +func (ch *Channel) TxRollback() error { + return ch.call( + &txRollback{}, + &txRollbackOk{}, + ) +} + +/* +Flow pauses the delivery of messages to consumers on this channel. 
Channels +are opened with flow control active, to open a channel with paused +deliveries immediately call this method with `false` after calling +Connection.Channel. + +When active is `false`, this method asks the server to temporarily pause deliveries +until called again with active as `true`. + +Channel.Get methods will not be affected by flow control. + +This method is not intended to act as window control. Use Channel.Qos to limit +the number of unacknowledged messages or bytes in flight instead. + +The server may also send us flow methods to throttle our publishings. A well +behaving publishing client should add a listener with Channel.NotifyFlow and +pause its publishings when `false` is sent on that channel. + +Note: RabbitMQ prefers to use TCP push back to control flow for all channels on +a connection, so under high volume scenarios, it's wise to open separate +Connections for publishings and deliveries. + +*/ +func (ch *Channel) Flow(active bool) error { + return ch.call( + &channelFlow{Active: active}, + &channelFlowOk{}, + ) +} + +/* +Confirm puts this channel into confirm mode so that the client can ensure all +publishings have successfully been received by the server. After entering this +mode, the server will send a basic.ack or basic.nack message with the deliver +tag set to a 1 based incremental index corresponding to every publishing +received after the this method returns. + +Add a listener to Channel.NotifyPublish to respond to the Confirmations. If +Channel.NotifyPublish is not called, the Confirmations will be silently +ignored. + +The order of acknowledgments is not bound to the order of deliveries. + +Ack and Nack confirmations will arrive at some point in the future. + +Unroutable mandatory or immediate messages are acknowledged immediately after +any Channel.NotifyReturn listeners have been notified. 
Other messages are +acknowledged when all queues that should have the message routed to them have +either received acknowledgment of delivery or have enqueued the message, +persisting the message if necessary. + +When noWait is true, the client will not wait for a response. A channel +exception could occur if the server does not support this method. + +*/ +func (ch *Channel) Confirm(noWait bool) error { + if err := ch.call( + &confirmSelect{Nowait: noWait}, + &confirmSelectOk{}, + ); err != nil { + return err + } + + ch.confirmM.Lock() + ch.confirming = true + ch.confirmM.Unlock() + + return nil +} + +/* +Recover redelivers all unacknowledged deliveries on this channel. + +When requeue is false, messages will be redelivered to the original consumer. + +When requeue is true, messages will be redelivered to any available consumer, +potentially including the original. + +If the deliveries cannot be recovered, an error will be returned and the channel +will be closed. + +Note: this method is not implemented on RabbitMQ, use Delivery.Nack instead +*/ +func (ch *Channel) Recover(requeue bool) error { + return ch.call( + &basicRecover{Requeue: requeue}, + &basicRecoverOk{}, + ) +} + +/* +Ack acknowledges a delivery by its delivery tag when having been consumed with +Channel.Consume or Channel.Get. + +Ack acknowledges all message received prior to the delivery tag when multiple +is true. + +See also Delivery.Ack +*/ +func (ch *Channel) Ack(tag uint64, multiple bool) error { + ch.m.Lock() + defer ch.m.Unlock() + + return ch.send(&basicAck{ + DeliveryTag: tag, + Multiple: multiple, + }) +} + +/* +Nack negatively acknowledges a delivery by its delivery tag. Prefer this +method to notify the server that you were not able to process this delivery and +it must be redelivered or dropped. 
+ +See also Delivery.Nack +*/ +func (ch *Channel) Nack(tag uint64, multiple bool, requeue bool) error { + ch.m.Lock() + defer ch.m.Unlock() + + return ch.send(&basicNack{ + DeliveryTag: tag, + Multiple: multiple, + Requeue: requeue, + }) +} + +/* +Reject negatively acknowledges a delivery by its delivery tag. Prefer Nack +over Reject when communicating with a RabbitMQ server because you can Nack +multiple messages, reducing the amount of protocol messages to exchange. + +See also Delivery.Reject +*/ +func (ch *Channel) Reject(tag uint64, requeue bool) error { + ch.m.Lock() + defer ch.m.Unlock() + + return ch.send(&basicReject{ + DeliveryTag: tag, + Requeue: requeue, + }) +} diff --git a/vendor/github.com/streadway/amqp/client_test.go b/vendor/github.com/streadway/amqp/client_test.go new file mode 100644 index 0000000..d10671a --- /dev/null +++ b/vendor/github.com/streadway/amqp/client_test.go @@ -0,0 +1,714 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "bytes" + "io" + "reflect" + "testing" + "time" +) + +type server struct { + *testing.T + r reader // framer <- client + w writer // framer -> client + S io.ReadWriteCloser // Server IO + C io.ReadWriteCloser // Client IO + + // captured client frames + start connectionStartOk + tune connectionTuneOk +} + +func defaultConfig() Config { + return Config{ + SASL: []Authentication{&PlainAuth{"guest", "guest"}}, + Vhost: "/", + Locale: defaultLocale, + } +} + +func newSession(t *testing.T) (io.ReadWriteCloser, *server) { + rs, wc := io.Pipe() + rc, ws := io.Pipe() + + rws := &logIO{t, "server", pipe{rs, ws}} + rwc := &logIO{t, "client", pipe{rc, wc}} + + server := server{ + T: t, + r: reader{rws}, + w: writer{rws}, + S: rws, + C: rwc, + } + + return rwc, &server +} + +func (t *server) expectBytes(b []byte) { + in := make([]byte, len(b)) + if _, err := io.ReadFull(t.S, in); err != nil { + t.Fatalf("io error expecting bytes: %v", err) + } + + if bytes.Compare(b, in) != 0 { + t.Fatalf("failed bytes: expected: %s got: %s", string(b), string(in)) + } +} + +func (t *server) send(channel int, m message) { + defer time.AfterFunc(time.Second, func() { panic("send deadlock") }).Stop() + + if msg, ok := m.(messageWithContent); ok { + props, body := msg.getContent() + class, _ := msg.id() + t.w.WriteFrame(&methodFrame{ + ChannelId: uint16(channel), + Method: msg, + }) + t.w.WriteFrame(&headerFrame{ + ChannelId: uint16(channel), + ClassId: class, + Size: uint64(len(body)), + Properties: props, + }) + t.w.WriteFrame(&bodyFrame{ + ChannelId: uint16(channel), + Body: body, + }) + } else { + t.w.WriteFrame(&methodFrame{ + ChannelId: uint16(channel), + Method: m, + }) + } +} + +// drops all but method frames expected on the given channel +func (t *server) recv(channel int, m message) message { + defer time.AfterFunc(time.Second, func() { panic("recv deadlock") }).Stop() + + var remaining 
int + var header *headerFrame + var body []byte + + for { + frame, err := t.r.ReadFrame() + if err != nil { + t.Fatalf("frame err, read: %s", err) + } + + if frame.channel() != uint16(channel) { + t.Fatalf("expected frame on channel %d, got channel %d", channel, frame.channel()) + } + + switch f := frame.(type) { + case *heartbeatFrame: + // drop + + case *headerFrame: + // start content state + header = f + remaining = int(header.Size) + if remaining == 0 { + m.(messageWithContent).setContent(header.Properties, nil) + return m + } + + case *bodyFrame: + // continue until terminated + body = append(body, f.Body...) + remaining -= len(f.Body) + if remaining <= 0 { + m.(messageWithContent).setContent(header.Properties, body) + return m + } + + case *methodFrame: + if reflect.TypeOf(m) == reflect.TypeOf(f.Method) { + wantv := reflect.ValueOf(m).Elem() + havev := reflect.ValueOf(f.Method).Elem() + wantv.Set(havev) + if _, ok := m.(messageWithContent); !ok { + return m + } + } else { + t.Fatalf("expected method type: %T, got: %T", m, f.Method) + } + + default: + t.Fatalf("unexpected frame: %+v", f) + } + } +} + +func (t *server) expectAMQP() { + t.expectBytes([]byte{'A', 'M', 'Q', 'P', 0, 0, 9, 1}) +} + +func (t *server) connectionStart() { + t.send(0, &connectionStart{ + VersionMajor: 0, + VersionMinor: 9, + Mechanisms: "PLAIN", + Locales: "en_US", + }) + + t.recv(0, &t.start) +} + +func (t *server) connectionTune() { + t.send(0, &connectionTune{ + ChannelMax: 11, + FrameMax: 20000, + Heartbeat: 10, + }) + + t.recv(0, &t.tune) +} + +func (t *server) connectionOpen() { + t.expectAMQP() + t.connectionStart() + t.connectionTune() + + t.recv(0, &connectionOpen{}) + t.send(0, &connectionOpenOk{}) +} + +func (t *server) connectionClose() { + t.recv(0, &connectionClose{}) + t.send(0, &connectionCloseOk{}) +} + +func (t *server) channelOpen(id int) { + t.recv(id, &channelOpen{}) + t.send(id, &channelOpenOk{}) +} + +func TestDefaultClientProperties(t *testing.T) { + rwc, srv := 
newSession(t) + + go func() { + srv.connectionOpen() + rwc.Close() + }() + + if c, err := Open(rwc, defaultConfig()); err != nil { + t.Fatalf("could not create connection: %v (%s)", c, err) + } + + if want, got := defaultProduct, srv.start.ClientProperties["product"]; want != got { + t.Errorf("expected product %s got: %s", want, got) + } + + if want, got := defaultVersion, srv.start.ClientProperties["version"]; want != got { + t.Errorf("expected version %s got: %s", want, got) + } + + if want, got := defaultLocale, srv.start.Locale; want != got { + t.Errorf("expected locale %s got: %s", want, got) + } +} + +func TestCustomClientProperties(t *testing.T) { + rwc, srv := newSession(t) + + config := defaultConfig() + config.Properties = Table{ + "product": "foo", + "version": "1.0", + } + + go func() { + srv.connectionOpen() + rwc.Close() + }() + + if c, err := Open(rwc, config); err != nil { + t.Fatalf("could not create connection: %v (%s)", c, err) + } + + if want, got := config.Properties["product"], srv.start.ClientProperties["product"]; want != got { + t.Errorf("expected product %s got: %s", want, got) + } + + if want, got := config.Properties["version"], srv.start.ClientProperties["version"]; want != got { + t.Errorf("expected version %s got: %s", want, got) + } +} + +func TestOpen(t *testing.T) { + rwc, srv := newSession(t) + go func() { + srv.connectionOpen() + rwc.Close() + }() + + if c, err := Open(rwc, defaultConfig()); err != nil { + t.Fatalf("could not create connection: %v (%s)", c, err) + } +} + +func TestChannelOpen(t *testing.T) { + rwc, srv := newSession(t) + + go func() { + srv.connectionOpen() + srv.channelOpen(1) + + rwc.Close() + }() + + c, err := Open(rwc, defaultConfig()) + if err != nil { + t.Fatalf("could not create connection: %v (%s)", c, err) + } + + ch, err := c.Channel() + if err != nil { + t.Fatalf("could not open channel: %v (%s)", ch, err) + } +} + +func TestOpenFailedSASLUnsupportedMechanisms(t *testing.T) { + rwc, srv := 
newSession(t) + + go func() { + srv.expectAMQP() + srv.send(0, &connectionStart{ + VersionMajor: 0, + VersionMinor: 9, + Mechanisms: "KERBEROS NTLM", + Locales: "en_US", + }) + }() + + c, err := Open(rwc, defaultConfig()) + if err != ErrSASL { + t.Fatalf("expected ErrSASL got: %+v on %+v", err, c) + } +} + +func TestOpenFailedCredentials(t *testing.T) { + rwc, srv := newSession(t) + + go func() { + srv.expectAMQP() + srv.connectionStart() + // Now kill/timeout the connection indicating bad auth + rwc.Close() + }() + + c, err := Open(rwc, defaultConfig()) + if err != ErrCredentials { + t.Fatalf("expected ErrCredentials got: %+v on %+v", err, c) + } +} + +func TestOpenFailedVhost(t *testing.T) { + rwc, srv := newSession(t) + + go func() { + srv.expectAMQP() + srv.connectionStart() + srv.connectionTune() + srv.recv(0, &connectionOpen{}) + + // Now kill/timeout the connection on bad Vhost + rwc.Close() + }() + + c, err := Open(rwc, defaultConfig()) + if err != ErrVhost { + t.Fatalf("expected ErrVhost got: %+v on %+v", err, c) + } +} + +func TestConfirmMultipleOrdersDeliveryTags(t *testing.T) { + rwc, srv := newSession(t) + defer rwc.Close() + + go func() { + srv.connectionOpen() + srv.channelOpen(1) + + srv.recv(1, &confirmSelect{}) + srv.send(1, &confirmSelectOk{}) + + srv.recv(1, &basicPublish{}) + srv.recv(1, &basicPublish{}) + srv.recv(1, &basicPublish{}) + srv.recv(1, &basicPublish{}) + + // Single tag, plus multiple, should produce + // 2, 1, 3, 4 + srv.send(1, &basicAck{DeliveryTag: 2}) + srv.send(1, &basicAck{DeliveryTag: 1}) + srv.send(1, &basicAck{DeliveryTag: 4, Multiple: true}) + + srv.recv(1, &basicPublish{}) + srv.recv(1, &basicPublish{}) + srv.recv(1, &basicPublish{}) + srv.recv(1, &basicPublish{}) + + // And some more, but in reverse order, multiple then one + // 5, 6, 7, 8 + srv.send(1, &basicAck{DeliveryTag: 6, Multiple: true}) + srv.send(1, &basicAck{DeliveryTag: 8}) + srv.send(1, &basicAck{DeliveryTag: 7}) + }() + + c, err := Open(rwc, 
defaultConfig()) + if err != nil { + t.Fatalf("could not create connection: %v (%s)", c, err) + } + + ch, err := c.Channel() + if err != nil { + t.Fatalf("could not open channel: %v (%s)", ch, err) + } + + confirm := ch.NotifyPublish(make(chan Confirmation)) + + ch.Confirm(false) + + go func() { + ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 1")}) + ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 2")}) + ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 3")}) + ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 4")}) + }() + + // received out of order, consumed in order + for i, tag := range []uint64{1, 2, 3, 4} { + if ack := <-confirm; tag != ack.DeliveryTag { + t.Fatalf("failed ack, expected ack#%d to be %d, got %d", i, tag, ack.DeliveryTag) + } + } + + go func() { + ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 5")}) + ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 6")}) + ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 7")}) + ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 8")}) + }() + + for i, tag := range []uint64{5, 6, 7, 8} { + if ack := <-confirm; tag != ack.DeliveryTag { + t.Fatalf("failed ack, expected ack#%d to be %d, got %d", i, tag, ack.DeliveryTag) + } + } + +} + +func TestNotifyClosesReusedPublisherConfirmChan(t *testing.T) { + rwc, srv := newSession(t) + + go func() { + srv.connectionOpen() + srv.channelOpen(1) + + srv.recv(1, &confirmSelect{}) + srv.send(1, &confirmSelectOk{}) + + srv.recv(0, &connectionClose{}) + srv.send(0, &connectionCloseOk{}) + }() + + c, err := Open(rwc, defaultConfig()) + if err != nil { + t.Fatalf("could not create connection: %v (%s)", c, err) + } + + ch, err := c.Channel() + if err != nil { + t.Fatalf("could not open channel: %v (%s)", ch, err) + } + + ackAndNack := make(chan uint64) + ch.NotifyConfirm(ackAndNack, ackAndNack) + + if err := ch.Confirm(false); err != nil { + t.Fatalf("expected to enter 
confirm mode: %v", err) + } + + if err := c.Close(); err != nil { + t.Fatalf("could not close connection: %v (%s)", c, err) + } +} + +func TestNotifyClosesAllChansAfterConnectionClose(t *testing.T) { + rwc, srv := newSession(t) + + go func() { + srv.connectionOpen() + srv.channelOpen(1) + + srv.recv(0, &connectionClose{}) + srv.send(0, &connectionCloseOk{}) + }() + + c, err := Open(rwc, defaultConfig()) + if err != nil { + t.Fatalf("could not create connection: %v (%s)", c, err) + } + + ch, err := c.Channel() + if err != nil { + t.Fatalf("could not open channel: %v (%s)", ch, err) + } + + if err := c.Close(); err != nil { + t.Fatalf("could not close connection: %v (%s)", c, err) + } + + select { + case <-c.NotifyClose(make(chan *Error)): + case <-time.After(time.Millisecond): + t.Errorf("expected to close NotifyClose chan after Connection.Close") + } + + select { + case <-ch.NotifyClose(make(chan *Error)): + case <-time.After(time.Millisecond): + t.Errorf("expected to close Connection.NotifyClose chan after Connection.Close") + } + + select { + case <-ch.NotifyFlow(make(chan bool)): + case <-time.After(time.Millisecond): + t.Errorf("expected to close Channel.NotifyFlow chan after Connection.Close") + } + + select { + case <-ch.NotifyCancel(make(chan string)): + case <-time.After(time.Millisecond): + t.Errorf("expected to close Channel.NofityCancel chan after Connection.Close") + } + + select { + case <-ch.NotifyReturn(make(chan Return)): + case <-time.After(time.Millisecond): + t.Errorf("expected to close Channel.NotifyReturn chan after Connection.Close") + } + + confirms := ch.NotifyPublish(make(chan Confirmation)) + + select { + case <-confirms: + case <-time.After(time.Millisecond): + t.Errorf("expected to close confirms on Channel.NotifyPublish chan after Connection.Close") + } +} + +// Should not panic when sending bodies split at different boundaries +func TestPublishBodySliceIssue74(t *testing.T) { + rwc, srv := newSession(t) + defer rwc.Close() + + const 
frameSize = 100 + const publishings = frameSize * 3 + + done := make(chan bool) + base := make([]byte, publishings) + + go func() { + srv.connectionOpen() + srv.channelOpen(1) + + for i := 0; i < publishings; i++ { + srv.recv(1, &basicPublish{}) + } + + done <- true + }() + + cfg := defaultConfig() + cfg.FrameSize = frameSize + + c, err := Open(rwc, cfg) + if err != nil { + t.Fatalf("could not create connection: %v (%s)", c, err) + } + + ch, err := c.Channel() + if err != nil { + t.Fatalf("could not open channel: %v (%s)", ch, err) + } + + for i := 0; i < publishings; i++ { + go ch.Publish("", "q", false, false, Publishing{Body: base[0:i]}) + } + + <-done +} + +// Should not panic when server and client have frame_size of 0 +func TestPublishZeroFrameSizeIssue161(t *testing.T) { + rwc, srv := newSession(t) + defer rwc.Close() + + const frameSize = 0 + const publishings = 1 + done := make(chan bool) + + go func() { + srv.connectionOpen() + srv.channelOpen(1) + + for i := 0; i < publishings; i++ { + srv.recv(1, &basicPublish{}) + } + + done <- true + }() + + cfg := defaultConfig() + cfg.FrameSize = frameSize + + c, err := Open(rwc, cfg) + + // override the tuned framesize with a hard 0, as would happen when rabbit is configured with 0 + c.Config.FrameSize = frameSize + + if err != nil { + t.Fatalf("could not create connection: %v (%s)", c, err) + } + + ch, err := c.Channel() + if err != nil { + t.Fatalf("could not open channel: %v (%s)", ch, err) + } + + for i := 0; i < publishings; i++ { + go ch.Publish("", "q", false, false, Publishing{Body: []byte("anything")}) + } + + <-done +} + +func TestPublishAndShutdownDeadlockIssue84(t *testing.T) { + rwc, srv := newSession(t) + defer rwc.Close() + + go func() { + srv.connectionOpen() + srv.channelOpen(1) + srv.recv(1, &basicPublish{}) + // Mimic a broken io pipe so that Publish catches the error and goes into shutdown + srv.S.Close() + }() + + c, err := Open(rwc, defaultConfig()) + if err != nil { + t.Fatalf("couldn't 
create connection: %v (%s)", c, err) + } + + ch, err := c.Channel() + if err != nil { + t.Fatalf("couldn't open channel: %v (%s)", ch, err) + } + + defer time.AfterFunc(500*time.Millisecond, func() { panic("Publish deadlock") }).Stop() + for { + if err := ch.Publish("exchange", "q", false, false, Publishing{Body: []byte("test")}); err != nil { + t.Log("successfully caught disconnect error", err) + return + } + } +} + +// TestChannelReturnsCloseRace ensures that receiving a basicReturn frame and +// sending the notification to the bound channel does not race with +// channel.shutdown() which closes all registered notification channels - checks +// for a "send on closed channel" panic +func TestChannelReturnsCloseRace(t *testing.T) { + defer time.AfterFunc(5*time.Second, func() { panic("Shutdown deadlock") }).Stop() + ch := newChannel(&Connection{}, 1) + + // Register a channel to close in channel.shutdown() + notify := make(chan Return, 1) + ch.NotifyReturn(notify) + + go func() { + for range notify { + // Drain notifications + } + }() + + // Simulate receiving a load of returns (triggering a write to the above + // channel) while we call shutdown concurrently + go func() { + for i := 0; i < 100; i++ { + ch.dispatch(&basicReturn{}) + } + }() + + ch.shutdown(nil) +} + +// TestLeakClosedConsumersIssue264 ensures that closing a consumer with +// prefetched messages does not leak the buffering goroutine. 
+func TestLeakClosedConsumersIssue264(t *testing.T) { + const tag = "consumer-tag" + + rwc, srv := newSession(t) + defer rwc.Close() + + go func() { + srv.connectionOpen() + srv.channelOpen(1) + + srv.recv(1, &basicQos{}) + srv.send(1, &basicQosOk{}) + + srv.recv(1, &basicConsume{}) + srv.send(1, &basicConsumeOk{ConsumerTag: tag}) + + // This delivery is intended to be consumed + srv.send(1, &basicDeliver{ConsumerTag: tag, DeliveryTag: 1}) + + // This delivery is intended to be dropped + srv.send(1, &basicDeliver{ConsumerTag: tag, DeliveryTag: 2}) + + srv.recv(0, &connectionClose{}) + srv.send(0, &connectionCloseOk{}) + srv.C.Close() + }() + + c, err := Open(rwc, defaultConfig()) + if err != nil { + t.Fatalf("could not create connection: %v (%s)", c, err) + } + + ch, err := c.Channel() + if err != nil { + t.Fatalf("could not open channel: %v (%s)", ch, err) + } + ch.Qos(2, 0, false) + + consumer, err := ch.Consume("queue", tag, false, false, false, false, nil) + if err != nil { + t.Fatalf("unexpected error during consumer: %v", err) + } + + first := <-consumer + if want, got := uint64(1), first.DeliveryTag; want != got { + t.Fatalf("unexpected delivery tag: want: %d, got: %d", want, got) + } + + if err := c.Close(); err != nil { + t.Fatalf("unexpected error during connection close: %v", err) + } + + if _, open := <-consumer; open { + t.Fatalf("expected deliveries channel to be closed immediately when the connection is closed so not to leak the bufferDeliveries goroutine") + } +} diff --git a/vendor/github.com/streadway/amqp/confirms.go b/vendor/github.com/streadway/amqp/confirms.go new file mode 100644 index 0000000..06cbaa7 --- /dev/null +++ b/vendor/github.com/streadway/amqp/confirms.go @@ -0,0 +1,94 @@ +package amqp + +import "sync" + +// confirms resequences and notifies one or multiple publisher confirmation listeners +type confirms struct { + m sync.Mutex + listeners []chan Confirmation + sequencer map[uint64]Confirmation + published uint64 + expecting uint64 
+} + +// newConfirms allocates a confirms +func newConfirms() *confirms { + return &confirms{ + sequencer: map[uint64]Confirmation{}, + published: 0, + expecting: 1, + } +} + +func (c *confirms) Listen(l chan Confirmation) { + c.m.Lock() + defer c.m.Unlock() + + c.listeners = append(c.listeners, l) +} + +// publish increments the publishing counter +func (c *confirms) Publish() uint64 { + c.m.Lock() + defer c.m.Unlock() + + c.published++ + return c.published +} + +// confirm confirms one publishing, increments the expecting delivery tag, and +// removes bookkeeping for that delivery tag. +func (c *confirms) confirm(confirmation Confirmation) { + delete(c.sequencer, c.expecting) + c.expecting++ + for _, l := range c.listeners { + l <- confirmation + } +} + +// resequence confirms any out of order delivered confirmations +func (c *confirms) resequence() { + for c.expecting <= c.published { + sequenced, found := c.sequencer[c.expecting] + if !found { + return + } + c.confirm(sequenced) + } +} + +// one confirms one publishing and all following in the publishing sequence +func (c *confirms) One(confirmed Confirmation) { + c.m.Lock() + defer c.m.Unlock() + + if c.expecting == confirmed.DeliveryTag { + c.confirm(confirmed) + } else { + c.sequencer[confirmed.DeliveryTag] = confirmed + } + c.resequence() +} + +// multiple confirms all publishings up until the delivery tag +func (c *confirms) Multiple(confirmed Confirmation) { + c.m.Lock() + defer c.m.Unlock() + + for c.expecting <= confirmed.DeliveryTag { + c.confirm(Confirmation{c.expecting, confirmed.Ack}) + } + c.resequence() +} + +// Close closes all listeners, discarding any out of sequence confirmations +func (c *confirms) Close() error { + c.m.Lock() + defer c.m.Unlock() + + for _, l := range c.listeners { + close(l) + } + c.listeners = nil + return nil +} diff --git a/vendor/github.com/streadway/amqp/confirms_test.go b/vendor/github.com/streadway/amqp/confirms_test.go new file mode 100644 index 0000000..d54de9e --- 
/dev/null +++ b/vendor/github.com/streadway/amqp/confirms_test.go @@ -0,0 +1,153 @@ +package amqp + +import ( + "testing" + "time" +) + +func TestConfirmOneResequences(t *testing.T) { + var ( + fixtures = []Confirmation{ + {1, true}, + {2, false}, + {3, true}, + } + c = newConfirms() + l = make(chan Confirmation, len(fixtures)) + ) + + c.Listen(l) + + for i := range fixtures { + if want, got := uint64(i+1), c.Publish(); want != got { + t.Fatalf("expected publish to return the 1 based delivery tag published, want: %d, got: %d", want, got) + } + } + + c.One(fixtures[1]) + c.One(fixtures[2]) + + select { + case confirm := <-l: + t.Fatalf("expected to wait in order to properly resequence results, got: %+v", confirm) + default: + } + + c.One(fixtures[0]) + + for i, fix := range fixtures { + if want, got := fix, <-l; want != got { + t.Fatalf("expected to return confirmations in sequence for %d, want: %+v, got: %+v", i, want, got) + } + } +} + +func TestConfirmMixedResequences(t *testing.T) { + var ( + fixtures = []Confirmation{ + {1, true}, + {2, true}, + {3, true}, + } + c = newConfirms() + l = make(chan Confirmation, len(fixtures)) + ) + c.Listen(l) + + for range fixtures { + c.Publish() + } + + c.One(fixtures[0]) + c.One(fixtures[2]) + c.Multiple(fixtures[1]) + + for i, fix := range fixtures { + want := fix + var got Confirmation + select { + case got = <-l: + case <-time.After(1 * time.Second): + t.Fatalf("timeout on reading confirmations") + } + if want != got { + t.Fatalf("expected to confirm in sequence for %d, want: %+v, got: %+v", i, want, got) + } + } +} + +func TestConfirmMultipleResequences(t *testing.T) { + var ( + fixtures = []Confirmation{ + {1, true}, + {2, true}, + {3, true}, + {4, true}, + } + c = newConfirms() + l = make(chan Confirmation, len(fixtures)) + ) + c.Listen(l) + + for range fixtures { + c.Publish() + } + + c.Multiple(fixtures[len(fixtures)-1]) + + for i, fix := range fixtures { + if want, got := fix, <-l; want != got { + t.Fatalf("expected 
to confirm multiple in sequence for %d, want: %+v, got: %+v", i, want, got) + } + } +} + +func BenchmarkSequentialBufferedConfirms(t *testing.B) { + var ( + c = newConfirms() + l = make(chan Confirmation, 10) + ) + + c.Listen(l) + + for i := 0; i < t.N; i++ { + if i > cap(l)-1 { + <-l + } + c.One(Confirmation{c.Publish(), true}) + } +} + +func TestConfirmsIsThreadSafe(t *testing.T) { + const count = 1000 + const timeout = 5 * time.Second + var ( + c = newConfirms() + l = make(chan Confirmation) + pub = make(chan Confirmation) + done = make(chan Confirmation) + late = time.After(timeout) + ) + + c.Listen(l) + + for i := 0; i < count; i++ { + go func() { pub <- Confirmation{c.Publish(), true} }() + } + + for i := 0; i < count; i++ { + go func() { c.One(<-pub) }() + } + + for i := 0; i < count; i++ { + go func() { done <- <-l }() + } + + for i := 0; i < count; i++ { + select { + case <-done: + case <-late: + t.Fatalf("expected all publish/confirms to finish after %s", timeout) + } + } +} diff --git a/vendor/github.com/streadway/amqp/connection.go b/vendor/github.com/streadway/amqp/connection.go new file mode 100644 index 0000000..b9d8e8e --- /dev/null +++ b/vendor/github.com/streadway/amqp/connection.go @@ -0,0 +1,847 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "bufio" + "crypto/tls" + "io" + "net" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +const ( + maxChannelMax = (2 << 15) - 1 + + defaultHeartbeat = 10 * time.Second + defaultConnectionTimeout = 30 * time.Second + defaultProduct = "https://github.com/streadway/amqp" + defaultVersion = "β" + // Safer default that makes channel leaks a lot easier to spot + // before they create operational headaches. See https://github.com/rabbitmq/rabbitmq-server/issues/1593. 
+ defaultChannelMax = (2 << 10) - 1 + defaultLocale = "en_US" +) + +// Config is used in DialConfig and Open to specify the desired tuning +// parameters used during a connection open handshake. The negotiated tuning +// will be stored in the returned connection's Config field. +type Config struct { + // The SASL mechanisms to try in the client request, and the successful + // mechanism used on the Connection object. + // If SASL is nil, PlainAuth from the URL is used. + SASL []Authentication + + // Vhost specifies the namespace of permissions, exchanges, queues and + // bindings on the server. Dial sets this to the path parsed from the URL. + Vhost string + + ChannelMax int // 0 max channels means 2^16 - 1 + FrameSize int // 0 max bytes means unlimited + Heartbeat time.Duration // less than 1s uses the server's interval + + // TLSClientConfig specifies the client configuration of the TLS connection + // when establishing a tls transport. + // If the URL uses an amqps scheme, then an empty tls.Config with the + // ServerName from the URL is used. + TLSClientConfig *tls.Config + + // Properties is table of properties that the client advertises to the server. + // This is an optional setting - if the application does not set this, + // the underlying library will use a generic set of client properties. + Properties Table + + // Connection locale that we expect to always be en_US + // Even though servers must return it as per the AMQP 0-9-1 spec, + // we are not aware of it being used other than to satisfy the spec requirements + Locale string + + // Dial returns a net.Conn prepared for a TLS handshake with TSLClientConfig, + // then an AMQP connection handshake. + // If Dial is nil, net.DialTimeout with a 30s connection and 30s deadline is + // used during TLS and AMQP handshaking. 
+ Dial func(network, addr string) (net.Conn, error) +} + +// Connection manages the serialization and deserialization of frames from IO +// and dispatches the frames to the appropriate channel. All RPC methods and +// asynchronous Publishing, Delivery, Ack, Nack and Return messages are +// multiplexed on this channel. There must always be active receivers for +// every asynchronous message on this connection. +type Connection struct { + destructor sync.Once // shutdown once + sendM sync.Mutex // conn writer mutex + m sync.Mutex // struct field mutex + + conn io.ReadWriteCloser + + rpc chan message + writer *writer + sends chan time.Time // timestamps of each frame sent + deadlines chan readDeadliner // heartbeater updates read deadlines + + allocator *allocator // id generator valid after openTune + channels map[uint16]*Channel + + noNotify bool // true when we will never notify again + closes []chan *Error + blocks []chan Blocking + + errors chan *Error + + Config Config // The negotiated Config after connection.open + + Major int // Server's major version + Minor int // Server's minor version + Properties Table // Server properties + Locales []string // Server locales + + closed int32 // Will be 1 if the connection is closed, 0 otherwise. Should only be accessed as atomic +} + +type readDeadliner interface { + SetReadDeadline(time.Time) error +} + +// DefaultDial establishes a connection when config.Dial is not provided +func DefaultDial(connectionTimeout time.Duration) func(network, addr string) (net.Conn, error) { + return func(network, addr string) (net.Conn, error) { + conn, err := net.DialTimeout(network, addr, connectionTimeout) + if err != nil { + return nil, err + } + + // Heartbeating hasn't started yet, don't stall forever on a dead server. + // A deadline is set for TLS and AMQP handshaking. After AMQP is established, + // the deadline is cleared in openComplete. 
+ if err := conn.SetDeadline(time.Now().Add(connectionTimeout)); err != nil { + return nil, err + } + + return conn, nil + } +} + +// Dial accepts a string in the AMQP URI format and returns a new Connection +// over TCP using PlainAuth. Defaults to a server heartbeat interval of 10 +// seconds and sets the handshake deadline to 30 seconds. After handshake, +// deadlines are cleared. +// +// Dial uses the zero value of tls.Config when it encounters an amqps:// +// scheme. It is equivalent to calling DialTLS(amqp, nil). +func Dial(url string) (*Connection, error) { + return DialConfig(url, Config{ + Heartbeat: defaultHeartbeat, + Locale: defaultLocale, + }) +} + +// DialTLS accepts a string in the AMQP URI format and returns a new Connection +// over TCP using PlainAuth. Defaults to a server heartbeat interval of 10 +// seconds and sets the initial read deadline to 30 seconds. +// +// DialTLS uses the provided tls.Config when encountering an amqps:// scheme. +func DialTLS(url string, amqps *tls.Config) (*Connection, error) { + return DialConfig(url, Config{ + Heartbeat: defaultHeartbeat, + TLSClientConfig: amqps, + Locale: defaultLocale, + }) +} + +// DialConfig accepts a string in the AMQP URI format and a configuration for +// the transport and connection setup, returning a new Connection. Defaults to +// a server heartbeat interval of 10 seconds and sets the initial read deadline +// to 30 seconds. 
+func DialConfig(url string, config Config) (*Connection, error) { + var err error + var conn net.Conn + + uri, err := ParseURI(url) + if err != nil { + return nil, err + } + + if config.SASL == nil { + config.SASL = []Authentication{uri.PlainAuth()} + } + + if config.Vhost == "" { + config.Vhost = uri.Vhost + } + + addr := net.JoinHostPort(uri.Host, strconv.FormatInt(int64(uri.Port), 10)) + + dialer := config.Dial + if dialer == nil { + dialer = DefaultDial(defaultConnectionTimeout) + } + + conn, err = dialer("tcp", addr) + if err != nil { + return nil, err + } + + if uri.Scheme == "amqps" { + if config.TLSClientConfig == nil { + config.TLSClientConfig = new(tls.Config) + } + + // If ServerName has not been specified in TLSClientConfig, + // set it to the URI host used for this connection. + if config.TLSClientConfig.ServerName == "" { + config.TLSClientConfig.ServerName = uri.Host + } + + client := tls.Client(conn, config.TLSClientConfig) + if err := client.Handshake(); err != nil { + + conn.Close() + return nil, err + } + + conn = client + } + + return Open(conn, config) +} + +/* +Open accepts an already established connection, or other io.ReadWriteCloser as +a transport. Use this method if you have established a TLS connection or wish +to use your own custom transport. + +*/ +func Open(conn io.ReadWriteCloser, config Config) (*Connection, error) { + c := &Connection{ + conn: conn, + writer: &writer{bufio.NewWriter(conn)}, + channels: make(map[uint16]*Channel), + rpc: make(chan message), + sends: make(chan time.Time), + errors: make(chan *Error, 1), + deadlines: make(chan readDeadliner, 1), + } + go c.reader(conn) + return c, c.open(config) +} + +/* +LocalAddr returns the local TCP peer address, or ":0" (the zero value of net.TCPAddr) +as a fallback default value if the underlying transport does not support LocalAddr(). 
+*/ +func (c *Connection) LocalAddr() net.Addr { + if conn, ok := c.conn.(interface { + LocalAddr() net.Addr + }); ok { + return conn.LocalAddr() + } + return &net.TCPAddr{} +} + +// ConnectionState returns basic TLS details of the underlying transport. +// Returns a zero value when the underlying connection does not implement +// ConnectionState() tls.ConnectionState. +func (c *Connection) ConnectionState() tls.ConnectionState { + if conn, ok := c.conn.(interface { + ConnectionState() tls.ConnectionState + }); ok { + return conn.ConnectionState() + } + return tls.ConnectionState{} +} + +/* +NotifyClose registers a listener for close events either initiated by an error +accompanying a connection.close method or by a normal shutdown. + +On normal shutdowns, the chan will be closed. + +To reconnect after a transport or protocol error, register a listener here and +re-run your setup process. + +*/ +func (c *Connection) NotifyClose(receiver chan *Error) chan *Error { + c.m.Lock() + defer c.m.Unlock() + + if c.noNotify { + close(receiver) + } else { + c.closes = append(c.closes, receiver) + } + + return receiver +} + +/* +NotifyBlocked registers a listener for RabbitMQ specific TCP flow control +method extensions connection.blocked and connection.unblocked. Flow control is +active with a reason when Blocking.Blocked is true. When a Connection is +blocked, all methods will block across all connections until server resources +become free again. + +This optional extension is supported by the server when the +"connection.blocked" server capability key is true. + +*/ +func (c *Connection) NotifyBlocked(receiver chan Blocking) chan Blocking { + c.m.Lock() + defer c.m.Unlock() + + if c.noNotify { + close(receiver) + } else { + c.blocks = append(c.blocks, receiver) + } + + return receiver +} + +/* +Close requests and waits for the response to close the AMQP connection. 
+ +It's advisable to use this message when publishing to ensure all kernel buffers +have been flushed on the server and client before exiting. + +An error indicates that server may not have received this request to close but +the connection should be treated as closed regardless. + +After returning from this call, all resources associated with this connection, +including the underlying io, Channels, Notify listeners and Channel consumers +will also be closed. +*/ +func (c *Connection) Close() error { + if c.IsClosed() { + return ErrClosed + } + + defer c.shutdown(nil) + return c.call( + &connectionClose{ + ReplyCode: replySuccess, + ReplyText: "kthxbai", + }, + &connectionCloseOk{}, + ) +} + +func (c *Connection) closeWith(err *Error) error { + if c.IsClosed() { + return ErrClosed + } + + defer c.shutdown(err) + return c.call( + &connectionClose{ + ReplyCode: uint16(err.Code), + ReplyText: err.Reason, + }, + &connectionCloseOk{}, + ) +} + +// IsClosed returns true if the connection is marked as closed, otherwise false +// is returned. 
+func (c *Connection) IsClosed() bool { + return (atomic.LoadInt32(&c.closed) == 1) +} + +func (c *Connection) send(f frame) error { + if c.IsClosed() { + return ErrClosed + } + + c.sendM.Lock() + err := c.writer.WriteFrame(f) + c.sendM.Unlock() + + if err != nil { + // shutdown could be re-entrant from signaling notify chans + go c.shutdown(&Error{ + Code: FrameError, + Reason: err.Error(), + }) + } else { + // Broadcast we sent a frame, reducing heartbeats, only + // if there is something that can receive - like a non-reentrant + // call or if the heartbeater isn't running + select { + case c.sends <- time.Now(): + default: + } + } + + return err +} + +func (c *Connection) shutdown(err *Error) { + atomic.StoreInt32(&c.closed, 1) + + c.destructor.Do(func() { + c.m.Lock() + defer c.m.Unlock() + + if err != nil { + for _, c := range c.closes { + c <- err + } + } + + if err != nil { + c.errors <- err + } + // Shutdown handler goroutine can still receive the result. + close(c.errors) + + for _, c := range c.closes { + close(c) + } + + for _, c := range c.blocks { + close(c) + } + + // Shutdown the channel, but do not use closeChannel() as it calls + // releaseChannel() which requires the connection lock. + // + // Ranging over c.channels and calling releaseChannel() that mutates + // c.channels is racy - see commit 6063341 for an example. 
+ for _, ch := range c.channels { + ch.shutdown(err) + } + + c.conn.Close() + + c.channels = map[uint16]*Channel{} + c.allocator = newAllocator(1, c.Config.ChannelMax) + c.noNotify = true + }) +} + +// All methods sent to the connection channel should be synchronous so we +// can handle them directly without a framing component +func (c *Connection) demux(f frame) { + if f.channel() == 0 { + c.dispatch0(f) + } else { + c.dispatchN(f) + } +} + +func (c *Connection) dispatch0(f frame) { + switch mf := f.(type) { + case *methodFrame: + switch m := mf.Method.(type) { + case *connectionClose: + // Send immediately as shutdown will close our side of the writer. + c.send(&methodFrame{ + ChannelId: 0, + Method: &connectionCloseOk{}, + }) + + c.shutdown(newError(m.ReplyCode, m.ReplyText)) + case *connectionBlocked: + for _, c := range c.blocks { + c <- Blocking{Active: true, Reason: m.Reason} + } + case *connectionUnblocked: + for _, c := range c.blocks { + c <- Blocking{Active: false} + } + default: + c.rpc <- m + } + case *heartbeatFrame: + // kthx - all reads reset our deadline. so we can drop this + default: + // lolwat - channel0 only responds to methods and heartbeats + c.closeWith(ErrUnexpectedFrame) + } +} + +func (c *Connection) dispatchN(f frame) { + c.m.Lock() + channel := c.channels[f.channel()] + c.m.Unlock() + + if channel != nil { + channel.recv(channel, f) + } else { + c.dispatchClosed(f) + } +} + +// section 2.3.7: "When a peer decides to close a channel or connection, it +// sends a Close method. The receiving peer MUST respond to a Close with a +// Close-Ok, and then both parties can close their channel or connection. Note +// that if peers ignore Close, deadlock can happen when both peers send Close +// at the same time." +// +// When we don't have a channel, so we must respond with close-ok on a close +// method. This can happen between a channel exception on an asynchronous +// method like basic.publish and a synchronous close with channel.close. 
+// In that case, we'll get both a channel.close and channel.close-ok in any +// order. +func (c *Connection) dispatchClosed(f frame) { + // Only consider method frames, drop content/header frames + if mf, ok := f.(*methodFrame); ok { + switch mf.Method.(type) { + case *channelClose: + c.send(&methodFrame{ + ChannelId: f.channel(), + Method: &channelCloseOk{}, + }) + case *channelCloseOk: + // we are already closed, so do nothing + default: + // unexpected method on closed channel + c.closeWith(ErrClosed) + } + } +} + +// Reads each frame off the IO and hand off to the connection object that +// will demux the streams and dispatch to one of the opened channels or +// handle on channel 0 (the connection channel). +func (c *Connection) reader(r io.Reader) { + buf := bufio.NewReader(r) + frames := &reader{buf} + conn, haveDeadliner := r.(readDeadliner) + + for { + frame, err := frames.ReadFrame() + + if err != nil { + c.shutdown(&Error{Code: FrameError, Reason: err.Error()}) + return + } + + c.demux(frame) + + if haveDeadliner { + c.deadlines <- conn + } + } +} + +// Ensures that at least one frame is being sent at the tuned interval with a +// jitter tolerance of 1s +func (c *Connection) heartbeater(interval time.Duration, done chan *Error) { + const maxServerHeartbeatsInFlight = 3 + + var sendTicks <-chan time.Time + if interval > 0 { + ticker := time.NewTicker(interval) + defer ticker.Stop() + sendTicks = ticker.C + } + + lastSent := time.Now() + + for { + select { + case at, stillSending := <-c.sends: + // When actively sending, depend on sent frames to reset server timer + if stillSending { + lastSent = at + } else { + return + } + + case at := <-sendTicks: + // When idle, fill the space with a heartbeat frame + if at.Sub(lastSent) > interval-time.Second { + if err := c.send(&heartbeatFrame{}); err != nil { + // send heartbeats even after close/closeOk so we + // tick until the connection starts erroring + return + } + } + + case conn := <-c.deadlines: + // When 
reading, reset our side of the deadline, if we've negotiated one with + // a deadline that covers at least 2 server heartbeats + if interval > 0 { + conn.SetReadDeadline(time.Now().Add(maxServerHeartbeatsInFlight * interval)) + } + + case <-done: + return + } + } +} + +// Convenience method to inspect the Connection.Properties["capabilities"] +// Table for server identified capabilities like "basic.ack" or +// "confirm.select". +func (c *Connection) isCapable(featureName string) bool { + capabilities, _ := c.Properties["capabilities"].(Table) + hasFeature, _ := capabilities[featureName].(bool) + return hasFeature +} + +// allocateChannel records but does not open a new channel with a unique id. +// This method is the initial part of the channel lifecycle and paired with +// releaseChannel +func (c *Connection) allocateChannel() (*Channel, error) { + c.m.Lock() + defer c.m.Unlock() + + if c.IsClosed() { + return nil, ErrClosed + } + + id, ok := c.allocator.next() + if !ok { + return nil, ErrChannelMax + } + + ch := newChannel(c, uint16(id)) + c.channels[uint16(id)] = ch + + return ch, nil +} + +// releaseChannel removes a channel from the registry as the final part of the +// channel lifecycle +func (c *Connection) releaseChannel(id uint16) { + c.m.Lock() + defer c.m.Unlock() + + delete(c.channels, id) + c.allocator.release(int(id)) +} + +// openChannel allocates and opens a channel, must be paired with closeChannel +func (c *Connection) openChannel() (*Channel, error) { + ch, err := c.allocateChannel() + if err != nil { + return nil, err + } + + if err := ch.open(); err != nil { + c.releaseChannel(ch.id) + return nil, err + } + return ch, nil +} + +// closeChannel releases and initiates a shutdown of the channel. All channel +// closures should be initiated here for proper channel lifecycle management on +// this connection. 
+func (c *Connection) closeChannel(ch *Channel, e *Error) { + ch.shutdown(e) + c.releaseChannel(ch.id) +} + +/* +Channel opens a unique, concurrent server channel to process the bulk of AMQP +messages. Any error from methods on this receiver will render the receiver +invalid and a new Channel should be opened. + +*/ +func (c *Connection) Channel() (*Channel, error) { + return c.openChannel() +} + +func (c *Connection) call(req message, res ...message) error { + // Special case for when the protocol header frame is sent insted of a + // request method + if req != nil { + if err := c.send(&methodFrame{ChannelId: 0, Method: req}); err != nil { + return err + } + } + + select { + case err, ok := <-c.errors: + if !ok { + return ErrClosed + } + return err + + case msg := <-c.rpc: + // Try to match one of the result types + for _, try := range res { + if reflect.TypeOf(msg) == reflect.TypeOf(try) { + // *res = *msg + vres := reflect.ValueOf(try).Elem() + vmsg := reflect.ValueOf(msg).Elem() + vres.Set(vmsg) + return nil + } + } + return ErrCommandInvalid + } + // unreachable +} + +// Connection = open-Connection *use-Connection close-Connection +// open-Connection = C:protocol-header +// S:START C:START-OK +// *challenge +// S:TUNE C:TUNE-OK +// C:OPEN S:OPEN-OK +// challenge = S:SECURE C:SECURE-OK +// use-Connection = *channel +// close-Connection = C:CLOSE S:CLOSE-OK +// / S:CLOSE C:CLOSE-OK +func (c *Connection) open(config Config) error { + if err := c.send(&protocolHeader{}); err != nil { + return err + } + + return c.openStart(config) +} + +func (c *Connection) openStart(config Config) error { + start := &connectionStart{} + + if err := c.call(nil, start); err != nil { + return err + } + + c.Major = int(start.VersionMajor) + c.Minor = int(start.VersionMinor) + c.Properties = Table(start.ServerProperties) + c.Locales = strings.Split(start.Locales, " ") + + // eventually support challenge/response here by also responding to + // connectionSecure. 
+ auth, ok := pickSASLMechanism(config.SASL, strings.Split(start.Mechanisms, " ")) + if !ok { + return ErrSASL + } + + // Save this mechanism off as the one we chose + c.Config.SASL = []Authentication{auth} + + // Set the connection locale to client locale + c.Config.Locale = config.Locale + + return c.openTune(config, auth) +} + +func (c *Connection) openTune(config Config, auth Authentication) error { + if len(config.Properties) == 0 { + config.Properties = Table{ + "product": defaultProduct, + "version": defaultVersion, + } + } + + config.Properties["capabilities"] = Table{ + "connection.blocked": true, + "consumer_cancel_notify": true, + } + + ok := &connectionStartOk{ + ClientProperties: config.Properties, + Mechanism: auth.Mechanism(), + Response: auth.Response(), + Locale: config.Locale, + } + tune := &connectionTune{} + + if err := c.call(ok, tune); err != nil { + // per spec, a connection can only be closed when it has been opened + // so at this point, we know it's an auth error, but the socket + // was closed instead. Return a meaningful error. + return ErrCredentials + } + + // When the server and client both use default 0, then the max channel is + // only limited by uint16. + c.Config.ChannelMax = pick(config.ChannelMax, int(tune.ChannelMax)) + if c.Config.ChannelMax == 0 { + c.Config.ChannelMax = defaultChannelMax + } + c.Config.ChannelMax = min(c.Config.ChannelMax, maxChannelMax) + + // Frame size includes headers and end byte (len(payload)+8), even if + // this is less than FrameMinSize, use what the server sends because the + // alternative is to stop the handshake here. 
+ c.Config.FrameSize = pick(config.FrameSize, int(tune.FrameMax)) + + // Save this off for resetDeadline() + c.Config.Heartbeat = time.Second * time.Duration(pick( + int(config.Heartbeat/time.Second), + int(tune.Heartbeat))) + + // "The client should start sending heartbeats after receiving a + // Connection.Tune method" + go c.heartbeater(c.Config.Heartbeat, c.NotifyClose(make(chan *Error, 1))) + + if err := c.send(&methodFrame{ + ChannelId: 0, + Method: &connectionTuneOk{ + ChannelMax: uint16(c.Config.ChannelMax), + FrameMax: uint32(c.Config.FrameSize), + Heartbeat: uint16(c.Config.Heartbeat / time.Second), + }, + }); err != nil { + return err + } + + return c.openVhost(config) +} + +func (c *Connection) openVhost(config Config) error { + req := &connectionOpen{VirtualHost: config.Vhost} + res := &connectionOpenOk{} + + if err := c.call(req, res); err != nil { + // Cannot be closed yet, but we know it's a vhost problem + return ErrVhost + } + + c.Config.Vhost = config.Vhost + + return c.openComplete() +} + +// openComplete performs any final Connection initialization dependent on the +// connection handshake and clears any state needed for TLS and AMQP handshaking. +func (c *Connection) openComplete() error { + // We clear the deadlines and let the heartbeater reset the read deadline if requested. + // RabbitMQ uses TCP flow control at this point for pushback so Writes can + // intentionally block. 
+ if deadliner, ok := c.conn.(interface { + SetDeadline(time.Time) error + }); ok { + _ = deadliner.SetDeadline(time.Time{}) + } + + c.allocator = newAllocator(1, c.Config.ChannelMax) + return nil +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func pick(client, server int) int { + if client == 0 || server == 0 { + return max(client, server) + } + return min(client, server) +} diff --git a/vendor/github.com/streadway/amqp/connection_test.go b/vendor/github.com/streadway/amqp/connection_test.go new file mode 100644 index 0000000..4b54699 --- /dev/null +++ b/vendor/github.com/streadway/amqp/connection_test.go @@ -0,0 +1,195 @@ +// Copyright (c) 2016, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +// +build integration + +package amqp + +import ( + "crypto/tls" + "net" + "sync" + "testing" + "time" +) + +func TestRequiredServerLocale(t *testing.T) { + conn := integrationConnection(t, "AMQP 0-9-1 required server locale") + requiredServerLocale := defaultLocale + + for _, locale := range conn.Locales { + if locale == requiredServerLocale { + return + } + } + + t.Fatalf("AMQP 0-9-1 server must support at least the %s locale, server sent the following locales: %#v", requiredServerLocale, conn.Locales) +} + +func TestDefaultConnectionLocale(t *testing.T) { + conn := integrationConnection(t, "client default locale") + + if conn.Config.Locale != defaultLocale { + t.Fatalf("Expected default connection locale to be %s, is was: %s", defaultLocale, conn.Config.Locale) + } +} + +func TestChannelOpenOnAClosedConnectionFails(t *testing.T) { + conn := integrationConnection(t, "channel on close") + + conn.Close() + + if _, err := conn.Channel(); err != ErrClosed { + t.Fatalf("channel.open on a closed connection %#v is expected 
to fail", conn) + } +} + +// TestChannelOpenOnAClosedConnectionFails_ReleasesAllocatedChannel ensures the +// channel allocated is released if opening the channel fails. +func TestChannelOpenOnAClosedConnectionFails_ReleasesAllocatedChannel(t *testing.T) { + conn := integrationConnection(t, "releases channel allocation") + conn.Close() + + before := len(conn.channels) + + if _, err := conn.Channel(); err != ErrClosed { + t.Fatalf("channel.open on a closed connection %#v is expected to fail", conn) + } + + if len(conn.channels) != before { + t.Fatalf("channel.open failed, but the allocated channel was not released") + } +} + +// TestRaceBetweenChannelAndConnectionClose ensures allocating a new channel +// does not race with shutting the connection down. +// +// See https://github.com/streadway/amqp/issues/251 - thanks to jmalloc for the +// test case. +func TestRaceBetweenChannelAndConnectionClose(t *testing.T) { + defer time.AfterFunc(10*time.Second, func() { panic("Close deadlock") }).Stop() + + conn := integrationConnection(t, "allocation/shutdown race") + + go conn.Close() + for i := 0; i < 10; i++ { + go func() { + ch, err := conn.Channel() + if err == nil { + ch.Close() + } + }() + } +} + +// TestRaceBetweenChannelShutdownAndSend ensures closing a channel +// (channel.shutdown) does not race with calling channel.send() from any other +// goroutines. +// +// See https://github.com/streadway/amqp/pull/253#issuecomment-292464811 for +// more details - thanks to jmalloc again. +func TestRaceBetweenChannelShutdownAndSend(t *testing.T) { + defer time.AfterFunc(10*time.Second, func() { panic("Close deadlock") }).Stop() + + conn := integrationConnection(t, "channel close/send race") + defer conn.Close() + + ch, _ := conn.Channel() + + go ch.Close() + for i := 0; i < 10; i++ { + go func() { + // ch.Ack calls ch.send() internally. 
+ ch.Ack(42, false) + }() + } +} + +func TestQueueDeclareOnAClosedConnectionFails(t *testing.T) { + conn := integrationConnection(t, "queue declare on close") + ch, _ := conn.Channel() + + conn.Close() + + if _, err := ch.QueueDeclare("an example", false, false, false, false, nil); err != ErrClosed { + t.Fatalf("queue.declare on a closed connection %#v is expected to return ErrClosed, returned: %#v", conn, err) + } +} + +func TestConcurrentClose(t *testing.T) { + const concurrency = 32 + + conn := integrationConnection(t, "concurrent close") + defer conn.Close() + + wg := sync.WaitGroup{} + wg.Add(concurrency) + for i := 0; i < concurrency; i++ { + go func() { + defer wg.Done() + + err := conn.Close() + + if err == nil { + t.Log("first concurrent close was successful") + return + } + + if err == ErrClosed { + t.Log("later concurrent close were successful and returned ErrClosed") + return + } + + // BUG(st) is this really acceptable? we got a net.OpError before the + // connection was marked as closed means a race condition between the + // network connection and handshake state. It should be a package error + // returned. + if _, neterr := err.(*net.OpError); neterr { + t.Logf("unknown net.OpError during close, ignoring: %+v", err) + return + } + + // A different/protocol error occurred indicating a race or missed condition + if _, other := err.(*Error); other { + t.Fatalf("Expected no error, or ErrClosed, or a net.OpError from conn.Close(), got %#v (%s) of type %T", err, err, err) + } + }() + } + wg.Wait() +} + +// TestPlaintextDialTLS esnures amqp:// connections succeed when using DialTLS. 
+func TestPlaintextDialTLS(t *testing.T) { + uri, err := ParseURI(integrationURLFromEnv()) + if err != nil { + t.Fatalf("parse URI error: %s", err) + } + + // We can only test when we have a plaintext listener + if uri.Scheme != "amqp" { + t.Skip("requires server listening for plaintext connections") + } + + conn, err := DialTLS(uri.String(), &tls.Config{MinVersion: tls.VersionTLS12}) + if err != nil { + t.Fatalf("unexpected dial error, got %v", err) + } + conn.Close() +} + +// TestIsClosed will test the public method IsClosed on a connection. +func TestIsClosed(t *testing.T) { + conn := integrationConnection(t, "public IsClosed()") + + if conn.IsClosed() { + t.Fatalf("connection expected to not be marked as closed") + } + + conn.Close() + + if !conn.IsClosed() { + t.Fatal("connection expected to be marked as closed") + } +} diff --git a/vendor/github.com/streadway/amqp/consumers.go b/vendor/github.com/streadway/amqp/consumers.go new file mode 100644 index 0000000..887ac74 --- /dev/null +++ b/vendor/github.com/streadway/amqp/consumers.go @@ -0,0 +1,142 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "os" + "strconv" + "sync" + "sync/atomic" +) + +var consumerSeq uint64 + +const consumerTagLengthMax = 0xFF // see writeShortstr + +func uniqueConsumerTag() string { + return commandNameBasedUniqueConsumerTag(os.Args[0]) +} + +func commandNameBasedUniqueConsumerTag(commandName string) string { + tagPrefix := "ctag-" + tagInfix := commandName + tagSuffix := "-" + strconv.FormatUint(atomic.AddUint64(&consumerSeq, 1), 10) + + if len(tagPrefix)+len(tagInfix)+len(tagSuffix) > consumerTagLengthMax { + tagInfix = "streadway/amqp" + } + + return tagPrefix + tagInfix + tagSuffix +} + +type consumerBuffers map[string]chan *Delivery + +// Concurrent type that manages the consumerTag -> +// ingress consumerBuffer mapping +type consumers struct { + sync.WaitGroup // one for buffer + closed chan struct{} // signal buffer + + sync.Mutex // protects below + chans consumerBuffers +} + +func makeConsumers() *consumers { + return &consumers{ + closed: make(chan struct{}), + chans: make(consumerBuffers), + } +} + +func (subs *consumers) buffer(in chan *Delivery, out chan Delivery) { + defer close(out) + defer subs.Done() + + var inflight = in + var queue []*Delivery + + for delivery := range in { + queue = append(queue, delivery) + + for len(queue) > 0 { + select { + case <-subs.closed: + // closed before drained, drop in-flight + return + + case delivery, consuming := <-inflight: + if consuming { + queue = append(queue, delivery) + } else { + inflight = nil + } + + case out <- *queue[0]: + queue = queue[1:] + } + } + } +} + +// On key conflict, close the previous channel. 
+func (subs *consumers) add(tag string, consumer chan Delivery) { + subs.Lock() + defer subs.Unlock() + + if prev, found := subs.chans[tag]; found { + close(prev) + } + + in := make(chan *Delivery) + subs.chans[tag] = in + + subs.Add(1) + go subs.buffer(in, consumer) +} + +func (subs *consumers) cancel(tag string) (found bool) { + subs.Lock() + defer subs.Unlock() + + ch, found := subs.chans[tag] + + if found { + delete(subs.chans, tag) + close(ch) + } + + return found +} + +func (subs *consumers) close() { + subs.Lock() + defer subs.Unlock() + + close(subs.closed) + + for tag, ch := range subs.chans { + delete(subs.chans, tag) + close(ch) + } + + subs.Wait() +} + +// Sends a delivery to a the consumer identified by `tag`. +// If unbuffered channels are used for Consume this method +// could block all deliveries until the consumer +// receives on the other end of the channel. +func (subs *consumers) send(tag string, msg *Delivery) bool { + subs.Lock() + defer subs.Unlock() + + buffer, found := subs.chans[tag] + if found { + buffer <- msg + } + + return found +} diff --git a/vendor/github.com/streadway/amqp/consumers_test.go b/vendor/github.com/streadway/amqp/consumers_test.go new file mode 100644 index 0000000..998b9a7 --- /dev/null +++ b/vendor/github.com/streadway/amqp/consumers_test.go @@ -0,0 +1,20 @@ +package amqp + +import ( + "strings" + "testing" +) + +func TestGeneratedUniqueConsumerTagDoesNotExceedMaxLength(t *testing.T) { + assertCorrectLength := func(commandName string) { + tag := commandNameBasedUniqueConsumerTag(commandName) + if len(tag) > consumerTagLengthMax { + t.Error("Generated unique consumer tag exceeds maximum length:", tag) + } + } + + assertCorrectLength("test") + assertCorrectLength(strings.Repeat("z", 249)) + assertCorrectLength(strings.Repeat("z", 256)) + assertCorrectLength(strings.Repeat("z", 1024)) +} diff --git a/vendor/github.com/streadway/amqp/delivery.go b/vendor/github.com/streadway/amqp/delivery.go new file mode 100644 index 
0000000..7241264 --- /dev/null +++ b/vendor/github.com/streadway/amqp/delivery.go @@ -0,0 +1,173 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "errors" + "time" +) + +var errDeliveryNotInitialized = errors.New("delivery not initialized") + +// Acknowledger notifies the server of successful or failed consumption of +// delivieries via identifier found in the Delivery.DeliveryTag field. +// +// Applications can provide mock implementations in tests of Delivery handlers. +type Acknowledger interface { + Ack(tag uint64, multiple bool) error + Nack(tag uint64, multiple bool, requeue bool) error + Reject(tag uint64, requeue bool) error +} + +// Delivery captures the fields for a previously delivered message resident in +// a queue to be delivered by the server to a consumer from Channel.Consume or +// Channel.Get. 
+type Delivery struct { + Acknowledger Acknowledger // the channel from which this delivery arrived + + Headers Table // Application or header exchange table + + // Properties + ContentType string // MIME content type + ContentEncoding string // MIME content encoding + DeliveryMode uint8 // queue implementation use - non-persistent (1) or persistent (2) + Priority uint8 // queue implementation use - 0 to 9 + CorrelationId string // application use - correlation identifier + ReplyTo string // application use - address to reply to (ex: RPC) + Expiration string // implementation use - message expiration spec + MessageId string // application use - message identifier + Timestamp time.Time // application use - message timestamp + Type string // application use - message type name + UserId string // application use - creating user - should be authenticated user + AppId string // application use - creating application id + + // Valid only with Channel.Consume + ConsumerTag string + + // Valid only with Channel.Get + MessageCount uint32 + + DeliveryTag uint64 + Redelivered bool + Exchange string // basic.publish exchange + RoutingKey string // basic.publish routing key + + Body []byte +} + +func newDelivery(channel *Channel, msg messageWithContent) *Delivery { + props, body := msg.getContent() + + delivery := Delivery{ + Acknowledger: channel, + + Headers: props.Headers, + ContentType: props.ContentType, + ContentEncoding: props.ContentEncoding, + DeliveryMode: props.DeliveryMode, + Priority: props.Priority, + CorrelationId: props.CorrelationId, + ReplyTo: props.ReplyTo, + Expiration: props.Expiration, + MessageId: props.MessageId, + Timestamp: props.Timestamp, + Type: props.Type, + UserId: props.UserId, + AppId: props.AppId, + + Body: body, + } + + // Properties for the delivery types + switch m := msg.(type) { + case *basicDeliver: + delivery.ConsumerTag = m.ConsumerTag + delivery.DeliveryTag = m.DeliveryTag + delivery.Redelivered = m.Redelivered + delivery.Exchange = 
m.Exchange + delivery.RoutingKey = m.RoutingKey + + case *basicGetOk: + delivery.MessageCount = m.MessageCount + delivery.DeliveryTag = m.DeliveryTag + delivery.Redelivered = m.Redelivered + delivery.Exchange = m.Exchange + delivery.RoutingKey = m.RoutingKey + } + + return &delivery +} + +/* +Ack delegates an acknowledgement through the Acknowledger interface that the +client or server has finished work on a delivery. + +All deliveries in AMQP must be acknowledged. If you called Channel.Consume +with autoAck true then the server will be automatically ack each message and +this method should not be called. Otherwise, you must call Delivery.Ack after +you have successfully processed this delivery. + +When multiple is true, this delivery and all prior unacknowledged deliveries +on the same channel will be acknowledged. This is useful for batch processing +of deliveries. + +An error will indicate that the acknowledge could not be delivered to the +channel it was sent from. + +Either Delivery.Ack, Delivery.Reject or Delivery.Nack must be called for every +delivery that is not automatically acknowledged. +*/ +func (d Delivery) Ack(multiple bool) error { + if d.Acknowledger == nil { + return errDeliveryNotInitialized + } + return d.Acknowledger.Ack(d.DeliveryTag, multiple) +} + +/* +Reject delegates a negatively acknowledgement through the Acknowledger interface. + +When requeue is true, queue this message to be delivered to a consumer on a +different channel. When requeue is false or the server is unable to queue this +message, it will be dropped. + +If you are batch processing deliveries, and your server supports it, prefer +Delivery.Nack. + +Either Delivery.Ack, Delivery.Reject or Delivery.Nack must be called for every +delivery that is not automatically acknowledged. 
+*/ +func (d Delivery) Reject(requeue bool) error { + if d.Acknowledger == nil { + return errDeliveryNotInitialized + } + return d.Acknowledger.Reject(d.DeliveryTag, requeue) +} + +/* +Nack negatively acknowledge the delivery of message(s) identified by the +delivery tag from either the client or server. + +When multiple is true, nack messages up to and including delivered messages up +until the delivery tag delivered on the same channel. + +When requeue is true, request the server to deliver this message to a different +consumer. If it is not possible or requeue is false, the message will be +dropped or delivered to a server configured dead-letter queue. + +This method must not be used to select or requeue messages the client wishes +not to handle, rather it is to inform the server that the client is incapable +of handling this message at this time. + +Either Delivery.Ack, Delivery.Reject or Delivery.Nack must be called for every +delivery that is not automatically acknowledged. +*/ +func (d Delivery) Nack(multiple, requeue bool) error { + if d.Acknowledger == nil { + return errDeliveryNotInitialized + } + return d.Acknowledger.Nack(d.DeliveryTag, multiple, requeue) +} diff --git a/vendor/github.com/streadway/amqp/delivery_test.go b/vendor/github.com/streadway/amqp/delivery_test.go new file mode 100644 index 0000000..f126f87 --- /dev/null +++ b/vendor/github.com/streadway/amqp/delivery_test.go @@ -0,0 +1,33 @@ +package amqp + +import "testing" + +func shouldNotPanic(t *testing.T) { + if err := recover(); err != nil { + t.Fatalf("should not panic, got: %s", err) + } +} + +// A closed delivery chan could produce zero value. Ack/Nack/Reject on these +// deliveries can produce a nil pointer panic. Instead return an error when +// the method can never be successful. 
+func TestAckZeroValueAcknowledgerDoesNotPanic(t *testing.T) { + defer shouldNotPanic(t) + if err := (Delivery{}).Ack(false); err == nil { + t.Errorf("expected Delivery{}.Ack to error") + } +} + +func TestNackZeroValueAcknowledgerDoesNotPanic(t *testing.T) { + defer shouldNotPanic(t) + if err := (Delivery{}).Nack(false, false); err == nil { + t.Errorf("expected Delivery{}.Ack to error") + } +} + +func TestRejectZeroValueAcknowledgerDoesNotPanic(t *testing.T) { + defer shouldNotPanic(t) + if err := (Delivery{}).Reject(false); err == nil { + t.Errorf("expected Delivery{}.Ack to error") + } +} diff --git a/vendor/github.com/streadway/amqp/doc.go b/vendor/github.com/streadway/amqp/doc.go new file mode 100644 index 0000000..76bf3e5 --- /dev/null +++ b/vendor/github.com/streadway/amqp/doc.go @@ -0,0 +1,108 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +/* +Package amqp is an AMQP 0.9.1 client with RabbitMQ extensions + +Understand the AMQP 0.9.1 messaging model by reviewing these links first. Much +of the terminology in this library directly relates to AMQP concepts. + + Resources + + http://www.rabbitmq.com/tutorials/amqp-concepts.html + http://www.rabbitmq.com/getstarted.html + http://www.rabbitmq.com/amqp-0-9-1-reference.html + +Design + +Most other broker clients publish to queues, but in AMQP, clients publish +Exchanges instead. AMQP is programmable, meaning that both the producers and +consumers agree on the configuration of the broker, instead requiring an +operator or system configuration that declares the logical topology in the +broker. The routing between producers and consumer queues is via Bindings. +These bindings form the logical topology of the broker. 
+ +In this library, a message sent from publisher is called a "Publishing" and a +message received to a consumer is called a "Delivery". The fields of +Publishings and Deliveries are close but not exact mappings to the underlying +wire format to maintain stronger types. Many other libraries will combine +message properties with message headers. In this library, the message well +known properties are strongly typed fields on the Publishings and Deliveries, +whereas the user defined headers are in the Headers field. + +The method naming closely matches the protocol's method name with positional +parameters mapping to named protocol message fields. The motivation here is to +present a comprehensive view over all possible interactions with the server. + +Generally, methods that map to protocol methods of the "basic" class will be +elided in this interface, and "select" methods of various channel mode selectors +will be elided for example Channel.Confirm and Channel.Tx. + +The library is intentionally designed to be synchronous, where responses for +each protocol message are required to be received in an RPC manner. Some +methods have a noWait parameter like Channel.QueueDeclare, and some methods are +asynchronous like Channel.Publish. The error values should still be checked for +these methods as they will indicate IO failures like when the underlying +connection closes. + +Asynchronous Events + +Clients of this library may be interested in receiving some of the protocol +messages other than Deliveries like basic.ack methods while a channel is in +confirm mode. + +The Notify* methods with Connection and Channel receivers model the pattern of +asynchronous events like closes due to exceptions, or messages that are sent out +of band from an RPC call like basic.ack or basic.flow. + +Any asynchronous events, including Deliveries and Publishings must always have +a receiver until the corresponding chans are closed. 
Without asynchronous +receivers, the sychronous methods will block. + +Use Case + +It's important as a client to an AMQP topology to ensure the state of the +broker matches your expectations. For both publish and consume use cases, +make sure you declare the queues, exchanges and bindings you expect to exist +prior to calling Channel.Publish or Channel.Consume. + + // Connections start with amqp.Dial() typically from a command line argument + // or environment variable. + connection, err := amqp.Dial(os.Getenv("AMQP_URL")) + + // To cleanly shutdown by flushing kernel buffers, make sure to close and + // wait for the response. + defer connection.Close() + + // Most operations happen on a channel. If any error is returned on a + // channel, the channel will no longer be valid, throw it away and try with + // a different channel. If you use many channels, it's useful for the + // server to + channel, err := connection.Channel() + + // Declare your topology here, if it doesn't exist, it will be created, if + // it existed already and is not what you expect, then that's considered an + // error. + + // Use your connection on this topology with either Publish or Consume, or + // inspect your queues with QueueInspect. It's unwise to mix Publish and + // Consume to let TCP do its job well. + +SSL/TLS - Secure connections + +When Dial encounters an amqps:// scheme, it will use the zero value of a +tls.Config. This will only perform server certificate and host verification. + +Use DialTLS when you wish to provide a client certificate (recommended), +include a private certificate authority's certificate in the cert chain for +server validity, or run insecure by not verifying the server certificate dial +your own connection. DialTLS will use the provided tls.Config when it +encounters an amqps:// scheme and will dial a plain connection when it +encounters an amqp:// scheme. 
+ +SSL/TLS in RabbitMQ is documented here: http://www.rabbitmq.com/ssl.html + +*/ +package amqp diff --git a/vendor/github.com/streadway/amqp/example_client_test.go b/vendor/github.com/streadway/amqp/example_client_test.go new file mode 100644 index 0000000..b7f1cfc --- /dev/null +++ b/vendor/github.com/streadway/amqp/example_client_test.go @@ -0,0 +1,286 @@ +package amqp_test + +import ( + "errors" + "fmt" + "github.com/streadway/amqp" + "log" + "os" + "time" +) + +// This exports a Session object that wraps this library. It +// automatically reconnects when the connection fails, and +// blocks all pushes until the connection succeeds. It also +// confirms every outgoing message, so none are lost. +// It doesn't automatically ack each message, but leaves that +// to the parent process, since it is usage-dependent. +// +// Try running this in one terminal, and `rabbitmq-server` in another. +// Stop & restart RabbitMQ to see how the queue reacts. +func Example() { + name := "job_queue" + addr := "amqp://guest:guest@localhost:5672/" + queue := New(name, addr) + message := []byte("message") + // Attempt to push a message every 2 seconds + for { + time.Sleep(time.Second * 3) + if err := queue.Push(message); err != nil { + fmt.Printf("Push failed: %s\n", err) + } else { + fmt.Println("Push succeeded!") + } + } +} + +type Session struct { + name string + logger *log.Logger + connection *amqp.Connection + channel *amqp.Channel + done chan bool + notifyConnClose chan *amqp.Error + notifyChanClose chan *amqp.Error + notifyConfirm chan amqp.Confirmation + isReady bool +} + +const ( + // When reconnecting to the server after connection failure + reconnectDelay = 5 * time.Second + + // When setting up the channel after a channel exception + reInitDelay = 2 * time.Second + + // When resending messages the server didn't confirm + resendDelay = 5 * time.Second +) + +var ( + errNotConnected = errors.New("not connected to a server") + errAlreadyClosed = errors.New("already closed: 
not connected to the server") + errShutdown = errors.New("session is shutting down") +) + +// New creates a new consumer state instance, and automatically +// attempts to connect to the server. +func New(name string, addr string) *Session { + session := Session{ + logger: log.New(os.Stdout, "", log.LstdFlags), + name: name, + done: make(chan bool), + } + go session.handleReconnect(addr) + return &session +} + +// handleReconnect will wait for a connection error on +// notifyConnClose, and then continuously attempt to reconnect. +func (session *Session) handleReconnect(addr string) { + for { + session.isReady = false + log.Println("Attempting to connect") + + conn, err := session.connect(addr) + + if err != nil { + log.Println("Failed to connect. Retrying...") + + select { + case <-session.done: + return + case <-time.After(reconnectDelay): + } + continue + } + + if done := session.handleReInit(conn); done { + break + } + } +} + +// connect will create a new AMQP connection +func (session *Session) connect(addr string) (*amqp.Connection, error) { + conn, err := amqp.Dial(addr) + + if err != nil { + return nil, err + } + + session.changeConnection(conn) + log.Println("Connected!") + return conn, nil +} + +// handleReconnect will wait for a channel error +// and then continuously attempt to re-initialize both channels +func (session *Session) handleReInit(conn *amqp.Connection) bool { + for { + session.isReady = false + + err := session.init(conn) + + if err != nil { + log.Println("Failed to initialize channel. Retrying...") + + select { + case <-session.done: + return true + case <-time.After(reInitDelay): + } + continue + } + + select { + case <-session.done: + return true + case <-session.notifyConnClose: + log.Println("Connection closed. Reconnecting...") + return false + case <-session.notifyChanClose: + log.Println("Channel closed. 
Re-running init...") + } + } +} + +// init will initialize channel & declare queue +func (session *Session) init(conn *amqp.Connection) error { + ch, err := conn.Channel() + + if err != nil { + return err + } + + err = ch.Confirm(false) + + if err != nil { + return err + } + _, err = ch.QueueDeclare( + session.name, + false, // Durable + false, // Delete when unused + false, // Exclusive + false, // No-wait + nil, // Arguments + ) + + if err != nil { + return err + } + + session.changeChannel(ch) + session.isReady = true + log.Println("Setup!") + + return nil +} + +// changeConnection takes a new connection to the queue, +// and updates the close listener to reflect this. +func (session *Session) changeConnection(connection *amqp.Connection) { + session.connection = connection + session.notifyConnClose = make(chan *amqp.Error) + session.connection.NotifyClose(session.notifyConnClose) +} + +// changeChannel takes a new channel to the queue, +// and updates the channel listeners to reflect this. +func (session *Session) changeChannel(channel *amqp.Channel) { + session.channel = channel + session.notifyChanClose = make(chan *amqp.Error) + session.notifyConfirm = make(chan amqp.Confirmation) + session.channel.NotifyClose(session.notifyChanClose) + session.channel.NotifyPublish(session.notifyConfirm) +} + +// Push will push data onto the queue, and wait for a confirm. +// If no confirms are received until within the resendTimeout, +// it continuously re-sends messages until a confirm is received. +// This will block until the server sends a confirm. Errors are +// only returned if the push action itself fails, see UnsafePush. +func (session *Session) Push(data []byte) error { + if !session.isReady { + return errors.New("failed to push push: not connected") + } + for { + err := session.UnsafePush(data) + if err != nil { + session.logger.Println("Push failed. 
Retrying...") + select { + case <-session.done: + return errShutdown + case <-time.After(resendDelay): + } + continue + } + select { + case confirm := <-session.notifyConfirm: + if confirm.Ack { + session.logger.Println("Push confirmed!") + return nil + } + case <-time.After(resendDelay): + } + session.logger.Println("Push didn't confirm. Retrying...") + } +} + +// UnsafePush will push to the queue without checking for +// confirmation. It returns an error if it fails to connect. +// No guarantees are provided for whether the server will +// recieve the message. +func (session *Session) UnsafePush(data []byte) error { + if !session.isReady { + return errNotConnected + } + return session.channel.Publish( + "", // Exchange + session.name, // Routing key + false, // Mandatory + false, // Immediate + amqp.Publishing{ + ContentType: "text/plain", + Body: data, + }, + ) +} + +// Stream will continuously put queue items on the channel. +// It is required to call delivery.Ack when it has been +// successfully processed, or delivery.Nack when it fails. +// Ignoring this will cause data to build up on the server. +func (session *Session) Stream() (<-chan amqp.Delivery, error) { + if !session.isReady { + return nil, errNotConnected + } + return session.channel.Consume( + session.name, + "", // Consumer + false, // Auto-Ack + false, // Exclusive + false, // No-local + false, // No-Wait + nil, // Args + ) +} + +// Close will cleanly shutdown the channel and connection. 
+func (session *Session) Close() error { + if !session.isReady { + return errAlreadyClosed + } + err := session.channel.Close() + if err != nil { + return err + } + err = session.connection.Close() + if err != nil { + return err + } + close(session.done) + session.isReady = false + return nil +} diff --git a/vendor/github.com/streadway/amqp/examples_test.go b/vendor/github.com/streadway/amqp/examples_test.go new file mode 100644 index 0000000..278a6dd --- /dev/null +++ b/vendor/github.com/streadway/amqp/examples_test.go @@ -0,0 +1,402 @@ +package amqp_test + +import ( + "crypto/tls" + "crypto/x509" + "io/ioutil" + "log" + "net" + "runtime" + "time" + + "github.com/streadway/amqp" +) + +func ExampleConfig_timeout() { + // Provide your own anonymous Dial function that delgates to net.DialTimout + // for custom timeouts + + conn, err := amqp.DialConfig("amqp:///", amqp.Config{ + Dial: func(network, addr string) (net.Conn, error) { + return net.DialTimeout(network, addr, 2*time.Second) + }, + }) + + log.Printf("conn: %v, err: %v", conn, err) +} + +func ExampleDialTLS() { + // This example assume you have a RabbitMQ node running on localhost + // with TLS enabled. + // + // The easiest way to create the CA, certificates and keys required for these + // examples is by using tls-gen: https://github.com/michaelklishin/tls-gen + // + // A comprehensive RabbitMQ TLS guide can be found at + // http://www.rabbitmq.com/ssl.html + // + // Once you have the required TLS files in place, use the following + // rabbitmq.config example for the RabbitMQ node that you will run on + // localhost: + // + // [ + // {rabbit, [ + // {tcp_listeners, []}, % listens on 127.0.0.1:5672 + // {ssl_listeners, [5671]}, % listens on 0.0.0.0:5671 + // {ssl_options, [{cacertfile,"/path/to/your/testca/cacert.pem"}, + // {certfile,"/path/to/your/server/cert.pem"}, + // {keyfile,"/path/to/your/server/key.pem"}, + // {verify,verify_peer}, + // {fail_if_no_peer_cert,true}]} + // ]} + // ]. 
+ // + // + // In the above rabbitmq.config example, we are disabling the plain AMQP port + // and verifying that clients and fail if no certificate is presented. + // + // The self-signing certificate authority's certificate (cacert.pem) must be + // included in the RootCAs to be trusted, otherwise the server certificate + // will fail certificate verification. + // + // Alternatively to adding it to the tls.Config. you can add the CA's cert to + // your system's root CAs. The tls package will use the system roots + // specific to each support OS. Under OS X, add (drag/drop) cacert.pem + // file to the 'Certificates' section of KeyChain.app to add and always + // trust. You can also add it via the command line: + // + // security add-certificate testca/cacert.pem + // security add-trusted-cert testca/cacert.pem + // + // If you depend on the system root CAs, then use nil for the RootCAs field + // so the system roots will be loaded instead. + // + // Server names are validated by the crypto/tls package, so the server + // certificate must be made for the hostname in the URL. Find the commonName + // (CN) and make sure the hostname in the URL matches this common name. Per + // the RabbitMQ instructions (or tls-gen) for a self-signed cert, this defaults to the + // current hostname. + // + // openssl x509 -noout -in /path/to/certificate.pem -subject + // + // If your server name in your certificate is different than the host you are + // connecting to, set the hostname used for verification in + // ServerName field of the tls.Config struct. + cfg := new(tls.Config) + + // see at the top + cfg.RootCAs = x509.NewCertPool() + + if ca, err := ioutil.ReadFile("testca/cacert.pem"); err == nil { + cfg.RootCAs.AppendCertsFromPEM(ca) + } + + // Move the client cert and key to a location specific to your application + // and load them here. 
+ + if cert, err := tls.LoadX509KeyPair("client/cert.pem", "client/key.pem"); err == nil { + cfg.Certificates = append(cfg.Certificates, cert) + } + + // see a note about Common Name (CN) at the top + conn, err := amqp.DialTLS("amqps://server-name-from-certificate/", cfg) + + log.Printf("conn: %v, err: %v", conn, err) +} + +func ExampleChannel_Confirm_bridge() { + // This example acts as a bridge, shoveling all messages sent from the source + // exchange "log" to destination exchange "log". + + // Confirming publishes can help from overproduction and ensure every message + // is delivered. + + // Setup the source of the store and forward + source, err := amqp.Dial("amqp://source/") + if err != nil { + log.Fatalf("connection.open source: %s", err) + } + defer source.Close() + + chs, err := source.Channel() + if err != nil { + log.Fatalf("channel.open source: %s", err) + } + + if err := chs.ExchangeDeclare("log", "topic", true, false, false, false, nil); err != nil { + log.Fatalf("exchange.declare destination: %s", err) + } + + if _, err := chs.QueueDeclare("remote-tee", true, true, false, false, nil); err != nil { + log.Fatalf("queue.declare source: %s", err) + } + + if err := chs.QueueBind("remote-tee", "#", "logs", false, nil); err != nil { + log.Fatalf("queue.bind source: %s", err) + } + + shovel, err := chs.Consume("remote-tee", "shovel", false, false, false, false, nil) + if err != nil { + log.Fatalf("basic.consume source: %s", err) + } + + // Setup the destination of the store and forward + destination, err := amqp.Dial("amqp://destination/") + if err != nil { + log.Fatalf("connection.open destination: %s", err) + } + defer destination.Close() + + chd, err := destination.Channel() + if err != nil { + log.Fatalf("channel.open destination: %s", err) + } + + if err := chd.ExchangeDeclare("log", "topic", true, false, false, false, nil); err != nil { + log.Fatalf("exchange.declare destination: %s", err) + } + + // Buffer of 1 for our single outstanding publishing + 
confirms := chd.NotifyPublish(make(chan amqp.Confirmation, 1)) + + if err := chd.Confirm(false); err != nil { + log.Fatalf("confirm.select destination: %s", err) + } + + // Now pump the messages, one by one, a smarter implementation + // would batch the deliveries and use multiple ack/nacks + for { + msg, ok := <-shovel + if !ok { + log.Fatalf("source channel closed, see the reconnect example for handling this") + } + + err = chd.Publish("logs", msg.RoutingKey, false, false, amqp.Publishing{ + // Copy all the properties + ContentType: msg.ContentType, + ContentEncoding: msg.ContentEncoding, + DeliveryMode: msg.DeliveryMode, + Priority: msg.Priority, + CorrelationId: msg.CorrelationId, + ReplyTo: msg.ReplyTo, + Expiration: msg.Expiration, + MessageId: msg.MessageId, + Timestamp: msg.Timestamp, + Type: msg.Type, + UserId: msg.UserId, + AppId: msg.AppId, + + // Custom headers + Headers: msg.Headers, + + // And the body + Body: msg.Body, + }) + + if err != nil { + msg.Nack(false, false) + log.Fatalf("basic.publish destination: %+v", msg) + } + + // only ack the source delivery when the destination acks the publishing + if confirmed := <-confirms; confirmed.Ack { + msg.Ack(false) + } else { + msg.Nack(false, false) + } + } +} + +func ExampleChannel_Consume() { + // Connects opens an AMQP connection from the credentials in the URL. + conn, err := amqp.Dial("amqp://guest:guest@localhost:5672/") + if err != nil { + log.Fatalf("connection.open: %s", err) + } + defer conn.Close() + + c, err := conn.Channel() + if err != nil { + log.Fatalf("channel.open: %s", err) + } + + // We declare our topology on both the publisher and consumer to ensure they + // are the same. This is part of AMQP being a programmable messaging model. + // + // See the Channel.Publish example for the complimentary declare. 
+ err = c.ExchangeDeclare("logs", "topic", true, false, false, false, nil) + if err != nil { + log.Fatalf("exchange.declare: %s", err) + } + + // Establish our queue topologies that we are responsible for + type bind struct { + queue string + key string + } + + bindings := []bind{ + {"page", "alert"}, + {"email", "info"}, + {"firehose", "#"}, + } + + for _, b := range bindings { + _, err = c.QueueDeclare(b.queue, true, false, false, false, nil) + if err != nil { + log.Fatalf("queue.declare: %v", err) + } + + err = c.QueueBind(b.queue, b.key, "logs", false, nil) + if err != nil { + log.Fatalf("queue.bind: %v", err) + } + } + + // Set our quality of service. Since we're sharing 3 consumers on the same + // channel, we want at least 3 messages in flight. + err = c.Qos(3, 0, false) + if err != nil { + log.Fatalf("basic.qos: %v", err) + } + + // Establish our consumers that have different responsibilities. Our first + // two queues do not ack the messages on the server, so require to be acked + // on the client. + + pages, err := c.Consume("page", "pager", false, false, false, false, nil) + if err != nil { + log.Fatalf("basic.consume: %v", err) + } + + go func() { + for log := range pages { + // ... this consumer is responsible for sending pages per log + log.Ack(false) + } + }() + + // Notice how the concern for which messages arrive here are in the AMQP + // topology and not in the queue. We let the server pick a consumer tag this + // time. + + emails, err := c.Consume("email", "", false, false, false, false, nil) + if err != nil { + log.Fatalf("basic.consume: %v", err) + } + + go func() { + for log := range emails { + // ... this consumer is responsible for sending emails per log + log.Ack(false) + } + }() + + // This consumer requests that every message is acknowledged as soon as it's + // delivered. 
+ + firehose, err := c.Consume("firehose", "", true, false, false, false, nil) + if err != nil { + log.Fatalf("basic.consume: %v", err) + } + + // To show how to process the items in parallel, we'll use a work pool. + for i := 0; i < runtime.NumCPU(); i++ { + go func(work <-chan amqp.Delivery) { + for range work { + // ... this consumer pulls from the firehose and doesn't need to acknowledge + } + }(firehose) + } + + // Wait until you're ready to finish, could be a signal handler here. + time.Sleep(10 * time.Second) + + // Cancelling a consumer by name will finish the range and gracefully end the + // goroutine + err = c.Cancel("pager", false) + if err != nil { + log.Fatalf("basic.cancel: %v", err) + } + + // deferred closing the Connection will also finish the consumer's ranges of + // their delivery chans. If you need every delivery to be processed, make + // sure to wait for all consumers goroutines to finish before exiting your + // process. +} + +func ExampleChannel_Publish() { + // Connects opens an AMQP connection from the credentials in the URL. + conn, err := amqp.Dial("amqp://guest:guest@localhost:5672/") + if err != nil { + log.Fatalf("connection.open: %s", err) + } + + // This waits for a server acknowledgment which means the sockets will have + // flushed all outbound publishings prior to returning. It's important to + // block on Close to not lose any publishings. + defer conn.Close() + + c, err := conn.Channel() + if err != nil { + log.Fatalf("channel.open: %s", err) + } + + // We declare our topology on both the publisher and consumer to ensure they + // are the same. This is part of AMQP being a programmable messaging model. + // + // See the Channel.Consume example for the complimentary declare. + err = c.ExchangeDeclare("logs", "topic", true, false, false, false, nil) + if err != nil { + log.Fatalf("exchange.declare: %v", err) + } + + // Prepare this message to be persistent. Your publishing requirements may + // be different. 
+ msg := amqp.Publishing{ + DeliveryMode: amqp.Persistent, + Timestamp: time.Now(), + ContentType: "text/plain", + Body: []byte("Go Go AMQP!"), + } + + // This is not a mandatory delivery, so it will be dropped if there are no + // queues bound to the logs exchange. + err = c.Publish("logs", "info", false, false, msg) + if err != nil { + // Since publish is asynchronous this can happen if the network connection + // is reset or if the server has run out of resources. + log.Fatalf("basic.publish: %v", err) + } +} + +func publishAllTheThings(conn *amqp.Connection) { + // ... snarf snarf, barf barf +} + +func ExampleConnection_NotifyBlocked() { + // Simply logs when the server throttles the TCP connection for publishers + + // Test this by tuning your server to have a low memory watermark: + // rabbitmqctl set_vm_memory_high_watermark 0.00000001 + + conn, err := amqp.Dial("amqp://guest:guest@localhost:5672/") + if err != nil { + log.Fatalf("connection.open: %s", err) + } + defer conn.Close() + + blockings := conn.NotifyBlocked(make(chan amqp.Blocking)) + go func() { + for b := range blockings { + if b.Active { + log.Printf("TCP blocked: %q", b.Reason) + } else { + log.Printf("TCP unblocked") + } + } + }() + + // Your application domain channel setup publishings + publishAllTheThings(conn) +} diff --git a/vendor/github.com/streadway/amqp/fuzz.go b/vendor/github.com/streadway/amqp/fuzz.go new file mode 100644 index 0000000..16e626c --- /dev/null +++ b/vendor/github.com/streadway/amqp/fuzz.go @@ -0,0 +1,17 @@ +// +build gofuzz + +package amqp + +import "bytes" + +func Fuzz(data []byte) int { + r := reader{bytes.NewReader(data)} + frame, err := r.ReadFrame() + if err != nil { + if frame != nil { + panic("frame is not nil") + } + return 0 + } + return 1 +} diff --git a/vendor/github.com/streadway/amqp/gen.sh b/vendor/github.com/streadway/amqp/gen.sh new file mode 100755 index 0000000..d46e19b --- /dev/null +++ b/vendor/github.com/streadway/amqp/gen.sh @@ -0,0 +1,2 @@ 
+#!/bin/sh +go run spec/gen.go < spec/amqp0-9-1.stripped.extended.xml | gofmt > spec091.go diff --git a/vendor/github.com/streadway/amqp/go.mod b/vendor/github.com/streadway/amqp/go.mod new file mode 100644 index 0000000..4eeab33 --- /dev/null +++ b/vendor/github.com/streadway/amqp/go.mod @@ -0,0 +1,3 @@ +module github.com/streadway/amqp + +go 1.10 diff --git a/vendor/github.com/streadway/amqp/integration_test.go b/vendor/github.com/streadway/amqp/integration_test.go new file mode 100644 index 0000000..b4cf741 --- /dev/null +++ b/vendor/github.com/streadway/amqp/integration_test.go @@ -0,0 +1,1878 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +// +build integration + +package amqp + +import ( + "bytes" + devrand "crypto/rand" + "encoding/binary" + "fmt" + "hash/crc32" + "io" + "math/rand" + "net" + "os" + "reflect" + "strconv" + "sync" + "testing" + "testing/quick" + "time" +) + +func TestIntegrationOpenClose(t *testing.T) { + if c := integrationConnection(t, "open-close"); c != nil { + t.Logf("have connection, calling connection close") + if err := c.Close(); err != nil { + t.Fatalf("connection close: %s", err) + } + t.Logf("connection close OK") + } +} + +func TestIntegrationOpenCloseChannel(t *testing.T) { + if c := integrationConnection(t, "channel"); c != nil { + defer c.Close() + + ch, err := c.Channel() + if err != nil { + t.Fatalf("create channel 1: %s", err) + } + ch.Close() + } +} + +func TestIntegrationHighChannelChurnInTightLoop(t *testing.T) { + if c := integrationConnection(t, "channel churn"); c != nil { + defer c.Close() + + for i := 0; i < 1000; i++ { + ch, err := c.Channel() + if err != nil { + t.Fatalf("create channel 1: %s", err) + } + ch.Close() + } + } +} + +func TestIntegrationOpenConfig(t *testing.T) { + config := Config{} + + c, err := 
DialConfig(integrationURLFromEnv(), config) + if err != nil { + t.Fatalf("expected to dial with config %+v integration server: %s", config, err) + } + + if _, err := c.Channel(); err != nil { + t.Fatalf("expected to open channel: %s", err) + } + + if err := c.Close(); err != nil { + t.Fatalf("connection close: %s", err) + } +} + +func TestIntegrationOpenConfigWithNetDial(t *testing.T) { + config := Config{Dial: net.Dial} + + c, err := DialConfig(integrationURLFromEnv(), config) + if err != nil { + t.Errorf("expected to dial with config %+v integration server: %s", config, err) + } + + if _, err := c.Channel(); err != nil { + t.Fatalf("expected to open channel: %s", err) + } + + if err := c.Close(); err != nil { + t.Fatalf("connection close: %s", err) + } +} + +func TestIntegrationLocalAddr(t *testing.T) { + config := Config{} + + c, err := DialConfig(integrationURLFromEnv(), config) + defer c.Close() + if err != nil { + t.Errorf("expected to dial with config %+v integration server: %s", config, err) + } + + a := c.LocalAddr() + _, portString, err := net.SplitHostPort(a.String()) + if err != nil { + t.Errorf("expected to get a local network address with config %+v integration server: %s", config, a.String()) + } + + port, err := strconv.Atoi(portString) + if err != nil { + t.Errorf("expected to get a TCP port number with config %+v integration server: %s", config, err) + } + t.Logf("Connected to port %d\n", port) +} + +// https://github.com/streadway/amqp/issues/94 +func TestExchangePassiveOnMissingExchangeShouldError(t *testing.T) { + c := integrationConnection(t, "exch") + if c != nil { + defer c.Close() + + ch, err := c.Channel() + if err != nil { + t.Fatalf("create channel 1: %s", err) + } + defer ch.Close() + + if err := ch.ExchangeDeclarePassive( + "test-integration-missing-passive-exchange", + "direct", // type + false, // duration (note: is durable) + true, // auto-delete + false, // internal + false, // nowait + nil, // args + ); err == nil { + 
t.Fatal("ExchangeDeclarePassive of a missing exchange should return error") + } + } +} + +// https://github.com/streadway/amqp/issues/94 +func TestIntegrationExchangeDeclarePassiveOnDeclaredShouldNotError(t *testing.T) { + c := integrationConnection(t, "exch") + if c != nil { + defer c.Close() + + exchange := "test-integration-declared-passive-exchange" + + ch, err := c.Channel() + if err != nil { + t.Fatalf("create channel: %s", err) + } + defer ch.Close() + + if err := ch.ExchangeDeclare( + exchange, // name + "direct", // type + false, // durable + true, // auto-delete + false, // internal + false, // nowait + nil, // args + ); err != nil { + t.Fatalf("declare exchange: %s", err) + } + + if err := ch.ExchangeDeclarePassive( + exchange, // name + "direct", // type + false, // durable + true, // auto-delete + false, // internal + false, // nowait + nil, // args + ); err != nil { + t.Fatalf("ExchangeDeclarePassive on a declared exchange should not error, got: %q", err) + } + } +} + +func TestIntegrationExchange(t *testing.T) { + c := integrationConnection(t, "exch") + if c != nil { + defer c.Close() + + channel, err := c.Channel() + if err != nil { + t.Fatalf("create channel: %s", err) + } + t.Logf("create channel OK") + + exchange := "test-integration-exchange" + + if err := channel.ExchangeDeclare( + exchange, // name + "direct", // type + false, // duration + true, // auto-delete + false, // internal + false, // nowait + nil, // args + ); err != nil { + t.Fatalf("declare exchange: %s", err) + } + t.Logf("declare exchange OK") + + if err := channel.ExchangeDelete(exchange, false, false); err != nil { + t.Fatalf("delete exchange: %s", err) + } + t.Logf("delete exchange OK") + + if err := channel.Close(); err != nil { + t.Fatalf("close channel: %s", err) + } + t.Logf("close channel OK") + } +} + +// https://github.com/streadway/amqp/issues/94 +func TestIntegrationQueueDeclarePassiveOnMissingExchangeShouldError(t *testing.T) { + c := integrationConnection(t, 
"queue") + if c != nil { + defer c.Close() + + ch, err := c.Channel() + if err != nil { + t.Fatalf("create channel1: %s", err) + } + defer ch.Close() + + if _, err := ch.QueueDeclarePassive( + "test-integration-missing-passive-queue", // name + false, // duration (note: not durable) + true, // auto-delete + false, // exclusive + false, // noWait + nil, // arguments + ); err == nil { + t.Fatal("QueueDeclarePassive of a missing queue should error") + } + } +} + +// https://github.com/streadway/amqp/issues/94 +func TestIntegrationPassiveQueue(t *testing.T) { + c := integrationConnection(t, "queue") + if c != nil { + defer c.Close() + + name := "test-integration-declared-passive-queue" + + ch, err := c.Channel() + if err != nil { + t.Fatalf("create channel1: %s", err) + } + defer ch.Close() + + if _, err := ch.QueueDeclare( + name, // name + false, // durable + true, // auto-delete + false, // exclusive + false, // noWait + nil, // arguments + ); err != nil { + t.Fatalf("queue declare: %s", err) + } + + if _, err := ch.QueueDeclarePassive( + name, // name + false, // durable + true, // auto-delete + false, // exclusive + false, // noWait + nil, // arguments + ); err != nil { + t.Fatalf("QueueDeclarePassive on declared queue should not error, got: %q", err) + } + + if _, err := ch.QueueDeclarePassive( + name, // name + true, // durable (note: differs) + true, // auto-delete + false, // exclusive + false, // noWait + nil, // arguments + ); err != nil { + t.Fatalf("QueueDeclarePassive on declared queue with different flags should error") + } + } +} + +func TestIntegrationBasicQueueOperations(t *testing.T) { + c := integrationConnection(t, "queue") + if c != nil { + defer c.Close() + + channel, err := c.Channel() + if err != nil { + t.Fatalf("create channel: %s", err) + } + t.Logf("create channel OK") + + exchangeName := "test-basic-ops-exchange" + queueName := "test-basic-ops-queue" + + deleteQueueFirstOptions := []bool{true, false} + for _, deleteQueueFirst := range 
deleteQueueFirstOptions { + + if err := channel.ExchangeDeclare( + exchangeName, // name + "direct", // type + true, // duration (note: is durable) + false, // auto-delete + false, // internal + false, // nowait + nil, // args + ); err != nil { + t.Fatalf("declare exchange: %s", err) + } + t.Logf("declare exchange OK") + + if _, err := channel.QueueDeclare( + queueName, // name + true, // duration (note: durable) + false, // auto-delete + false, // exclusive + false, // noWait + nil, // arguments + ); err != nil { + t.Fatalf("queue declare: %s", err) + } + t.Logf("declare queue OK") + + if err := channel.QueueBind( + queueName, // name + "", // routingKey + exchangeName, // sourceExchange + false, // noWait + nil, // arguments + ); err != nil { + t.Fatalf("queue bind: %s", err) + } + t.Logf("queue bind OK") + + if deleteQueueFirst { + if _, err := channel.QueueDelete( + queueName, // name + false, // ifUnused (false=be aggressive) + false, // ifEmpty (false=be aggressive) + false, // noWait + ); err != nil { + t.Fatalf("delete queue (first): %s", err) + } + t.Logf("delete queue (first) OK") + + if err := channel.ExchangeDelete(exchangeName, false, false); err != nil { + t.Fatalf("delete exchange (after delete queue): %s", err) + } + t.Logf("delete exchange (after delete queue) OK") + + } else { // deleteExchangeFirst + if err := channel.ExchangeDelete(exchangeName, false, false); err != nil { + t.Fatalf("delete exchange (first): %s", err) + } + t.Logf("delete exchange (first) OK") + + if _, err := channel.QueueInspect(queueName); err != nil { + t.Fatalf("inspect queue state after deleting exchange: %s", err) + } + t.Logf("queue properly remains after exchange is deleted") + + if _, err := channel.QueueDelete( + queueName, + false, // ifUnused + false, // ifEmpty + false, // noWait + ); err != nil { + t.Fatalf("delete queue (after delete exchange): %s", err) + } + t.Logf("delete queue (after delete exchange) OK") + } + } + + if err := channel.Close(); err != nil { + 
t.Fatalf("close channel: %s", err) + } + t.Logf("close channel OK") + } +} + +func TestIntegrationConnectionNegotiatesMaxChannels(t *testing.T) { + config := Config{ChannelMax: 0} + + c, err := DialConfig(integrationURLFromEnv(), config) + if err != nil { + t.Errorf("expected to dial with config %+v integration server: %s", config, err) + } + defer c.Close() + + if want, got := defaultChannelMax, c.Config.ChannelMax; want != got { + t.Fatalf("expected connection to negotiate uint16 (%d) channels, got: %d", want, got) + } +} + +func TestIntegrationConnectionNegotiatesClientMaxChannels(t *testing.T) { + config := Config{ChannelMax: 16} + + c, err := DialConfig(integrationURLFromEnv(), config) + if err != nil { + t.Errorf("expected to dial with config %+v integration server: %s", config, err) + } + defer c.Close() + + if want, got := config.ChannelMax, c.Config.ChannelMax; want != got { + t.Fatalf("expected client specified channel limit after handshake %d, got: %d", want, got) + } +} + +func TestIntegrationChannelIDsExhausted(t *testing.T) { + config := Config{ChannelMax: 16} + + c, err := DialConfig(integrationURLFromEnv(), config) + if err != nil { + t.Errorf("expected to dial with config %+v integration server: %s", config, err) + } + defer c.Close() + + for i := 1; i <= c.Config.ChannelMax; i++ { + if _, err := c.Channel(); err != nil { + t.Fatalf("expected allocating all channel ids to succed, failed on %d with %v", i, err) + } + } + + if _, err := c.Channel(); err != ErrChannelMax { + t.Fatalf("expected allocating all channels to produce the client side error %#v, got: %#v", ErrChannelMax, err) + } +} + +func TestIntegrationChannelClosing(t *testing.T) { + c := integrationConnection(t, "closings") + if c != nil { + defer c.Close() + + // This function is run on every channel after it is successfully + // opened. It can do something to verify something. It should be + // quick; many channels may be opened! 
+ f := func(t *testing.T, c *Channel) { + return + } + + // open and close + channel, err := c.Channel() + if err != nil { + t.Fatalf("basic create channel: %s", err) + } + t.Logf("basic create channel OK") + + if err := channel.Close(); err != nil { + t.Fatalf("basic close channel: %s", err) + } + t.Logf("basic close channel OK") + + // deferred close + signal := make(chan bool) + go func() { + channel, err := c.Channel() + if err != nil { + t.Fatalf("second create channel: %s", err) + } + t.Logf("second create channel OK") + + <-signal // a bit of synchronization + f(t, channel) + + defer func() { + if err := channel.Close(); err != nil { + t.Fatalf("deferred close channel: %s", err) + } + t.Logf("deferred close channel OK") + signal <- true + }() + }() + signal <- true + select { + case <-signal: + t.Logf("(got close signal OK)") + break + case <-time.After(250 * time.Millisecond): + t.Fatalf("deferred close: timeout") + } + + // multiple channels + for _, n := range []int{2, 4, 8, 16, 32, 64, 128, 256} { + channels := make([]*Channel, n) + for i := 0; i < n; i++ { + var err error + if channels[i], err = c.Channel(); err != nil { + t.Fatalf("create channel %d/%d: %s", i+1, n, err) + } + } + f(t, channel) + for i, channel := range channels { + if err := channel.Close(); err != nil { + t.Fatalf("close channel %d/%d: %s", i+1, n, err) + } + } + t.Logf("created/closed %d channels OK", n) + } + + } +} + +func TestIntegrationMeaningfulChannelErrors(t *testing.T) { + c := integrationConnection(t, "pub") + if c != nil { + defer c.Close() + + ch, err := c.Channel() + if err != nil { + t.Fatalf("Could not create channel") + } + + queue := "test.integration.channel.error" + + _, err = ch.QueueDeclare(queue, false, true, false, false, nil) + if err != nil { + t.Fatalf("Could not declare") + } + + _, err = ch.QueueDeclare(queue, true, false, false, false, nil) + if err == nil { + t.Fatalf("Expected error, got nil") + } + + e, ok := err.(*Error) + if !ok { + 
t.Fatalf("Expected type Error response, got %T", err) + } + + if e.Code != PreconditionFailed { + t.Fatalf("Expected PreconditionFailed, got: %+v", e) + } + + _, err = ch.QueueDeclare(queue, false, true, false, false, nil) + if err != ErrClosed { + t.Fatalf("Expected channel to be closed, got: %T", err) + } + } +} + +// https://github.com/streadway/amqp/issues/6 +func TestIntegrationNonBlockingClose(t *testing.T) { + c := integrationConnection(t, "#6") + if c != nil { + defer c.Close() + + ch, err := c.Channel() + if err != nil { + t.Fatalf("Could not create channel") + } + + queue := "test.integration.blocking.close" + + _, err = ch.QueueDeclare(queue, false, true, false, false, nil) + if err != nil { + t.Fatalf("Could not declare") + } + + msgs, err := ch.Consume(queue, "", false, false, false, false, nil) + if err != nil { + t.Fatalf("Could not consume") + } + + // Simulate a consumer + go func() { + for range msgs { + t.Logf("Oh my, received message on an empty queue") + } + }() + + succeed := make(chan bool) + + go func() { + if err = ch.Close(); err != nil { + t.Fatalf("Close produced an error when it shouldn't") + } + succeed <- true + }() + + select { + case <-succeed: + break + case <-time.After(1 * time.Second): + t.Fatalf("Close timed out after 1s") + } + } +} + +func TestIntegrationPublishConsume(t *testing.T) { + queue := "test.integration.publish.consume" + + c1 := integrationConnection(t, "pub") + c2 := integrationConnection(t, "sub") + + if c1 != nil && c2 != nil { + defer c1.Close() + defer c2.Close() + + pub, _ := c1.Channel() + sub, _ := c2.Channel() + + pub.QueueDeclare(queue, false, true, false, false, nil) + sub.QueueDeclare(queue, false, true, false, false, nil) + defer pub.QueueDelete(queue, false, false, false) + + messages, _ := sub.Consume(queue, "", false, false, false, false, nil) + + pub.Publish("", queue, false, false, Publishing{Body: []byte("pub 1")}) + pub.Publish("", queue, false, false, Publishing{Body: []byte("pub 2")}) + 
pub.Publish("", queue, false, false, Publishing{Body: []byte("pub 3")}) + + assertConsumeBody(t, messages, []byte("pub 1")) + assertConsumeBody(t, messages, []byte("pub 2")) + assertConsumeBody(t, messages, []byte("pub 3")) + } +} + +func TestIntegrationConsumeFlow(t *testing.T) { + queue := "test.integration.consumer-flow" + + c1 := integrationConnection(t, "pub-flow") + c2 := integrationConnection(t, "sub-flow") + + if c1 != nil && c2 != nil { + defer c1.Close() + defer c2.Close() + + pub, _ := c1.Channel() + sub, _ := c2.Channel() + + pub.QueueDeclare(queue, false, true, false, false, nil) + sub.QueueDeclare(queue, false, true, false, false, nil) + defer pub.QueueDelete(queue, false, false, false) + + sub.Qos(1, 0, false) + + messages, _ := sub.Consume(queue, "", false, false, false, false, nil) + + pub.Publish("", queue, false, false, Publishing{Body: []byte("pub 1")}) + pub.Publish("", queue, false, false, Publishing{Body: []byte("pub 2")}) + + msg := assertConsumeBody(t, messages, []byte("pub 1")) + + if err := sub.Flow(false); err.(*Error).Code == NotImplemented { + t.Log("flow control is not supported on this version of rabbitmq") + return + } + + msg.Ack(false) + + select { + case <-messages: + t.Fatalf("message was delivered when flow was not active") + default: + } + + sub.Flow(true) + + msg = assertConsumeBody(t, messages, []byte("pub 2")) + msg.Ack(false) + } +} + +func TestIntegrationRecoverNotImplemented(t *testing.T) { + queue := "test.recover" + + if c, ch := integrationQueue(t, queue); c != nil { + if product, ok := c.Properties["product"]; ok && product.(string) == "RabbitMQ" { + defer c.Close() + + err := ch.Recover(false) + + if ex, ok := err.(*Error); !ok || ex.Code != 540 { + t.Fatalf("Expected NOT IMPLEMENTED got: %v", ex) + } + } + } +} + +// This test is driven by a private API to simulate the server sending a channelFlow message +func TestIntegrationPublishFlow(t *testing.T) { + // TODO - no idea how to test without affecting the server 
or mucking internal APIs + // i'd like to make sure the RW lock can be held by multiple publisher threads + // and that multiple channelFlow messages do not block the dispatch thread +} + +func TestIntegrationConsumeCancel(t *testing.T) { + queue := "test.integration.consume-cancel" + + c := integrationConnection(t, "pub") + + if c != nil { + defer c.Close() + + ch, _ := c.Channel() + + ch.QueueDeclare(queue, false, true, false, false, nil) + defer ch.QueueDelete(queue, false, false, false) + + messages, _ := ch.Consume(queue, "integration-tag", false, false, false, false, nil) + + ch.Publish("", queue, false, false, Publishing{Body: []byte("1")}) + + assertConsumeBody(t, messages, []byte("1")) + + err := ch.Cancel("integration-tag", false) + if err != nil { + t.Fatalf("error cancelling the consumer: %v", err) + } + + ch.Publish("", queue, false, false, Publishing{Body: []byte("2")}) + + select { + case <-time.After(100 * time.Millisecond): + t.Fatalf("Timeout on Close") + case _, ok := <-messages: + if ok { + t.Fatalf("Extra message on consumer when consumer should have been closed") + } + } + } +} + +func (c *Connection) Generate(r *rand.Rand, _ int) reflect.Value { + urlStr := os.Getenv("AMQP_URL") + if urlStr == "" { + return reflect.ValueOf(nil) + } + + conn, err := Dial(urlStr) + if err != nil { + return reflect.ValueOf(nil) + } + + return reflect.ValueOf(conn) +} + +func (c Publishing) Generate(r *rand.Rand, _ int) reflect.Value { + var ok bool + var t reflect.Value + + p := Publishing{} + //p.DeliveryMode = uint8(r.Intn(3)) + //p.Priority = uint8(r.Intn(8)) + + if r.Intn(2) > 0 { + p.ContentType = "application/octet-stream" + } + + if r.Intn(2) > 0 { + p.ContentEncoding = "gzip" + } + + if r.Intn(2) > 0 { + p.CorrelationId = fmt.Sprintf("%d", r.Int()) + } + + if r.Intn(2) > 0 { + p.ReplyTo = fmt.Sprintf("%d", r.Int()) + } + + if r.Intn(2) > 0 { + p.MessageId = fmt.Sprintf("%d", r.Int()) + } + + if r.Intn(2) > 0 { + p.Type = fmt.Sprintf("%d", r.Int()) + } + 
+ if r.Intn(2) > 0 { + p.AppId = fmt.Sprintf("%d", r.Int()) + } + + if r.Intn(2) > 0 { + p.Timestamp = time.Unix(r.Int63(), r.Int63()) + } + + if t, ok = quick.Value(reflect.TypeOf(p.Body), r); ok { + p.Body = t.Bytes() + } + + return reflect.ValueOf(p) +} + +func TestQuickPublishOnly(t *testing.T) { + if c := integrationConnection(t, "quick"); c != nil { + defer c.Close() + pub, err := c.Channel() + queue := "test-publish" + + if _, err = pub.QueueDeclare(queue, false, true, false, false, nil); err != nil { + t.Errorf("Failed to declare: %s", err) + return + } + + defer pub.QueueDelete(queue, false, false, false) + + quick.Check(func(msg Publishing) bool { + return pub.Publish("", queue, false, false, msg) == nil + }, nil) + } +} + +func TestPublishEmptyBody(t *testing.T) { + c := integrationConnection(t, "empty") + if c != nil { + defer c.Close() + + ch, err := c.Channel() + if err != nil { + t.Errorf("Failed to create channel") + return + } + + queue := "test-TestPublishEmptyBody" + + if _, err := ch.QueueDeclare(queue, false, true, false, false, nil); err != nil { + t.Fatalf("Could not declare") + } + + messages, err := ch.Consume(queue, "", false, false, false, false, nil) + if err != nil { + t.Fatalf("Could not consume") + } + + err = ch.Publish("", queue, false, false, Publishing{}) + if err != nil { + t.Fatalf("Could not publish") + } + + select { + case msg := <-messages: + if len(msg.Body) != 0 { + t.Errorf("Received non empty body") + } + case <-time.After(200 * time.Millisecond): + t.Errorf("Timeout on receive") + } + } +} + +func TestPublishEmptyBodyWithHeadersIssue67(t *testing.T) { + c := integrationConnection(t, "issue67") + if c != nil { + defer c.Close() + + ch, err := c.Channel() + if err != nil { + t.Errorf("Failed to create channel") + return + } + + queue := "test-TestPublishEmptyBodyWithHeaders" + + if _, err := ch.QueueDeclare(queue, false, true, false, false, nil); err != nil { + t.Fatalf("Could not declare") + } + + messages, err := 
ch.Consume(queue, "", false, false, false, false, nil) + if err != nil { + t.Fatalf("Could not consume") + } + + headers := Table{ + "ham": "spam", + } + + err = ch.Publish("", queue, false, false, Publishing{Headers: headers}) + if err != nil { + t.Fatalf("Could not publish") + } + + select { + case msg := <-messages: + if msg.Headers["ham"] == nil { + t.Fatalf("Headers aren't sent") + } + if msg.Headers["ham"] != "spam" { + t.Fatalf("Headers are wrong") + } + case <-time.After(200 * time.Millisecond): + t.Errorf("Timeout on receive") + } + } +} + +func TestQuickPublishConsumeOnly(t *testing.T) { + c1 := integrationConnection(t, "quick-pub") + c2 := integrationConnection(t, "quick-sub") + + if c1 != nil && c2 != nil { + defer c1.Close() + defer c2.Close() + + pub, err := c1.Channel() + sub, err := c2.Channel() + + queue := "TestPublishConsumeOnly" + + if _, err = pub.QueueDeclare(queue, false, true, false, false, nil); err != nil { + t.Errorf("Failed to declare: %s", err) + return + } + + if _, err = sub.QueueDeclare(queue, false, true, false, false, nil); err != nil { + t.Errorf("Failed to declare: %s", err) + return + } + + defer sub.QueueDelete(queue, false, false, false) + + ch, err := sub.Consume(queue, "", false, false, false, false, nil) + if err != nil { + t.Errorf("Could not sub: %s", err) + } + + quick.CheckEqual( + func(msg Publishing) []byte { + empty := Publishing{Body: msg.Body} + if pub.Publish("", queue, false, false, empty) != nil { + return []byte{'X'} + } + return msg.Body + }, + func(msg Publishing) []byte { + out := <-ch + out.Ack(false) + return out.Body + }, + nil) + } +} + +func TestQuickPublishConsumeBigBody(t *testing.T) { + c1 := integrationConnection(t, "big-pub") + c2 := integrationConnection(t, "big-sub") + + if c1 != nil && c2 != nil { + defer c1.Close() + defer c2.Close() + + pub, err := c1.Channel() + sub, err := c2.Channel() + + queue := "test-pubsub" + + if _, err = sub.QueueDeclare(queue, false, true, false, false, nil); err != 
nil { + t.Errorf("Failed to declare: %s", err) + return + } + + ch, err := sub.Consume(queue, "", false, false, false, false, nil) + if err != nil { + t.Errorf("Could not sub: %s", err) + } + + fixture := Publishing{ + Body: make([]byte, 1e4+1000), + } + + if _, err = pub.QueueDeclare(queue, false, true, false, false, nil); err != nil { + t.Errorf("Failed to declare: %s", err) + return + } + + err = pub.Publish("", queue, false, false, fixture) + if err != nil { + t.Errorf("Could not publish big body") + } + + select { + case msg := <-ch: + if bytes.Compare(msg.Body, fixture.Body) != 0 { + t.Errorf("Consumed big body didn't match") + } + case <-time.After(200 * time.Millisecond): + t.Errorf("Timeout on receive") + } + } +} + +func TestIntegrationGetOk(t *testing.T) { + if c := integrationConnection(t, "getok"); c != nil { + defer c.Close() + + queue := "test.get-ok" + ch, _ := c.Channel() + + ch.QueueDeclare(queue, false, true, false, false, nil) + ch.Publish("", queue, false, false, Publishing{Body: []byte("ok")}) + + msg, ok, err := ch.Get(queue, false) + + if err != nil { + t.Fatalf("Failed get: %v", err) + } + + if !ok { + t.Fatalf("Get on a queued message did not find the message") + } + + if string(msg.Body) != "ok" { + t.Fatalf("Get did not get the correct message") + } + } +} + +func TestIntegrationGetEmpty(t *testing.T) { + if c := integrationConnection(t, "getok"); c != nil { + defer c.Close() + + queue := "test.get-ok" + ch, _ := c.Channel() + + ch.QueueDeclare(queue, false, true, false, false, nil) + + _, ok, err := ch.Get(queue, false) + + if err != nil { + t.Fatalf("Failed get: %v", err) + } + + if !ok { + t.Fatalf("Get on a queued message retrieved a message when it shouldn't have") + } + } +} + +func TestIntegrationTxCommit(t *testing.T) { + if c := integrationConnection(t, "txcommit"); c != nil { + defer c.Close() + + queue := "test.tx.commit" + ch, _ := c.Channel() + + ch.QueueDeclare(queue, false, true, false, false, nil) + + if err := ch.Tx(); 
err != nil { + t.Fatalf("tx.select failed") + } + + ch.Publish("", queue, false, false, Publishing{Body: []byte("ok")}) + + if err := ch.TxCommit(); err != nil { + t.Fatalf("tx.commit failed") + } + + msg, ok, err := ch.Get(queue, false) + + if err != nil || !ok { + t.Fatalf("Failed get: %v", err) + } + + if string(msg.Body) != "ok" { + t.Fatalf("Get did not get the correct message from the transaction") + } + } +} + +func TestIntegrationTxRollback(t *testing.T) { + if c := integrationConnection(t, "txrollback"); c != nil { + defer c.Close() + + queue := "test.tx.rollback" + ch, _ := c.Channel() + + ch.QueueDeclare(queue, false, true, false, false, nil) + + if err := ch.Tx(); err != nil { + t.Fatalf("tx.select failed") + } + + ch.Publish("", queue, false, false, Publishing{Body: []byte("ok")}) + + if err := ch.TxRollback(); err != nil { + t.Fatalf("tx.rollback failed") + } + + _, ok, err := ch.Get(queue, false) + + if err != nil { + t.Fatalf("Failed get: %v", err) + } + + if ok { + t.Fatalf("message was published when it should have been rolled back") + } + } +} + +func TestIntegrationReturn(t *testing.T) { + if c, ch := integrationQueue(t, "return"); c != nil { + defer c.Close() + + ret := make(chan Return, 1) + + ch.NotifyReturn(ret) + + // mandatory publish to an exchange without a binding should be returned + ch.Publish("", "return-without-binding", true, false, Publishing{Body: []byte("mandatory")}) + + select { + case res := <-ret: + if string(res.Body) != "mandatory" { + t.Fatalf("expected return of the same message") + } + + if res.ReplyCode != NoRoute { + t.Fatalf("expected no consumers reply code on the Return result, got: %v", res.ReplyCode) + } + + case <-time.After(200 * time.Millisecond): + t.Fatalf("no return was received within 200ms") + } + } +} + +func TestIntegrationCancel(t *testing.T) { + queue := "cancel" + consumerTag := "test.cancel" + + if c, ch := integrationQueue(t, queue); c != nil { + defer c.Close() + + cancels := 
ch.NotifyCancel(make(chan string, 1)) + + go func() { + if _, err := ch.Consume(queue, consumerTag, false, false, false, false, nil); err != nil { + t.Fatalf("cannot consume from %q to test NotifyCancel: %v", queue, err) + } + if _, err := ch.QueueDelete(queue, false, false, false); err != nil { + t.Fatalf("cannot delete integration queue: %v", err) + } + }() + + select { + case tag := <-cancels: + if want, got := consumerTag, tag; want != got { + t.Fatalf("expected to be notified of deleted queue with consumer tag, got: %q", got) + } + case <-time.After(200 * time.Millisecond): + t.Fatalf("expected to be notified of deleted queue with 200ms") + } + } +} + +func TestIntegrationConfirm(t *testing.T) { + if c, ch := integrationQueue(t, "confirm"); c != nil { + defer c.Close() + + confirms := ch.NotifyPublish(make(chan Confirmation, 1)) + + if err := ch.Confirm(false); err != nil { + t.Fatalf("could not confirm") + } + + ch.Publish("", "confirm", false, false, Publishing{Body: []byte("confirm")}) + + select { + case confirmed := <-confirms: + if confirmed.DeliveryTag != 1 { + t.Fatalf("expected ack starting with delivery tag of 1") + } + case <-time.After(200 * time.Millisecond): + t.Fatalf("no ack was received within 200ms") + } + } +} + +// https://github.com/streadway/amqp/issues/61 +func TestRoundTripAllFieldValueTypes61(t *testing.T) { + if conn := integrationConnection(t, "issue61"); conn != nil { + defer conn.Close() + timestamp := time.Unix(100000000, 0) + + headers := Table{ + "A": []interface{}{ + []interface{}{"nested array", int32(3)}, + Decimal{2, 1}, + Table{"S": "nested table in array"}, + int32(2 << 20), + string("array string"), + timestamp, + nil, + byte(2), + float64(2.64), + float32(2.32), + int64(2 << 60), + int16(2 << 10), + bool(true), + []byte{'b', '2'}, + }, + "D": Decimal{1, 1}, + "F": Table{"S": "nested table in table"}, + "I": int32(1 << 20), + "S": string("string"), + "T": timestamp, + "V": nil, + "b": byte(1), + "d": float64(1.64), + "f": 
float32(1.32), + "l": int64(1 << 60), + "s": int16(1 << 10), + "t": bool(true), + "x": []byte{'b', '1'}, + } + + queue := "test.issue61-roundtrip" + ch, _ := conn.Channel() + + if _, err := ch.QueueDeclare(queue, false, true, false, false, nil); err != nil { + t.Fatalf("Could not declare") + } + + msgs, err := ch.Consume(queue, "", false, false, false, false, nil) + if err != nil { + t.Fatalf("Could not consume") + } + + err = ch.Publish("", queue, false, false, Publishing{Body: []byte("ignored"), Headers: headers}) + if err != nil { + t.Fatalf("Could not publish: %v", err) + } + + msg, ok := <-msgs + + if !ok { + t.Fatalf("Channel closed prematurely likely due to publish exception") + } + + for k, v := range headers { + if !reflect.DeepEqual(v, msg.Headers[k]) { + t.Errorf("Round trip header not the same for key %q: expected: %#v, got %#v", k, v, msg.Headers[k]) + } + } + } +} + +// Declares a queue with the x-message-ttl extension to exercise integer +// serialization. +// +// Relates to https://github.com/streadway/amqp/issues/60 +// +func TestDeclareArgsXMessageTTL(t *testing.T) { + if conn := integrationConnection(t, "declareTTL"); conn != nil { + defer conn.Close() + + ch, _ := conn.Channel() + args := Table{"x-message-ttl": int32(9000000)} + + // should not drop the connection + if _, err := ch.QueueDeclare("declareWithTTL", false, true, false, false, args); err != nil { + t.Fatalf("cannot declare with TTL: got: %v", err) + } + } +} + +// Sets up the topology where rejected messages will be forwarded +// to a fanout exchange, with a single queue bound. 
+// +// Relates to https://github.com/streadway/amqp/issues/56 +// +func TestDeclareArgsRejectToDeadLetterQueue(t *testing.T) { + if conn := integrationConnection(t, "declareArgs"); conn != nil { + defer conn.Close() + + ex, q := "declareArgs", "declareArgs-deliveries" + dlex, dlq := ex+"-dead-letter", q+"-dead-letter" + + ch, _ := conn.Channel() + + if err := ch.ExchangeDeclare(ex, "fanout", false, true, false, false, nil); err != nil { + t.Fatalf("cannot declare %v: got: %v", ex, err) + } + + if err := ch.ExchangeDeclare(dlex, "fanout", false, true, false, false, nil); err != nil { + t.Fatalf("cannot declare %v: got: %v", dlex, err) + } + + if _, err := ch.QueueDeclare(dlq, false, true, false, false, nil); err != nil { + t.Fatalf("cannot declare %v: got: %v", dlq, err) + } + + if err := ch.QueueBind(dlq, "#", dlex, false, nil); err != nil { + t.Fatalf("cannot bind %v to %v: got: %v", dlq, dlex, err) + } + + if _, err := ch.QueueDeclare(q, false, true, false, false, Table{ + "x-dead-letter-exchange": dlex, + }); err != nil { + t.Fatalf("cannot declare %v with dlq %v: got: %v", q, dlex, err) + } + + if err := ch.QueueBind(q, "#", ex, false, nil); err != nil { + t.Fatalf("cannot bind %v: got: %v", ex, err) + } + + fails, err := ch.Consume(q, "", false, false, false, false, nil) + if err != nil { + t.Fatalf("cannot consume %v: got: %v", q, err) + } + + // Reject everything consumed + go func() { + for d := range fails { + d.Reject(false) + } + }() + + // Publish the 'poison' + if err := ch.Publish(ex, q, true, false, Publishing{Body: []byte("ignored")}); err != nil { + t.Fatalf("publishing failed") + } + + // spin-get until message arrives on the dead-letter queue with a + // synchronous parse to exercise the array field (x-death) set by the + // server relating to issue-56 + for i := 0; i < 10; i++ { + d, got, err := ch.Get(dlq, false) + if !got && err == nil { + continue + } else if err != nil { + t.Fatalf("expected success in parsing reject, got: %v", err) + } 
else { + // pass if we've parsed an array + if v, ok := d.Headers["x-death"]; ok { + if _, ok := v.([]interface{}); ok { + return + } + } + t.Fatalf("array field x-death expected in the headers, got: %v (%T)", d.Headers, d.Headers["x-death"]) + } + } + + t.Fatalf("expectd dead-letter after 10 get attempts") + } +} + +// https://github.com/streadway/amqp/issues/48 +func TestDeadlockConsumerIssue48(t *testing.T) { + if conn := integrationConnection(t, "issue48"); conn != nil { + defer conn.Close() + + deadline := make(chan bool) + go func() { + select { + case <-time.After(5 * time.Second): + panic("expected to receive 2 deliveries while in an RPC, got a deadlock") + case <-deadline: + // pass + } + }() + + ch, err := conn.Channel() + if err != nil { + t.Fatalf("got error on channel.open: %v", err) + } + + queue := "test-issue48" + + if _, err := ch.QueueDeclare(queue, false, true, false, false, nil); err != nil { + t.Fatalf("expected to declare a queue: %v", err) + } + + if err := ch.Confirm(false); err != nil { + t.Fatalf("got error on confirm: %v", err) + } + + confirms := ch.NotifyPublish(make(chan Confirmation, 2)) + + for i := 0; i < cap(confirms); i++ { + // Fill the queue with some new or remaining publishings + ch.Publish("", queue, false, false, Publishing{Body: []byte("")}) + } + + for i := 0; i < cap(confirms); i++ { + // Wait for them to land on the queue so they'll be delivered on consume + <-confirms + } + + // Consuming should send them all on the wire + msgs, err := ch.Consume(queue, "", false, false, false, false, nil) + if err != nil { + t.Fatalf("got error on consume: %v", err) + } + + // We pop one off the chan, the other is on the wire + <-msgs + + // Opening a new channel (any RPC) while another delivery is on the wire + if _, err := conn.Channel(); err != nil { + t.Fatalf("got error on consume: %v", err) + } + + // We pop the next off the chan + <-msgs + + deadline <- true + } +} + +// https://github.com/streadway/amqp/issues/46 +func 
TestRepeatedChannelExceptionWithPublishAndMaxProcsIssue46(t *testing.T) { + conn := integrationConnection(t, "issue46") + if conn != nil { + for i := 0; i < 100; i++ { + ch, err := conn.Channel() + if err != nil { + t.Fatalf("expected error only on publish, got error on channel.open: %v", err) + } + + for j := 0; j < 10; j++ { + err = ch.Publish("not-existing-exchange", "some-key", false, false, Publishing{Body: []byte("some-data")}) + if err, ok := err.(Error); ok { + if err.Code != 504 { + t.Fatalf("expected channel only exception, got: %v", err) + } + } + } + } + } +} + +// https://github.com/streadway/amqp/issues/43 +func TestChannelExceptionWithCloseIssue43(t *testing.T) { + conn := integrationConnection(t, "issue43") + if conn != nil { + go func() { + for err := range conn.NotifyClose(make(chan *Error)) { + t.Log(err.Error()) + } + }() + + c1, err := conn.Channel() + if err != nil { + panic(err) + } + + go func() { + for err := range c1.NotifyClose(make(chan *Error)) { + t.Log("Channel1 Close: " + err.Error()) + } + }() + + c2, err := conn.Channel() + if err != nil { + panic(err) + } + + go func() { + for err := range c2.NotifyClose(make(chan *Error)) { + t.Log("Channel2 Close: " + err.Error()) + } + }() + + // Cause an asynchronous channel exception causing the server + // to send a "channel.close" method either before or after the next + // asynchronous method. + err = c1.Publish("nonexisting-exchange", "", false, false, Publishing{}) + if err != nil { + panic(err) + } + + // Receive or send the channel close method, the channel shuts down + // but this expects a channel.close-ok to be received. + c1.Close() + + // This ensures that the 2nd channel is unaffected by the channel exception + // on channel 1. 
+ err = c2.ExchangeDeclare("test-channel-still-exists", "direct", false, true, false, false, nil) + if err != nil { + panic(err) + } + } +} + +// https://github.com/streadway/amqp/issues/7 +func TestCorruptedMessageIssue7(t *testing.T) { + messageCount := 1024 + + c1 := integrationConnection(t, "") + c2 := integrationConnection(t, "") + + if c1 != nil && c2 != nil { + defer c1.Close() + defer c2.Close() + + pub, err := c1.Channel() + if err != nil { + t.Fatalf("Cannot create Channel") + } + + sub, err := c2.Channel() + if err != nil { + t.Fatalf("Cannot create Channel") + } + + queue := "test-corrupted-message-regression" + + if _, err := pub.QueueDeclare(queue, false, true, false, false, nil); err != nil { + t.Fatalf("Cannot declare") + } + + if _, err := sub.QueueDeclare(queue, false, true, false, false, nil); err != nil { + t.Fatalf("Cannot declare") + } + + msgs, err := sub.Consume(queue, "", false, false, false, false, nil) + if err != nil { + t.Fatalf("Cannot consume") + } + + for i := 0; i < messageCount; i++ { + err := pub.Publish("", queue, false, false, Publishing{ + Body: generateCrc32Random(7 * i), + }) + + if err != nil { + t.Fatalf("Failed to publish") + } + } + + for i := 0; i < messageCount; i++ { + select { + case msg := <-msgs: + assertMessageCrc32(t, msg.Body, fmt.Sprintf("missed match at %d", i)) + case <-time.After(200 * time.Millisecond): + t.Fatalf("Timeout on recv") + } + } + } +} + +// https://github.com/streadway/amqp/issues/136 +func TestChannelCounterShouldNotPanicIssue136(t *testing.T) { + if c := integrationConnection(t, "issue136"); c != nil { + defer c.Close() + var wg sync.WaitGroup + + // exceeds 65535 channels + for i := 0; i < 8; i++ { + wg.Add(1) + go func(i int) { + for j := 0; j < 10000; j++ { + ch, err := c.Channel() + if err != nil { + t.Fatalf("failed to create channel %d:%d, got: %v", i, j, err) + } + if err := ch.Close(); err != nil { + t.Fatalf("failed to close channel %d:%d, got: %v", i, j, err) + } + } + wg.Done() + 
}(i) + } + wg.Wait() + } +} + +func TestExchangeDeclarePrecondition(t *testing.T) { + c1 := integrationConnection(t, "exchange-double-declare") + c2 := integrationConnection(t, "exchange-double-declare-cleanup") + if c1 != nil && c2 != nil { + defer c1.Close() + defer c2.Close() + + ch, err := c1.Channel() + if err != nil { + t.Fatalf("Create channel") + } + + exchange := "test-mismatched-redeclare" + + err = ch.ExchangeDeclare( + exchange, + "direct", // exchangeType + false, // durable + true, // auto-delete + false, // internal + false, // noWait + nil, // arguments + ) + if err != nil { + t.Fatalf("Could not initially declare exchange") + } + + err = ch.ExchangeDeclare( + exchange, + "direct", + true, // different durability + true, + false, + false, + nil, + ) + + if err == nil { + t.Fatalf("Expected to fail a redeclare with different durability, didn't receive an error") + } + + if err, ok := err.(Error); ok { + if err.Code != PreconditionFailed { + t.Fatalf("Expected precondition error") + } + if !err.Recover { + t.Fatalf("Expected to be able to recover") + } + } + + ch2, _ := c2.Channel() + if err = ch2.ExchangeDelete(exchange, false, false); err != nil { + t.Fatalf("Could not delete exchange: %v", err) + } + } +} + +func TestRabbitMQQueueTTLGet(t *testing.T) { + if c := integrationRabbitMQ(t, "ttl"); c != nil { + defer c.Close() + + queue := "test.rabbitmq-message-ttl" + channel, err := c.Channel() + if err != nil { + t.Fatalf("channel: %v", err) + } + + if _, err = channel.QueueDeclare( + queue, + false, + true, + false, + false, + Table{"x-message-ttl": int32(100)}, // in ms + ); err != nil { + t.Fatalf("queue declare: %s", err) + } + + channel.Publish("", queue, false, false, Publishing{Body: []byte("ttl")}) + + time.Sleep(200 * time.Millisecond) + + _, ok, err := channel.Get(queue, false) + + if ok { + t.Fatalf("Expected the message to expire in 100ms, it didn't expire after 200ms") + } + + if err != nil { + t.Fatalf("Failed to get on ttl queue") + } + 
} +} + +func TestRabbitMQQueueNackMultipleRequeue(t *testing.T) { + if c := integrationRabbitMQ(t, "nack"); c != nil { + defer c.Close() + + if c.isCapable("basic.nack") { + queue := "test.rabbitmq-basic-nack" + channel, err := c.Channel() + if err != nil { + t.Fatalf("channel: %v", err) + } + + if _, err = channel.QueueDeclare(queue, false, true, false, false, nil); err != nil { + t.Fatalf("queue declare: %s", err) + } + + channel.Publish("", queue, false, false, Publishing{Body: []byte("1")}) + channel.Publish("", queue, false, false, Publishing{Body: []byte("2")}) + + m1, ok, err := channel.Get(queue, false) + if !ok || err != nil || m1.Body[0] != '1' { + t.Fatalf("could not get message %v", m1) + } + + m2, ok, err := channel.Get(queue, false) + if !ok || err != nil || m2.Body[0] != '2' { + t.Fatalf("could not get message %v", m2) + } + + m2.Nack(true, true) + + m1, ok, err = channel.Get(queue, false) + if !ok || err != nil || m1.Body[0] != '1' { + t.Fatalf("could not get message %v", m1) + } + + m2, ok, err = channel.Get(queue, false) + if !ok || err != nil || m2.Body[0] != '2' { + t.Fatalf("could not get message %v", m2) + } + } + } +} + +func TestConsumerCancelNotification(t *testing.T) { + c := integrationConnection(t, "consumer cancel notification") + if c != nil { + defer c.Close() + ch, err := c.Channel() + if err != nil { + t.Fatalf("got error on channel.open: %v", err) + } + + queue := "test-consumer-cancel-notification" + + if _, err := ch.QueueDeclare(queue, false, true, false, false, nil); err != nil { + t.Fatalf("expected to declare a queue: %v", err) + } + + if _, err := ch.Consume(queue, "", false, false, false, false, nil); err != nil { + t.Fatalf("basic.consume failed") + } + // consumer cancel notification channel + ccnChan := make(chan string, 1) + ch.NotifyCancel(ccnChan) + + if _, err := ch.QueueDelete(queue, false, false, true); err != nil { + t.Fatalf("queue.delete failed: %s", err) + } + + select { + case <-ccnChan: + // do nothing + case 
<-time.After(time.Second * 10): + t.Errorf("basic.cancel wasn't received") + t.Fail() + } + // we don't close ccnChan because channel shutdown + // does it + } +} + +func TestConcurrentChannelAndConnectionClose(t *testing.T) { + c := integrationConnection(t, "concurrent channel and connection test") + if c != nil { + ch, err := c.Channel() + if err != nil { + t.Fatalf("got error on channel.open: %v", err) + } + + var wg sync.WaitGroup + wg.Add(2) + + starter := make(chan struct{}) + go func() { + defer wg.Done() + <-starter + c.Close() + }() + + go func() { + defer wg.Done() + <-starter + ch.Close() + }() + close(starter) + wg.Wait() + } +} + +/* + * Support for integration tests + */ + +func integrationURLFromEnv() string { + url := os.Getenv("AMQP_URL") + if url == "" { + url = "amqp://" + } + return url +} + +func loggedConnection(t *testing.T, conn *Connection, name string) *Connection { + if name != "" { + conn.conn = &logIO{t, name, conn.conn} + } + return conn +} + +// Returns a connection to the AMQP if the AMQP_URL environment +// variable is set and a connection can be established. 
+func integrationConnection(t *testing.T, name string) *Connection { + conn, err := Dial(integrationURLFromEnv()) + if err != nil { + t.Errorf("dial integration server: %s", err) + return nil + } + return loggedConnection(t, conn, name) +} + +// Returns a connection, channel and declares a queue when the AMQP_URL is in the environment +func integrationQueue(t *testing.T, name string) (*Connection, *Channel) { + if conn := integrationConnection(t, name); conn != nil { + if channel, err := conn.Channel(); err == nil { + if _, err = channel.QueueDeclare(name, false, true, false, false, nil); err == nil { + return conn, channel + } + } + } + return nil, nil +} + +// Delegates to integrationConnection and only returns a connection if the +// product is RabbitMQ +func integrationRabbitMQ(t *testing.T, name string) *Connection { + if conn := integrationConnection(t, "connect"); conn != nil { + if server, ok := conn.Properties["product"]; ok && server == "RabbitMQ" { + return conn + } + } + + return nil +} + +func assertConsumeBody(t *testing.T, messages <-chan Delivery, want []byte) (msg *Delivery) { + select { + case got := <-messages: + if bytes.Compare(want, got.Body) != 0 { + t.Fatalf("Message body does not match want: %v, got: %v, for: %+v", want, got.Body, got) + } + msg = &got + case <-time.After(200 * time.Millisecond): + t.Fatalf("Timeout waiting for %v", want) + } + + return msg +} + +// Pulls out the CRC and verifies the remaining content against the CRC +func assertMessageCrc32(t *testing.T, msg []byte, assert string) { + size := binary.BigEndian.Uint32(msg[:4]) + + crc := crc32.NewIEEE() + crc.Write(msg[8:]) + + if binary.BigEndian.Uint32(msg[4:8]) != crc.Sum32() { + t.Fatalf("Message does not match CRC: %s", assert) + } + + if int(size) != len(msg)-8 { + t.Fatalf("Message does not match size, should=%d, is=%d: %s", size, len(msg)-8, assert) + } +} + +// Creates a random body size with a leading 32-bit CRC in network byte order +// that verifies the remaining 
slice +func generateCrc32Random(size int) []byte { + msg := make([]byte, size+8) + if _, err := io.ReadFull(devrand.Reader, msg); err != nil { + panic(err) + } + + crc := crc32.NewIEEE() + crc.Write(msg[8:]) + + binary.BigEndian.PutUint32(msg[0:4], uint32(size)) + binary.BigEndian.PutUint32(msg[4:8], crc.Sum32()) + + return msg +} diff --git a/vendor/github.com/streadway/amqp/pre-commit b/vendor/github.com/streadway/amqp/pre-commit new file mode 100755 index 0000000..3715530 --- /dev/null +++ b/vendor/github.com/streadway/amqp/pre-commit @@ -0,0 +1,67 @@ +#!/bin/sh + +LATEST_STABLE_SUPPORTED_GO_VERSION="1.11" + +main() { + if local_go_version_is_latest_stable + then + run_gofmt + run_golint + run_govet + fi + run_unit_tests +} + +local_go_version_is_latest_stable() { + go version | grep -q $LATEST_STABLE_SUPPORTED_GO_VERSION +} + +log_error() { + echo "$*" 1>&2 +} + +run_gofmt() { + GOFMT_FILES=$(gofmt -l .) + if [ -n "$GOFMT_FILES" ] + then + log_error "gofmt failed for the following files: +$GOFMT_FILES + +please run 'gofmt -w .' on your changes before committing." + exit 1 + fi +} + +run_golint() { + GOLINT_ERRORS=$(golint ./... | grep -v "Id should be") + if [ -n "$GOLINT_ERRORS" ] + then + log_error "golint failed for the following reasons: +$GOLINT_ERRORS + +please run 'golint ./...' on your changes before committing." + exit 1 + fi +} + +run_govet() { + GOVET_ERRORS=$(go tool vet ./*.go 2>&1) + if [ -n "$GOVET_ERRORS" ] + then + log_error "go vet failed for the following reasons: +$GOVET_ERRORS + +please run 'go tool vet ./*.go' on your changes before committing." + exit 1 + fi +} + +run_unit_tests() { + if [ -z "$NOTEST" ] + then + log_error 'Running short tests...' 
+ env AMQP_URL= go test -short + fi +} + +main diff --git a/vendor/github.com/streadway/amqp/read.go b/vendor/github.com/streadway/amqp/read.go new file mode 100644 index 0000000..3aa0b33 --- /dev/null +++ b/vendor/github.com/streadway/amqp/read.go @@ -0,0 +1,456 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "bytes" + "encoding/binary" + "errors" + "io" + "time" +) + +/* +Reads a frame from an input stream and returns an interface that can be cast into +one of the following: + + methodFrame + PropertiesFrame + bodyFrame + heartbeatFrame + +2.3.5 frame Details + +All frames consist of a header (7 octets), a payload of arbitrary size, and a +'frame-end' octet that detects malformed frames: + + 0 1 3 7 size+7 size+8 + +------+---------+-------------+ +------------+ +-----------+ + | type | channel | size | | payload | | frame-end | + +------+---------+-------------+ +------------+ +-----------+ + octet short long size octets octet + +To read a frame, we: + 1. Read the header and check the frame type and channel. + 2. Depending on the frame type, we read the payload and process it. + 3. Read the frame end octet. + +In realistic implementations where performance is a concern, we would use +“read-ahead buffering” or + +“gathering reads” to avoid doing three separate system calls to read a frame. 
+*/ +func (r *reader) ReadFrame() (frame frame, err error) { + var scratch [7]byte + + if _, err = io.ReadFull(r.r, scratch[:7]); err != nil { + return + } + + typ := uint8(scratch[0]) + channel := binary.BigEndian.Uint16(scratch[1:3]) + size := binary.BigEndian.Uint32(scratch[3:7]) + + switch typ { + case frameMethod: + if frame, err = r.parseMethodFrame(channel, size); err != nil { + return + } + + case frameHeader: + if frame, err = r.parseHeaderFrame(channel, size); err != nil { + return + } + + case frameBody: + if frame, err = r.parseBodyFrame(channel, size); err != nil { + return nil, err + } + + case frameHeartbeat: + if frame, err = r.parseHeartbeatFrame(channel, size); err != nil { + return + } + + default: + return nil, ErrFrame + } + + if _, err = io.ReadFull(r.r, scratch[:1]); err != nil { + return nil, err + } + + if scratch[0] != frameEnd { + return nil, ErrFrame + } + + return +} + +func readShortstr(r io.Reader) (v string, err error) { + var length uint8 + if err = binary.Read(r, binary.BigEndian, &length); err != nil { + return + } + + bytes := make([]byte, length) + if _, err = io.ReadFull(r, bytes); err != nil { + return + } + return string(bytes), nil +} + +func readLongstr(r io.Reader) (v string, err error) { + var length uint32 + if err = binary.Read(r, binary.BigEndian, &length); err != nil { + return + } + + // slices can't be longer than max int32 value + if length > (^uint32(0) >> 1) { + return + } + + bytes := make([]byte, length) + if _, err = io.ReadFull(r, bytes); err != nil { + return + } + return string(bytes), nil +} + +func readDecimal(r io.Reader) (v Decimal, err error) { + if err = binary.Read(r, binary.BigEndian, &v.Scale); err != nil { + return + } + if err = binary.Read(r, binary.BigEndian, &v.Value); err != nil { + return + } + return +} + +func readFloat32(r io.Reader) (v float32, err error) { + if err = binary.Read(r, binary.BigEndian, &v); err != nil { + return + } + return +} + +func readFloat64(r io.Reader) (v float64, 
err error) { + if err = binary.Read(r, binary.BigEndian, &v); err != nil { + return + } + return +} + +func readTimestamp(r io.Reader) (v time.Time, err error) { + var sec int64 + if err = binary.Read(r, binary.BigEndian, &sec); err != nil { + return + } + return time.Unix(sec, 0), nil +} + +/* +'A': []interface{} +'D': Decimal +'F': Table +'I': int32 +'S': string +'T': time.Time +'V': nil +'b': byte +'d': float64 +'f': float32 +'l': int64 +'s': int16 +'t': bool +'x': []byte +*/ +func readField(r io.Reader) (v interface{}, err error) { + var typ byte + if err = binary.Read(r, binary.BigEndian, &typ); err != nil { + return + } + + switch typ { + case 't': + var value uint8 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return (value != 0), nil + + case 'b': + var value [1]byte + if _, err = io.ReadFull(r, value[0:1]); err != nil { + return + } + return value[0], nil + + case 's': + var value int16 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'I': + var value int32 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'l': + var value int64 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'f': + var value float32 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'd': + var value float64 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'D': + return readDecimal(r) + + case 'S': + return readLongstr(r) + + case 'A': + return readArray(r) + + case 'T': + return readTimestamp(r) + + case 'F': + return readTable(r) + + case 'x': + var len int32 + if err = binary.Read(r, binary.BigEndian, &len); err != nil { + return nil, err + } + + value := make([]byte, len) + if _, err = io.ReadFull(r, value); err != nil { + return nil, err + } + return value, err + 
+ case 'V': + return nil, nil + } + + return nil, ErrSyntax +} + +/* + Field tables are long strings that contain packed name-value pairs. The + name-value pairs are encoded as short string defining the name, and octet + defining the values type and then the value itself. The valid field types for + tables are an extension of the native integer, bit, string, and timestamp + types, and are shown in the grammar. Multi-octet integer fields are always + held in network byte order. +*/ +func readTable(r io.Reader) (table Table, err error) { + var nested bytes.Buffer + var str string + + if str, err = readLongstr(r); err != nil { + return + } + + nested.Write([]byte(str)) + + table = make(Table) + + for nested.Len() > 0 { + var key string + var value interface{} + + if key, err = readShortstr(&nested); err != nil { + return + } + + if value, err = readField(&nested); err != nil { + return + } + + table[key] = value + } + + return +} + +func readArray(r io.Reader) ([]interface{}, error) { + var ( + size uint32 + err error + ) + + if err = binary.Read(r, binary.BigEndian, &size); err != nil { + return nil, err + } + + var ( + lim = &io.LimitedReader{R: r, N: int64(size)} + arr = []interface{}{} + field interface{} + ) + + for { + if field, err = readField(lim); err != nil { + if err == io.EOF { + break + } + return nil, err + } + arr = append(arr, field) + } + + return arr, nil +} + +// Checks if this bit mask matches the flags bitset +func hasProperty(mask uint16, prop int) bool { + return int(mask)&prop > 0 +} + +func (r *reader) parseHeaderFrame(channel uint16, size uint32) (frame frame, err error) { + hf := &headerFrame{ + ChannelId: channel, + } + + if err = binary.Read(r.r, binary.BigEndian, &hf.ClassId); err != nil { + return + } + + if err = binary.Read(r.r, binary.BigEndian, &hf.weight); err != nil { + return + } + + if err = binary.Read(r.r, binary.BigEndian, &hf.Size); err != nil { + return + } + + var flags uint16 + + if err = binary.Read(r.r, binary.BigEndian, 
&flags); err != nil { + return + } + + if hasProperty(flags, flagContentType) { + if hf.Properties.ContentType, err = readShortstr(r.r); err != nil { + return + } + } + if hasProperty(flags, flagContentEncoding) { + if hf.Properties.ContentEncoding, err = readShortstr(r.r); err != nil { + return + } + } + if hasProperty(flags, flagHeaders) { + if hf.Properties.Headers, err = readTable(r.r); err != nil { + return + } + } + if hasProperty(flags, flagDeliveryMode) { + if err = binary.Read(r.r, binary.BigEndian, &hf.Properties.DeliveryMode); err != nil { + return + } + } + if hasProperty(flags, flagPriority) { + if err = binary.Read(r.r, binary.BigEndian, &hf.Properties.Priority); err != nil { + return + } + } + if hasProperty(flags, flagCorrelationId) { + if hf.Properties.CorrelationId, err = readShortstr(r.r); err != nil { + return + } + } + if hasProperty(flags, flagReplyTo) { + if hf.Properties.ReplyTo, err = readShortstr(r.r); err != nil { + return + } + } + if hasProperty(flags, flagExpiration) { + if hf.Properties.Expiration, err = readShortstr(r.r); err != nil { + return + } + } + if hasProperty(flags, flagMessageId) { + if hf.Properties.MessageId, err = readShortstr(r.r); err != nil { + return + } + } + if hasProperty(flags, flagTimestamp) { + if hf.Properties.Timestamp, err = readTimestamp(r.r); err != nil { + return + } + } + if hasProperty(flags, flagType) { + if hf.Properties.Type, err = readShortstr(r.r); err != nil { + return + } + } + if hasProperty(flags, flagUserId) { + if hf.Properties.UserId, err = readShortstr(r.r); err != nil { + return + } + } + if hasProperty(flags, flagAppId) { + if hf.Properties.AppId, err = readShortstr(r.r); err != nil { + return + } + } + if hasProperty(flags, flagReserved1) { + if hf.Properties.reserved1, err = readShortstr(r.r); err != nil { + return + } + } + + return hf, nil +} + +func (r *reader) parseBodyFrame(channel uint16, size uint32) (frame frame, err error) { + bf := &bodyFrame{ + ChannelId: channel, + Body: 
make([]byte, size), + } + + if _, err = io.ReadFull(r.r, bf.Body); err != nil { + return nil, err + } + + return bf, nil +} + +var errHeartbeatPayload = errors.New("Heartbeats should not have a payload") + +func (r *reader) parseHeartbeatFrame(channel uint16, size uint32) (frame frame, err error) { + hf := &heartbeatFrame{ + ChannelId: channel, + } + + if size > 0 { + return nil, errHeartbeatPayload + } + + return hf, nil +} diff --git a/vendor/github.com/streadway/amqp/read_test.go b/vendor/github.com/streadway/amqp/read_test.go new file mode 100644 index 0000000..143e262 --- /dev/null +++ b/vendor/github.com/streadway/amqp/read_test.go @@ -0,0 +1,26 @@ +package amqp + +import ( + "strings" + "testing" +) + +func TestGoFuzzCrashers(t *testing.T) { + if testing.Short() { + t.Skip("excessive allocation") + } + + testData := []string{ + "\b000000", + "\x02\x16\x10�[��\t\xbdui�" + "\x10\x01\x00\xff\xbf\xef\xbfサn\x99\x00\x10r", + "\x0300\x00\x00\x00\x040000", + } + + for idx, testStr := range testData { + r := reader{strings.NewReader(testStr)} + frame, err := r.ReadFrame() + if err != nil && frame != nil { + t.Errorf("%d. 
frame is not nil: %#v err = %v", idx, frame, err) + } + } +} diff --git a/vendor/github.com/streadway/amqp/reconnect_test.go b/vendor/github.com/streadway/amqp/reconnect_test.go new file mode 100644 index 0000000..5a06cb7 --- /dev/null +++ b/vendor/github.com/streadway/amqp/reconnect_test.go @@ -0,0 +1,113 @@ +package amqp_test + +import ( + "fmt" + "github.com/streadway/amqp" + "os" +) + +// Every connection should declare the topology they expect +func setup(url, queue string) (*amqp.Connection, *amqp.Channel, error) { + conn, err := amqp.Dial(url) + if err != nil { + return nil, nil, err + } + + ch, err := conn.Channel() + if err != nil { + return nil, nil, err + } + + if _, err := ch.QueueDeclare(queue, false, true, false, false, nil); err != nil { + return nil, nil, err + } + + return conn, ch, nil +} + +func consume(url, queue string) (*amqp.Connection, <-chan amqp.Delivery, error) { + conn, ch, err := setup(url, queue) + if err != nil { + return nil, nil, err + } + + // Indicate we only want 1 message to acknowledge at a time. + if err := ch.Qos(1, 0, false); err != nil { + return nil, nil, err + } + + // Exclusive consumer + deliveries, err := ch.Consume(queue, "", false, true, false, false, nil) + + return conn, deliveries, err +} + +func ExampleConnection_reconnect() { + if url := os.Getenv("AMQP_URL"); url != "" { + queue := "example.reconnect" + + // The connection/channel for publishing to interleave the ingress messages + // between reconnects, shares the same topology as the consumer. If we rather + // sent all messages up front, the first consumer would receive every message. + // We would rather show how the messages are not lost between reconnects. 
+ _, pub, err := setup(url, queue) + if err != nil { + fmt.Println("err publisher setup:", err) + return + } + + // Purge the queue from the publisher side to establish initial state + if _, err := pub.QueuePurge(queue, false); err != nil { + fmt.Println("err purge:", err) + return + } + + // Reconnect simulation, should be for { ... } in production + for i := 1; i <= 3; i++ { + fmt.Println("connect") + + conn, deliveries, err := consume(url, queue) + if err != nil { + fmt.Println("err consume:", err) + return + } + + // Simulate a producer on a different connection showing that consumers + // continue where they were left off after each reconnect. + if err := pub.Publish("", queue, false, false, amqp.Publishing{ + Body: []byte(fmt.Sprintf("%d", i)), + }); err != nil { + fmt.Println("err publish:", err) + return + } + + // Simulates a consumer that when the range finishes, will setup a new + // session and begin ranging over the deliveries again. + for msg := range deliveries { + fmt.Println(string(msg.Body)) + msg.Ack(false) + + // Simulate an error like a server restart, loss of route or operator + // intervention that results in the connection terminating + go conn.Close() + } + } + } else { + // pass with expected output when not running in an integration + // environment. + fmt.Println("connect") + fmt.Println("1") + fmt.Println("connect") + fmt.Println("2") + fmt.Println("connect") + fmt.Println("3") + } + + // Output: + // connect + // 1 + // connect + // 2 + // connect + // 3 +} diff --git a/vendor/github.com/streadway/amqp/return.go b/vendor/github.com/streadway/amqp/return.go new file mode 100644 index 0000000..10dcedb --- /dev/null +++ b/vendor/github.com/streadway/amqp/return.go @@ -0,0 +1,64 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "time" +) + +// Return captures a flattened struct of fields returned by the server when a +// Publishing is unable to be delivered either due to the `mandatory` flag set +// and no route found, or `immediate` flag set and no free consumer. +type Return struct { + ReplyCode uint16 // reason + ReplyText string // description + Exchange string // basic.publish exchange + RoutingKey string // basic.publish routing key + + // Properties + ContentType string // MIME content type + ContentEncoding string // MIME content encoding + Headers Table // Application or header exchange table + DeliveryMode uint8 // queue implementation use - non-persistent (1) or persistent (2) + Priority uint8 // queue implementation use - 0 to 9 + CorrelationId string // application use - correlation identifier + ReplyTo string // application use - address to to reply to (ex: RPC) + Expiration string // implementation use - message expiration spec + MessageId string // application use - message identifier + Timestamp time.Time // application use - message timestamp + Type string // application use - message type name + UserId string // application use - creating user id + AppId string // application use - creating application + + Body []byte +} + +func newReturn(msg basicReturn) *Return { + props, body := msg.getContent() + + return &Return{ + ReplyCode: msg.ReplyCode, + ReplyText: msg.ReplyText, + Exchange: msg.Exchange, + RoutingKey: msg.RoutingKey, + + Headers: props.Headers, + ContentType: props.ContentType, + ContentEncoding: props.ContentEncoding, + DeliveryMode: props.DeliveryMode, + Priority: props.Priority, + CorrelationId: props.CorrelationId, + ReplyTo: props.ReplyTo, + Expiration: props.Expiration, + MessageId: props.MessageId, + Timestamp: props.Timestamp, + Type: props.Type, + UserId: props.UserId, + AppId: props.AppId, + + Body: body, + } +} diff --git 
a/vendor/github.com/streadway/amqp/shared_test.go b/vendor/github.com/streadway/amqp/shared_test.go new file mode 100644 index 0000000..669a50a --- /dev/null +++ b/vendor/github.com/streadway/amqp/shared_test.go @@ -0,0 +1,71 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "encoding/hex" + "io" + "testing" +) + +type pipe struct { + r *io.PipeReader + w *io.PipeWriter +} + +func (p pipe) Read(b []byte) (int, error) { + return p.r.Read(b) +} + +func (p pipe) Write(b []byte) (int, error) { + return p.w.Write(b) +} + +func (p pipe) Close() error { + p.r.Close() + p.w.Close() + return nil +} + +type logIO struct { + t *testing.T + prefix string + proxy io.ReadWriteCloser +} + +func (log *logIO) Read(p []byte) (n int, err error) { + log.t.Logf("%s reading %d\n", log.prefix, len(p)) + n, err = log.proxy.Read(p) + if err != nil { + log.t.Logf("%s read %x: %v\n", log.prefix, p[0:n], err) + } else { + log.t.Logf("%s read:\n%s\n", log.prefix, hex.Dump(p[0:n])) + //fmt.Printf("%s read:\n%s\n", log.prefix, hex.Dump(p[0:n])) + } + return +} + +func (log *logIO) Write(p []byte) (n int, err error) { + log.t.Logf("%s writing %d\n", log.prefix, len(p)) + n, err = log.proxy.Write(p) + if err != nil { + log.t.Logf("%s write %d, %x: %v\n", log.prefix, len(p), p[0:n], err) + } else { + log.t.Logf("%s write %d:\n%s", log.prefix, len(p), hex.Dump(p[0:n])) + //fmt.Printf("%s write %d:\n%s", log.prefix, len(p), hex.Dump(p[0:n])) + } + return +} + +func (log *logIO) Close() (err error) { + err = log.proxy.Close() + if err != nil { + log.t.Logf("%s close : %v\n", log.prefix, err) + } else { + log.t.Logf("%s close\n", log.prefix) + } + return +} diff --git a/vendor/github.com/streadway/amqp/spec/amqp0-9-1.stripped.extended.xml 
b/vendor/github.com/streadway/amqp/spec/amqp0-9-1.stripped.extended.xml new file mode 100644 index 0000000..fbddb93 --- /dev/null +++ b/vendor/github.com/streadway/amqp/spec/amqp0-9-1.stripped.extended.xml @@ -0,0 +1,537 @@ + + + + + + + + + + + + + + + + + Errata: Section 1.2 ought to define an exception 312 "No route", which used to + exist in 0-9 and is what RabbitMQ sends back with 'basic.return' when a + 'mandatory' message cannot be delivered to any queue. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/vendor/github.com/streadway/amqp/spec/gen.go b/vendor/github.com/streadway/amqp/spec/gen.go new file mode 100644 index 0000000..6914769 --- /dev/null +++ b/vendor/github.com/streadway/amqp/spec/gen.go @@ -0,0 +1,536 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// Source code and contact info at http://github.com/streadway/amqp + +// +build ignore + +package main + +import ( + "bytes" + "encoding/xml" + "errors" + "fmt" + "io/ioutil" + "log" + "os" + "regexp" + "strings" + "text/template" +) + +var ( + ErrUnknownType = errors.New("Unknown field type in gen") + ErrUnknownDomain = errors.New("Unknown domain type in gen") +) + +var amqpTypeToNative = map[string]string{ + "bit": "bool", + "octet": "byte", + "shortshort": "uint8", + "short": "uint16", + "long": "uint32", + "longlong": "uint64", + "timestamp": "time.Time", + "table": "Table", + "shortstr": "string", + "longstr": "string", +} + +type Rule struct { + Name string `xml:"name,attr"` + Docs []string `xml:"doc"` +} + +type Doc struct { + Type string `xml:"type,attr"` + Body string `xml:",innerxml"` +} + +type Chassis struct { + Name string `xml:"name,attr"` + Implement string `xml:"implement,attr"` +} + +type Assert struct { + Check string `xml:"check,attr"` + Value string `xml:"value,attr"` + Method string `xml:"method,attr"` +} + +type Field struct { + Name string `xml:"name,attr"` + Domain string `xml:"domain,attr"` + Type string `xml:"type,attr"` + Label string `xml:"label,attr"` + Reserved bool `xml:"reserved,attr"` + Docs []Doc `xml:"doc"` + Asserts []Assert `xml:"assert"` +} + +type Response struct { + Name string `xml:"name,attr"` +} + +type Method struct { + Name string `xml:"name,attr"` + Response Response `xml:"response"` + Synchronous bool `xml:"synchronous,attr"` + Content bool `xml:"content,attr"` + Index string `xml:"index,attr"` + Label string `xml:"label,attr"` + Docs []Doc `xml:"doc"` + Rules []Rule `xml:"rule"` + Fields []Field `xml:"field"` + Chassis []Chassis `xml:"chassis"` +} + +type Class struct { + Name string `xml:"name,attr"` + Handler string `xml:"handler,attr"` + Index string `xml:"index,attr"` + Label string `xml:"label,attr"` + Docs []Doc `xml:"doc"` + Methods []Method `xml:"method"` + Chassis []Chassis `xml:"chassis"` +} + +type Domain 
struct { + Name string `xml:"name,attr"` + Type string `xml:"type,attr"` + Label string `xml:"label,attr"` + Rules []Rule `xml:"rule"` + Docs []Doc `xml:"doc"` +} + +type Constant struct { + Name string `xml:"name,attr"` + Value int `xml:"value,attr"` + Class string `xml:"class,attr"` + Doc string `xml:"doc"` +} + +type Amqp struct { + Major int `xml:"major,attr"` + Minor int `xml:"minor,attr"` + Port int `xml:"port,attr"` + Comment string `xml:"comment,attr"` + + Constants []Constant `xml:"constant"` + Domains []Domain `xml:"domain"` + Classes []Class `xml:"class"` +} + +type renderer struct { + Root Amqp + bitcounter int +} + +type fieldset struct { + AmqpType string + NativeType string + Fields []Field + *renderer +} + +var ( + helpers = template.FuncMap{ + "public": public, + "private": private, + "clean": clean, + } + + packageTemplate = template.Must(template.New("package").Funcs(helpers).Parse(` + // Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + // Source code and contact info at http://github.com/streadway/amqp + + /* GENERATED FILE - DO NOT EDIT */ + /* Rebuild from the spec/gen.go tool */ + + {{with .Root}} + package amqp + + import ( + "fmt" + "encoding/binary" + "io" + ) + + // Error codes that can be sent from the server during a connection or + // channel exception or used by the client to indicate a class of error like + // ErrCredentials. The text of the error is likely more interesting than + // these constants. 
+ const ( + {{range $c := .Constants}} + {{if $c.IsError}}{{.Name | public}}{{else}}{{.Name | private}}{{end}} = {{.Value}}{{end}} + ) + + func isSoftExceptionCode(code int) bool { + switch code { + {{range $c := .Constants}} {{if $c.IsSoftError}} case {{$c.Value}}: + return true + {{end}}{{end}} + } + return false + } + + {{range .Classes}} + {{$class := .}} + {{range .Methods}} + {{$method := .}} + {{$struct := $.StructName $class.Name $method.Name}} + {{if .Docs}}/* {{range .Docs}} {{.Body | clean}} {{end}} */{{end}} + type {{$struct}} struct { + {{range .Fields}} + {{$.FieldName .}} {{$.FieldType . | $.NativeType}} {{if .Label}}// {{.Label}}{{end}}{{end}} + {{if .Content}}Properties properties + Body []byte{{end}} + } + + func (msg *{{$struct}}) id() (uint16, uint16) { + return {{$class.Index}}, {{$method.Index}} + } + + func (msg *{{$struct}}) wait() (bool) { + return {{.Synchronous}}{{if $.HasField "NoWait" .}} && !msg.NoWait{{end}} + } + + {{if .Content}} + func (msg *{{$struct}}) getContent() (properties, []byte) { + return msg.Properties, msg.Body + } + + func (msg *{{$struct}}) setContent(props properties, body []byte) { + msg.Properties, msg.Body = props, body + } + {{end}} + func (msg *{{$struct}}) write(w io.Writer) (err error) { + {{if $.HasType "bit" $method}}var bits byte{{end}} + {{.Fields | $.Fieldsets | $.Partial "enc-"}} + return + } + + func (msg *{{$struct}}) read(r io.Reader) (err error) { + {{if $.HasType "bit" $method}}var bits byte{{end}} + {{.Fields | $.Fieldsets | $.Partial "dec-"}} + return + } + {{end}} + {{end}} + + func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err error) { + mf := &methodFrame { + ChannelId: channel, + } + + if err = binary.Read(r.r, binary.BigEndian, &mf.ClassId); err != nil { + return + } + + if err = binary.Read(r.r, binary.BigEndian, &mf.MethodId); err != nil { + return + } + + switch mf.ClassId { + {{range .Classes}} + {{$class := .}} + case {{.Index}}: // {{.Name}} + switch 
mf.MethodId { + {{range .Methods}} + case {{.Index}}: // {{$class.Name}} {{.Name}} + //fmt.Println("NextMethod: class:{{$class.Index}} method:{{.Index}}") + method := &{{$.StructName $class.Name .Name}}{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + {{end}} + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + {{end}} + default: + return nil, fmt.Errorf("Bad method frame, unknown class %d", mf.ClassId) + } + + return mf, nil + } + {{end}} + + {{define "enc-bit"}} + {{range $off, $field := .Fields}} + if msg.{{$field | $.FieldName}} { bits |= 1 << {{$off}} } + {{end}} + if err = binary.Write(w, binary.BigEndian, bits); err != nil { return } + {{end}} + {{define "enc-octet"}} + {{range .Fields}} if err = binary.Write(w, binary.BigEndian, msg.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "enc-shortshort"}} + {{range .Fields}} if err = binary.Write(w, binary.BigEndian, msg.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "enc-short"}} + {{range .Fields}} if err = binary.Write(w, binary.BigEndian, msg.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "enc-long"}} + {{range .Fields}} if err = binary.Write(w, binary.BigEndian, msg.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "enc-longlong"}} + {{range .Fields}} if err = binary.Write(w, binary.BigEndian, msg.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "enc-timestamp"}} + {{range .Fields}} if err = writeTimestamp(w, msg.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "enc-shortstr"}} + {{range .Fields}} if err = writeShortstr(w, msg.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "enc-longstr"}} + {{range .Fields}} if err = writeLongstr(w, msg.{{. 
| $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "enc-table"}} + {{range .Fields}} if err = writeTable(w, msg.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + + {{define "dec-bit"}} + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + {{range $off, $field := .Fields}} msg.{{$field | $.FieldName}} = (bits & (1 << {{$off}}) > 0) + {{end}} + {{end}} + {{define "dec-octet"}} + {{range .Fields}} if err = binary.Read(r, binary.BigEndian, &msg.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "dec-shortshort"}} + {{range .Fields}} if err = binary.Read(r, binary.BigEndian, &msg.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "dec-short"}} + {{range .Fields}} if err = binary.Read(r, binary.BigEndian, &msg.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "dec-long"}} + {{range .Fields}} if err = binary.Read(r, binary.BigEndian, &msg.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "dec-longlong"}} + {{range .Fields}} if err = binary.Read(r, binary.BigEndian, &msg.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "dec-timestamp"}} + {{range .Fields}} if msg.{{. | $.FieldName}}, err = readTimestamp(r); err != nil { return } + {{end}} + {{end}} + {{define "dec-shortstr"}} + {{range .Fields}} if msg.{{. | $.FieldName}}, err = readShortstr(r); err != nil { return } + {{end}} + {{end}} + {{define "dec-longstr"}} + {{range .Fields}} if msg.{{. | $.FieldName}}, err = readLongstr(r); err != nil { return } + {{end}} + {{end}} + {{define "dec-table"}} + {{range .Fields}} if msg.{{. 
| $.FieldName}}, err = readTable(r); err != nil { return } + {{end}} + {{end}} + + `)) +) + +func (c *Constant) IsError() bool { + return strings.Contains(c.Class, "error") +} + +func (c *Constant) IsSoftError() bool { + return c.Class == "soft-error" +} + +func (renderer *renderer) Partial(prefix string, fields []fieldset) (s string, err error) { + var buf bytes.Buffer + for _, set := range fields { + name := prefix + set.AmqpType + t := packageTemplate.Lookup(name) + if t == nil { + return "", errors.New(fmt.Sprintf("Missing template: %s", name)) + } + if err = t.Execute(&buf, set); err != nil { + return + } + } + return string(buf.Bytes()), nil +} + +// Groups the fields so that the right encoder/decoder can be called +func (renderer *renderer) Fieldsets(fields []Field) (f []fieldset, err error) { + if len(fields) > 0 { + for _, field := range fields { + cur := fieldset{} + cur.AmqpType, err = renderer.FieldType(field) + if err != nil { + return + } + + cur.NativeType, err = renderer.NativeType(cur.AmqpType) + if err != nil { + return + } + cur.Fields = append(cur.Fields, field) + f = append(f, cur) + } + + i, j := 0, 1 + for j < len(f) { + if f[i].AmqpType == f[j].AmqpType { + f[i].Fields = append(f[i].Fields, f[j].Fields...) 
+ } else { + i++ + f[i] = f[j] + } + j++ + } + return f[:i+1], nil + } + + return +} + +func (renderer *renderer) HasType(typ string, method Method) bool { + for _, f := range method.Fields { + name, _ := renderer.FieldType(f) + if name == typ { + return true + } + } + return false +} + +func (renderer *renderer) HasField(field string, method Method) bool { + for _, f := range method.Fields { + name := renderer.FieldName(f) + if name == field { + return true + } + } + return false +} + +func (renderer *renderer) Domain(field Field) (domain Domain, err error) { + for _, domain = range renderer.Root.Domains { + if field.Domain == domain.Name { + return + } + } + return domain, nil + //return domain, ErrUnknownDomain +} + +func (renderer *renderer) FieldName(field Field) (t string) { + t = public(field.Name) + + if field.Reserved { + t = strings.ToLower(t) + } + + return +} + +func (renderer *renderer) FieldType(field Field) (t string, err error) { + t = field.Type + + if t == "" { + var domain Domain + domain, err = renderer.Domain(field) + if err != nil { + return "", err + } + t = domain.Type + } + + return +} + +func (renderer *renderer) NativeType(amqpType string) (t string, err error) { + if t, ok := amqpTypeToNative[amqpType]; ok { + return t, nil + } + return "", ErrUnknownType +} + +func (renderer *renderer) Tag(d Domain) string { + label := "`" + + label += `domain:"` + d.Name + `"` + + if len(d.Type) > 0 { + label += `,type:"` + d.Type + `"` + } + + label += "`" + + return label +} + +func (renderer *renderer) StructName(parts ...string) string { + return parts[0] + public(parts[1:]...) +} + +func clean(body string) (res string) { + return strings.Replace(body, "\r", "", -1) +} + +func private(parts ...string) string { + return export(regexp.MustCompile(`[-_]\w`), parts...) +} + +func public(parts ...string) string { + return export(regexp.MustCompile(`^\w|[-_]\w`), parts...) 
+} + +func export(delim *regexp.Regexp, parts ...string) (res string) { + for _, in := range parts { + + res += delim.ReplaceAllStringFunc(in, func(match string) string { + switch len(match) { + case 1: + return strings.ToUpper(match) + case 2: + return strings.ToUpper(match[1:]) + } + panic("unreachable") + }) + } + + return +} + +func main() { + var r renderer + + spec, err := ioutil.ReadAll(os.Stdin) + if err != nil { + log.Fatalln("Please pass spec on stdin", err) + } + + err = xml.Unmarshal(spec, &r.Root) + + if err != nil { + log.Fatalln("Could not parse XML:", err) + } + + if err = packageTemplate.Execute(os.Stdout, &r); err != nil { + log.Fatalln("Generate error: ", err) + } +} diff --git a/vendor/github.com/streadway/amqp/spec091.go b/vendor/github.com/streadway/amqp/spec091.go new file mode 100644 index 0000000..cd53ebe --- /dev/null +++ b/vendor/github.com/streadway/amqp/spec091.go @@ -0,0 +1,3306 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +/* GENERATED FILE - DO NOT EDIT */ +/* Rebuild from the spec/gen.go tool */ + +package amqp + +import ( + "encoding/binary" + "fmt" + "io" +) + +// Error codes that can be sent from the server during a connection or +// channel exception or used by the client to indicate a class of error like +// ErrCredentials. The text of the error is likely more interesting than +// these constants. 
+const ( + frameMethod = 1 + frameHeader = 2 + frameBody = 3 + frameHeartbeat = 8 + frameMinSize = 4096 + frameEnd = 206 + replySuccess = 200 + ContentTooLarge = 311 + NoRoute = 312 + NoConsumers = 313 + ConnectionForced = 320 + InvalidPath = 402 + AccessRefused = 403 + NotFound = 404 + ResourceLocked = 405 + PreconditionFailed = 406 + FrameError = 501 + SyntaxError = 502 + CommandInvalid = 503 + ChannelError = 504 + UnexpectedFrame = 505 + ResourceError = 506 + NotAllowed = 530 + NotImplemented = 540 + InternalError = 541 +) + +func isSoftExceptionCode(code int) bool { + switch code { + case 311: + return true + case 312: + return true + case 313: + return true + case 403: + return true + case 404: + return true + case 405: + return true + case 406: + return true + + } + return false +} + +type connectionStart struct { + VersionMajor byte + VersionMinor byte + ServerProperties Table + Mechanisms string + Locales string +} + +func (msg *connectionStart) id() (uint16, uint16) { + return 10, 10 +} + +func (msg *connectionStart) wait() bool { + return true +} + +func (msg *connectionStart) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.VersionMajor); err != nil { + return + } + if err = binary.Write(w, binary.BigEndian, msg.VersionMinor); err != nil { + return + } + + if err = writeTable(w, msg.ServerProperties); err != nil { + return + } + + if err = writeLongstr(w, msg.Mechanisms); err != nil { + return + } + if err = writeLongstr(w, msg.Locales); err != nil { + return + } + + return +} + +func (msg *connectionStart) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.VersionMajor); err != nil { + return + } + if err = binary.Read(r, binary.BigEndian, &msg.VersionMinor); err != nil { + return + } + + if msg.ServerProperties, err = readTable(r); err != nil { + return + } + + if msg.Mechanisms, err = readLongstr(r); err != nil { + return + } + if msg.Locales, err = readLongstr(r); err != nil { + 
return + } + + return +} + +type connectionStartOk struct { + ClientProperties Table + Mechanism string + Response string + Locale string +} + +func (msg *connectionStartOk) id() (uint16, uint16) { + return 10, 11 +} + +func (msg *connectionStartOk) wait() bool { + return true +} + +func (msg *connectionStartOk) write(w io.Writer) (err error) { + + if err = writeTable(w, msg.ClientProperties); err != nil { + return + } + + if err = writeShortstr(w, msg.Mechanism); err != nil { + return + } + + if err = writeLongstr(w, msg.Response); err != nil { + return + } + + if err = writeShortstr(w, msg.Locale); err != nil { + return + } + + return +} + +func (msg *connectionStartOk) read(r io.Reader) (err error) { + + if msg.ClientProperties, err = readTable(r); err != nil { + return + } + + if msg.Mechanism, err = readShortstr(r); err != nil { + return + } + + if msg.Response, err = readLongstr(r); err != nil { + return + } + + if msg.Locale, err = readShortstr(r); err != nil { + return + } + + return +} + +type connectionSecure struct { + Challenge string +} + +func (msg *connectionSecure) id() (uint16, uint16) { + return 10, 20 +} + +func (msg *connectionSecure) wait() bool { + return true +} + +func (msg *connectionSecure) write(w io.Writer) (err error) { + + if err = writeLongstr(w, msg.Challenge); err != nil { + return + } + + return +} + +func (msg *connectionSecure) read(r io.Reader) (err error) { + + if msg.Challenge, err = readLongstr(r); err != nil { + return + } + + return +} + +type connectionSecureOk struct { + Response string +} + +func (msg *connectionSecureOk) id() (uint16, uint16) { + return 10, 21 +} + +func (msg *connectionSecureOk) wait() bool { + return true +} + +func (msg *connectionSecureOk) write(w io.Writer) (err error) { + + if err = writeLongstr(w, msg.Response); err != nil { + return + } + + return +} + +func (msg *connectionSecureOk) read(r io.Reader) (err error) { + + if msg.Response, err = readLongstr(r); err != nil { + return + } + + return 
+} + +type connectionTune struct { + ChannelMax uint16 + FrameMax uint32 + Heartbeat uint16 +} + +func (msg *connectionTune) id() (uint16, uint16) { + return 10, 30 +} + +func (msg *connectionTune) wait() bool { + return true +} + +func (msg *connectionTune) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.ChannelMax); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.FrameMax); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.Heartbeat); err != nil { + return + } + + return +} + +func (msg *connectionTune) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.ChannelMax); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.FrameMax); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.Heartbeat); err != nil { + return + } + + return +} + +type connectionTuneOk struct { + ChannelMax uint16 + FrameMax uint32 + Heartbeat uint16 +} + +func (msg *connectionTuneOk) id() (uint16, uint16) { + return 10, 31 +} + +func (msg *connectionTuneOk) wait() bool { + return true +} + +func (msg *connectionTuneOk) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.ChannelMax); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.FrameMax); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.Heartbeat); err != nil { + return + } + + return +} + +func (msg *connectionTuneOk) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.ChannelMax); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.FrameMax); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.Heartbeat); err != nil { + return + } + + return +} + +type connectionOpen struct { + VirtualHost string + reserved1 string + reserved2 bool +} + +func (msg *connectionOpen) id() (uint16, uint16) { + return 10, 
40 +} + +func (msg *connectionOpen) wait() bool { + return true +} + +func (msg *connectionOpen) write(w io.Writer) (err error) { + var bits byte + + if err = writeShortstr(w, msg.VirtualHost); err != nil { + return + } + if err = writeShortstr(w, msg.reserved1); err != nil { + return + } + + if msg.reserved2 { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *connectionOpen) read(r io.Reader) (err error) { + var bits byte + + if msg.VirtualHost, err = readShortstr(r); err != nil { + return + } + if msg.reserved1, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.reserved2 = (bits&(1<<0) > 0) + + return +} + +type connectionOpenOk struct { + reserved1 string +} + +func (msg *connectionOpenOk) id() (uint16, uint16) { + return 10, 41 +} + +func (msg *connectionOpenOk) wait() bool { + return true +} + +func (msg *connectionOpenOk) write(w io.Writer) (err error) { + + if err = writeShortstr(w, msg.reserved1); err != nil { + return + } + + return +} + +func (msg *connectionOpenOk) read(r io.Reader) (err error) { + + if msg.reserved1, err = readShortstr(r); err != nil { + return + } + + return +} + +type connectionClose struct { + ReplyCode uint16 + ReplyText string + ClassId uint16 + MethodId uint16 +} + +func (msg *connectionClose) id() (uint16, uint16) { + return 10, 50 +} + +func (msg *connectionClose) wait() bool { + return true +} + +func (msg *connectionClose) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.ReplyCode); err != nil { + return + } + + if err = writeShortstr(w, msg.ReplyText); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.ClassId); err != nil { + return + } + if err = binary.Write(w, binary.BigEndian, msg.MethodId); err != nil { + return + } + + return +} + +func (msg *connectionClose) read(r io.Reader) (err error) { + + if 
err = binary.Read(r, binary.BigEndian, &msg.ReplyCode); err != nil { + return + } + + if msg.ReplyText, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.ClassId); err != nil { + return + } + if err = binary.Read(r, binary.BigEndian, &msg.MethodId); err != nil { + return + } + + return +} + +type connectionCloseOk struct { +} + +func (msg *connectionCloseOk) id() (uint16, uint16) { + return 10, 51 +} + +func (msg *connectionCloseOk) wait() bool { + return true +} + +func (msg *connectionCloseOk) write(w io.Writer) (err error) { + + return +} + +func (msg *connectionCloseOk) read(r io.Reader) (err error) { + + return +} + +type connectionBlocked struct { + Reason string +} + +func (msg *connectionBlocked) id() (uint16, uint16) { + return 10, 60 +} + +func (msg *connectionBlocked) wait() bool { + return false +} + +func (msg *connectionBlocked) write(w io.Writer) (err error) { + + if err = writeShortstr(w, msg.Reason); err != nil { + return + } + + return +} + +func (msg *connectionBlocked) read(r io.Reader) (err error) { + + if msg.Reason, err = readShortstr(r); err != nil { + return + } + + return +} + +type connectionUnblocked struct { +} + +func (msg *connectionUnblocked) id() (uint16, uint16) { + return 10, 61 +} + +func (msg *connectionUnblocked) wait() bool { + return false +} + +func (msg *connectionUnblocked) write(w io.Writer) (err error) { + + return +} + +func (msg *connectionUnblocked) read(r io.Reader) (err error) { + + return +} + +type channelOpen struct { + reserved1 string +} + +func (msg *channelOpen) id() (uint16, uint16) { + return 20, 10 +} + +func (msg *channelOpen) wait() bool { + return true +} + +func (msg *channelOpen) write(w io.Writer) (err error) { + + if err = writeShortstr(w, msg.reserved1); err != nil { + return + } + + return +} + +func (msg *channelOpen) read(r io.Reader) (err error) { + + if msg.reserved1, err = readShortstr(r); err != nil { + return + } + + return +} + +type 
channelOpenOk struct { + reserved1 string +} + +func (msg *channelOpenOk) id() (uint16, uint16) { + return 20, 11 +} + +func (msg *channelOpenOk) wait() bool { + return true +} + +func (msg *channelOpenOk) write(w io.Writer) (err error) { + + if err = writeLongstr(w, msg.reserved1); err != nil { + return + } + + return +} + +func (msg *channelOpenOk) read(r io.Reader) (err error) { + + if msg.reserved1, err = readLongstr(r); err != nil { + return + } + + return +} + +type channelFlow struct { + Active bool +} + +func (msg *channelFlow) id() (uint16, uint16) { + return 20, 20 +} + +func (msg *channelFlow) wait() bool { + return true +} + +func (msg *channelFlow) write(w io.Writer) (err error) { + var bits byte + + if msg.Active { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *channelFlow) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Active = (bits&(1<<0) > 0) + + return +} + +type channelFlowOk struct { + Active bool +} + +func (msg *channelFlowOk) id() (uint16, uint16) { + return 20, 21 +} + +func (msg *channelFlowOk) wait() bool { + return false +} + +func (msg *channelFlowOk) write(w io.Writer) (err error) { + var bits byte + + if msg.Active { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *channelFlowOk) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Active = (bits&(1<<0) > 0) + + return +} + +type channelClose struct { + ReplyCode uint16 + ReplyText string + ClassId uint16 + MethodId uint16 +} + +func (msg *channelClose) id() (uint16, uint16) { + return 20, 40 +} + +func (msg *channelClose) wait() bool { + return true +} + +func (msg *channelClose) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, 
msg.ReplyCode); err != nil { + return + } + + if err = writeShortstr(w, msg.ReplyText); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.ClassId); err != nil { + return + } + if err = binary.Write(w, binary.BigEndian, msg.MethodId); err != nil { + return + } + + return +} + +func (msg *channelClose) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.ReplyCode); err != nil { + return + } + + if msg.ReplyText, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.ClassId); err != nil { + return + } + if err = binary.Read(r, binary.BigEndian, &msg.MethodId); err != nil { + return + } + + return +} + +type channelCloseOk struct { +} + +func (msg *channelCloseOk) id() (uint16, uint16) { + return 20, 41 +} + +func (msg *channelCloseOk) wait() bool { + return true +} + +func (msg *channelCloseOk) write(w io.Writer) (err error) { + + return +} + +func (msg *channelCloseOk) read(r io.Reader) (err error) { + + return +} + +type exchangeDeclare struct { + reserved1 uint16 + Exchange string + Type string + Passive bool + Durable bool + AutoDelete bool + Internal bool + NoWait bool + Arguments Table +} + +func (msg *exchangeDeclare) id() (uint16, uint16) { + return 40, 10 +} + +func (msg *exchangeDeclare) wait() bool { + return true && !msg.NoWait +} + +func (msg *exchangeDeclare) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Exchange); err != nil { + return + } + if err = writeShortstr(w, msg.Type); err != nil { + return + } + + if msg.Passive { + bits |= 1 << 0 + } + + if msg.Durable { + bits |= 1 << 1 + } + + if msg.AutoDelete { + bits |= 1 << 2 + } + + if msg.Internal { + bits |= 1 << 3 + } + + if msg.NoWait { + bits |= 1 << 4 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, 
msg.Arguments); err != nil { + return + } + + return +} + +func (msg *exchangeDeclare) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Exchange, err = readShortstr(r); err != nil { + return + } + if msg.Type, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Passive = (bits&(1<<0) > 0) + msg.Durable = (bits&(1<<1) > 0) + msg.AutoDelete = (bits&(1<<2) > 0) + msg.Internal = (bits&(1<<3) > 0) + msg.NoWait = (bits&(1<<4) > 0) + + if msg.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type exchangeDeclareOk struct { +} + +func (msg *exchangeDeclareOk) id() (uint16, uint16) { + return 40, 11 +} + +func (msg *exchangeDeclareOk) wait() bool { + return true +} + +func (msg *exchangeDeclareOk) write(w io.Writer) (err error) { + + return +} + +func (msg *exchangeDeclareOk) read(r io.Reader) (err error) { + + return +} + +type exchangeDelete struct { + reserved1 uint16 + Exchange string + IfUnused bool + NoWait bool +} + +func (msg *exchangeDelete) id() (uint16, uint16) { + return 40, 20 +} + +func (msg *exchangeDelete) wait() bool { + return true && !msg.NoWait +} + +func (msg *exchangeDelete) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Exchange); err != nil { + return + } + + if msg.IfUnused { + bits |= 1 << 0 + } + + if msg.NoWait { + bits |= 1 << 1 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *exchangeDelete) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Exchange, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil 
{ + return + } + msg.IfUnused = (bits&(1<<0) > 0) + msg.NoWait = (bits&(1<<1) > 0) + + return +} + +type exchangeDeleteOk struct { +} + +func (msg *exchangeDeleteOk) id() (uint16, uint16) { + return 40, 21 +} + +func (msg *exchangeDeleteOk) wait() bool { + return true +} + +func (msg *exchangeDeleteOk) write(w io.Writer) (err error) { + + return +} + +func (msg *exchangeDeleteOk) read(r io.Reader) (err error) { + + return +} + +type exchangeBind struct { + reserved1 uint16 + Destination string + Source string + RoutingKey string + NoWait bool + Arguments Table +} + +func (msg *exchangeBind) id() (uint16, uint16) { + return 40, 30 +} + +func (msg *exchangeBind) wait() bool { + return true && !msg.NoWait +} + +func (msg *exchangeBind) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Destination); err != nil { + return + } + if err = writeShortstr(w, msg.Source); err != nil { + return + } + if err = writeShortstr(w, msg.RoutingKey); err != nil { + return + } + + if msg.NoWait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, msg.Arguments); err != nil { + return + } + + return +} + +func (msg *exchangeBind) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Destination, err = readShortstr(r); err != nil { + return + } + if msg.Source, err = readShortstr(r); err != nil { + return + } + if msg.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.NoWait = (bits&(1<<0) > 0) + + if msg.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type exchangeBindOk struct { +} + +func (msg *exchangeBindOk) id() (uint16, uint16) { + return 40, 31 +} + +func (msg 
*exchangeBindOk) wait() bool { + return true +} + +func (msg *exchangeBindOk) write(w io.Writer) (err error) { + + return +} + +func (msg *exchangeBindOk) read(r io.Reader) (err error) { + + return +} + +type exchangeUnbind struct { + reserved1 uint16 + Destination string + Source string + RoutingKey string + NoWait bool + Arguments Table +} + +func (msg *exchangeUnbind) id() (uint16, uint16) { + return 40, 40 +} + +func (msg *exchangeUnbind) wait() bool { + return true && !msg.NoWait +} + +func (msg *exchangeUnbind) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Destination); err != nil { + return + } + if err = writeShortstr(w, msg.Source); err != nil { + return + } + if err = writeShortstr(w, msg.RoutingKey); err != nil { + return + } + + if msg.NoWait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, msg.Arguments); err != nil { + return + } + + return +} + +func (msg *exchangeUnbind) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Destination, err = readShortstr(r); err != nil { + return + } + if msg.Source, err = readShortstr(r); err != nil { + return + } + if msg.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.NoWait = (bits&(1<<0) > 0) + + if msg.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type exchangeUnbindOk struct { +} + +func (msg *exchangeUnbindOk) id() (uint16, uint16) { + return 40, 51 +} + +func (msg *exchangeUnbindOk) wait() bool { + return true +} + +func (msg *exchangeUnbindOk) write(w io.Writer) (err error) { + + return +} + +func (msg *exchangeUnbindOk) read(r io.Reader) (err error) { + + return +} + +type 
queueDeclare struct { + reserved1 uint16 + Queue string + Passive bool + Durable bool + Exclusive bool + AutoDelete bool + NoWait bool + Arguments Table +} + +func (msg *queueDeclare) id() (uint16, uint16) { + return 50, 10 +} + +func (msg *queueDeclare) wait() bool { + return true && !msg.NoWait +} + +func (msg *queueDeclare) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Queue); err != nil { + return + } + + if msg.Passive { + bits |= 1 << 0 + } + + if msg.Durable { + bits |= 1 << 1 + } + + if msg.Exclusive { + bits |= 1 << 2 + } + + if msg.AutoDelete { + bits |= 1 << 3 + } + + if msg.NoWait { + bits |= 1 << 4 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, msg.Arguments); err != nil { + return + } + + return +} + +func (msg *queueDeclare) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Queue, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Passive = (bits&(1<<0) > 0) + msg.Durable = (bits&(1<<1) > 0) + msg.Exclusive = (bits&(1<<2) > 0) + msg.AutoDelete = (bits&(1<<3) > 0) + msg.NoWait = (bits&(1<<4) > 0) + + if msg.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type queueDeclareOk struct { + Queue string + MessageCount uint32 + ConsumerCount uint32 +} + +func (msg *queueDeclareOk) id() (uint16, uint16) { + return 50, 11 +} + +func (msg *queueDeclareOk) wait() bool { + return true +} + +func (msg *queueDeclareOk) write(w io.Writer) (err error) { + + if err = writeShortstr(w, msg.Queue); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.MessageCount); err != nil { + return + } + if err = binary.Write(w, binary.BigEndian, msg.ConsumerCount); 
err != nil { + return + } + + return +} + +func (msg *queueDeclareOk) read(r io.Reader) (err error) { + + if msg.Queue, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.MessageCount); err != nil { + return + } + if err = binary.Read(r, binary.BigEndian, &msg.ConsumerCount); err != nil { + return + } + + return +} + +type queueBind struct { + reserved1 uint16 + Queue string + Exchange string + RoutingKey string + NoWait bool + Arguments Table +} + +func (msg *queueBind) id() (uint16, uint16) { + return 50, 20 +} + +func (msg *queueBind) wait() bool { + return true && !msg.NoWait +} + +func (msg *queueBind) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Queue); err != nil { + return + } + if err = writeShortstr(w, msg.Exchange); err != nil { + return + } + if err = writeShortstr(w, msg.RoutingKey); err != nil { + return + } + + if msg.NoWait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, msg.Arguments); err != nil { + return + } + + return +} + +func (msg *queueBind) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Queue, err = readShortstr(r); err != nil { + return + } + if msg.Exchange, err = readShortstr(r); err != nil { + return + } + if msg.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.NoWait = (bits&(1<<0) > 0) + + if msg.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type queueBindOk struct { +} + +func (msg *queueBindOk) id() (uint16, uint16) { + return 50, 21 +} + +func (msg *queueBindOk) wait() bool { + return true +} + +func (msg *queueBindOk) write(w io.Writer) (err error) { + + 
return +} + +func (msg *queueBindOk) read(r io.Reader) (err error) { + + return +} + +type queueUnbind struct { + reserved1 uint16 + Queue string + Exchange string + RoutingKey string + Arguments Table +} + +func (msg *queueUnbind) id() (uint16, uint16) { + return 50, 50 +} + +func (msg *queueUnbind) wait() bool { + return true +} + +func (msg *queueUnbind) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Queue); err != nil { + return + } + if err = writeShortstr(w, msg.Exchange); err != nil { + return + } + if err = writeShortstr(w, msg.RoutingKey); err != nil { + return + } + + if err = writeTable(w, msg.Arguments); err != nil { + return + } + + return +} + +func (msg *queueUnbind) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Queue, err = readShortstr(r); err != nil { + return + } + if msg.Exchange, err = readShortstr(r); err != nil { + return + } + if msg.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if msg.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type queueUnbindOk struct { +} + +func (msg *queueUnbindOk) id() (uint16, uint16) { + return 50, 51 +} + +func (msg *queueUnbindOk) wait() bool { + return true +} + +func (msg *queueUnbindOk) write(w io.Writer) (err error) { + + return +} + +func (msg *queueUnbindOk) read(r io.Reader) (err error) { + + return +} + +type queuePurge struct { + reserved1 uint16 + Queue string + NoWait bool +} + +func (msg *queuePurge) id() (uint16, uint16) { + return 50, 30 +} + +func (msg *queuePurge) wait() bool { + return true && !msg.NoWait +} + +func (msg *queuePurge) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Queue); err != nil { + return + } + + if msg.NoWait { + 
bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *queuePurge) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Queue, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.NoWait = (bits&(1<<0) > 0) + + return +} + +type queuePurgeOk struct { + MessageCount uint32 +} + +func (msg *queuePurgeOk) id() (uint16, uint16) { + return 50, 31 +} + +func (msg *queuePurgeOk) wait() bool { + return true +} + +func (msg *queuePurgeOk) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.MessageCount); err != nil { + return + } + + return +} + +func (msg *queuePurgeOk) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.MessageCount); err != nil { + return + } + + return +} + +type queueDelete struct { + reserved1 uint16 + Queue string + IfUnused bool + IfEmpty bool + NoWait bool +} + +func (msg *queueDelete) id() (uint16, uint16) { + return 50, 40 +} + +func (msg *queueDelete) wait() bool { + return true && !msg.NoWait +} + +func (msg *queueDelete) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Queue); err != nil { + return + } + + if msg.IfUnused { + bits |= 1 << 0 + } + + if msg.IfEmpty { + bits |= 1 << 1 + } + + if msg.NoWait { + bits |= 1 << 2 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *queueDelete) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Queue, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + 
return + } + msg.IfUnused = (bits&(1<<0) > 0) + msg.IfEmpty = (bits&(1<<1) > 0) + msg.NoWait = (bits&(1<<2) > 0) + + return +} + +type queueDeleteOk struct { + MessageCount uint32 +} + +func (msg *queueDeleteOk) id() (uint16, uint16) { + return 50, 41 +} + +func (msg *queueDeleteOk) wait() bool { + return true +} + +func (msg *queueDeleteOk) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.MessageCount); err != nil { + return + } + + return +} + +func (msg *queueDeleteOk) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.MessageCount); err != nil { + return + } + + return +} + +type basicQos struct { + PrefetchSize uint32 + PrefetchCount uint16 + Global bool +} + +func (msg *basicQos) id() (uint16, uint16) { + return 60, 10 +} + +func (msg *basicQos) wait() bool { + return true +} + +func (msg *basicQos) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.PrefetchSize); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.PrefetchCount); err != nil { + return + } + + if msg.Global { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicQos) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.PrefetchSize); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.PrefetchCount); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Global = (bits&(1<<0) > 0) + + return +} + +type basicQosOk struct { +} + +func (msg *basicQosOk) id() (uint16, uint16) { + return 60, 11 +} + +func (msg *basicQosOk) wait() bool { + return true +} + +func (msg *basicQosOk) write(w io.Writer) (err error) { + + return +} + +func (msg *basicQosOk) read(r io.Reader) (err error) { + + return +} + +type basicConsume struct { + reserved1 uint16 + Queue 
string + ConsumerTag string + NoLocal bool + NoAck bool + Exclusive bool + NoWait bool + Arguments Table +} + +func (msg *basicConsume) id() (uint16, uint16) { + return 60, 20 +} + +func (msg *basicConsume) wait() bool { + return true && !msg.NoWait +} + +func (msg *basicConsume) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Queue); err != nil { + return + } + if err = writeShortstr(w, msg.ConsumerTag); err != nil { + return + } + + if msg.NoLocal { + bits |= 1 << 0 + } + + if msg.NoAck { + bits |= 1 << 1 + } + + if msg.Exclusive { + bits |= 1 << 2 + } + + if msg.NoWait { + bits |= 1 << 3 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, msg.Arguments); err != nil { + return + } + + return +} + +func (msg *basicConsume) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Queue, err = readShortstr(r); err != nil { + return + } + if msg.ConsumerTag, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.NoLocal = (bits&(1<<0) > 0) + msg.NoAck = (bits&(1<<1) > 0) + msg.Exclusive = (bits&(1<<2) > 0) + msg.NoWait = (bits&(1<<3) > 0) + + if msg.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type basicConsumeOk struct { + ConsumerTag string +} + +func (msg *basicConsumeOk) id() (uint16, uint16) { + return 60, 21 +} + +func (msg *basicConsumeOk) wait() bool { + return true +} + +func (msg *basicConsumeOk) write(w io.Writer) (err error) { + + if err = writeShortstr(w, msg.ConsumerTag); err != nil { + return + } + + return +} + +func (msg *basicConsumeOk) read(r io.Reader) (err error) { + + if msg.ConsumerTag, err = readShortstr(r); err != nil { + return + } + + return +} + +type basicCancel 
struct { + ConsumerTag string + NoWait bool +} + +func (msg *basicCancel) id() (uint16, uint16) { + return 60, 30 +} + +func (msg *basicCancel) wait() bool { + return true && !msg.NoWait +} + +func (msg *basicCancel) write(w io.Writer) (err error) { + var bits byte + + if err = writeShortstr(w, msg.ConsumerTag); err != nil { + return + } + + if msg.NoWait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicCancel) read(r io.Reader) (err error) { + var bits byte + + if msg.ConsumerTag, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.NoWait = (bits&(1<<0) > 0) + + return +} + +type basicCancelOk struct { + ConsumerTag string +} + +func (msg *basicCancelOk) id() (uint16, uint16) { + return 60, 31 +} + +func (msg *basicCancelOk) wait() bool { + return true +} + +func (msg *basicCancelOk) write(w io.Writer) (err error) { + + if err = writeShortstr(w, msg.ConsumerTag); err != nil { + return + } + + return +} + +func (msg *basicCancelOk) read(r io.Reader) (err error) { + + if msg.ConsumerTag, err = readShortstr(r); err != nil { + return + } + + return +} + +type basicPublish struct { + reserved1 uint16 + Exchange string + RoutingKey string + Mandatory bool + Immediate bool + Properties properties + Body []byte +} + +func (msg *basicPublish) id() (uint16, uint16) { + return 60, 40 +} + +func (msg *basicPublish) wait() bool { + return false +} + +func (msg *basicPublish) getContent() (properties, []byte) { + return msg.Properties, msg.Body +} + +func (msg *basicPublish) setContent(props properties, body []byte) { + msg.Properties, msg.Body = props, body +} + +func (msg *basicPublish) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Exchange); err != nil { + return + } + if err = 
writeShortstr(w, msg.RoutingKey); err != nil { + return + } + + if msg.Mandatory { + bits |= 1 << 0 + } + + if msg.Immediate { + bits |= 1 << 1 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicPublish) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Exchange, err = readShortstr(r); err != nil { + return + } + if msg.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Mandatory = (bits&(1<<0) > 0) + msg.Immediate = (bits&(1<<1) > 0) + + return +} + +type basicReturn struct { + ReplyCode uint16 + ReplyText string + Exchange string + RoutingKey string + Properties properties + Body []byte +} + +func (msg *basicReturn) id() (uint16, uint16) { + return 60, 50 +} + +func (msg *basicReturn) wait() bool { + return false +} + +func (msg *basicReturn) getContent() (properties, []byte) { + return msg.Properties, msg.Body +} + +func (msg *basicReturn) setContent(props properties, body []byte) { + msg.Properties, msg.Body = props, body +} + +func (msg *basicReturn) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.ReplyCode); err != nil { + return + } + + if err = writeShortstr(w, msg.ReplyText); err != nil { + return + } + if err = writeShortstr(w, msg.Exchange); err != nil { + return + } + if err = writeShortstr(w, msg.RoutingKey); err != nil { + return + } + + return +} + +func (msg *basicReturn) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.ReplyCode); err != nil { + return + } + + if msg.ReplyText, err = readShortstr(r); err != nil { + return + } + if msg.Exchange, err = readShortstr(r); err != nil { + return + } + if msg.RoutingKey, err = readShortstr(r); err != nil { + return + } + + return +} + +type basicDeliver struct { + ConsumerTag string 
+ DeliveryTag uint64 + Redelivered bool + Exchange string + RoutingKey string + Properties properties + Body []byte +} + +func (msg *basicDeliver) id() (uint16, uint16) { + return 60, 60 +} + +func (msg *basicDeliver) wait() bool { + return false +} + +func (msg *basicDeliver) getContent() (properties, []byte) { + return msg.Properties, msg.Body +} + +func (msg *basicDeliver) setContent(props properties, body []byte) { + msg.Properties, msg.Body = props, body +} + +func (msg *basicDeliver) write(w io.Writer) (err error) { + var bits byte + + if err = writeShortstr(w, msg.ConsumerTag); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.DeliveryTag); err != nil { + return + } + + if msg.Redelivered { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeShortstr(w, msg.Exchange); err != nil { + return + } + if err = writeShortstr(w, msg.RoutingKey); err != nil { + return + } + + return +} + +func (msg *basicDeliver) read(r io.Reader) (err error) { + var bits byte + + if msg.ConsumerTag, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.DeliveryTag); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Redelivered = (bits&(1<<0) > 0) + + if msg.Exchange, err = readShortstr(r); err != nil { + return + } + if msg.RoutingKey, err = readShortstr(r); err != nil { + return + } + + return +} + +type basicGet struct { + reserved1 uint16 + Queue string + NoAck bool +} + +func (msg *basicGet) id() (uint16, uint16) { + return 60, 70 +} + +func (msg *basicGet) wait() bool { + return true +} + +func (msg *basicGet) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Queue); err != nil { + return + } + + if msg.NoAck { + bits |= 1 << 0 + } + + if err = binary.Write(w, 
binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicGet) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Queue, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.NoAck = (bits&(1<<0) > 0) + + return +} + +type basicGetOk struct { + DeliveryTag uint64 + Redelivered bool + Exchange string + RoutingKey string + MessageCount uint32 + Properties properties + Body []byte +} + +func (msg *basicGetOk) id() (uint16, uint16) { + return 60, 71 +} + +func (msg *basicGetOk) wait() bool { + return true +} + +func (msg *basicGetOk) getContent() (properties, []byte) { + return msg.Properties, msg.Body +} + +func (msg *basicGetOk) setContent(props properties, body []byte) { + msg.Properties, msg.Body = props, body +} + +func (msg *basicGetOk) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.DeliveryTag); err != nil { + return + } + + if msg.Redelivered { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeShortstr(w, msg.Exchange); err != nil { + return + } + if err = writeShortstr(w, msg.RoutingKey); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.MessageCount); err != nil { + return + } + + return +} + +func (msg *basicGetOk) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.DeliveryTag); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Redelivered = (bits&(1<<0) > 0) + + if msg.Exchange, err = readShortstr(r); err != nil { + return + } + if msg.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.MessageCount); err != nil { + return + } + + return +} + 
+type basicGetEmpty struct { + reserved1 string +} + +func (msg *basicGetEmpty) id() (uint16, uint16) { + return 60, 72 +} + +func (msg *basicGetEmpty) wait() bool { + return true +} + +func (msg *basicGetEmpty) write(w io.Writer) (err error) { + + if err = writeShortstr(w, msg.reserved1); err != nil { + return + } + + return +} + +func (msg *basicGetEmpty) read(r io.Reader) (err error) { + + if msg.reserved1, err = readShortstr(r); err != nil { + return + } + + return +} + +type basicAck struct { + DeliveryTag uint64 + Multiple bool +} + +func (msg *basicAck) id() (uint16, uint16) { + return 60, 80 +} + +func (msg *basicAck) wait() bool { + return false +} + +func (msg *basicAck) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.DeliveryTag); err != nil { + return + } + + if msg.Multiple { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicAck) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.DeliveryTag); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Multiple = (bits&(1<<0) > 0) + + return +} + +type basicReject struct { + DeliveryTag uint64 + Requeue bool +} + +func (msg *basicReject) id() (uint16, uint16) { + return 60, 90 +} + +func (msg *basicReject) wait() bool { + return false +} + +func (msg *basicReject) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.DeliveryTag); err != nil { + return + } + + if msg.Requeue { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicReject) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.DeliveryTag); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + 
return + } + msg.Requeue = (bits&(1<<0) > 0) + + return +} + +type basicRecoverAsync struct { + Requeue bool +} + +func (msg *basicRecoverAsync) id() (uint16, uint16) { + return 60, 100 +} + +func (msg *basicRecoverAsync) wait() bool { + return false +} + +func (msg *basicRecoverAsync) write(w io.Writer) (err error) { + var bits byte + + if msg.Requeue { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicRecoverAsync) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Requeue = (bits&(1<<0) > 0) + + return +} + +type basicRecover struct { + Requeue bool +} + +func (msg *basicRecover) id() (uint16, uint16) { + return 60, 110 +} + +func (msg *basicRecover) wait() bool { + return true +} + +func (msg *basicRecover) write(w io.Writer) (err error) { + var bits byte + + if msg.Requeue { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicRecover) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Requeue = (bits&(1<<0) > 0) + + return +} + +type basicRecoverOk struct { +} + +func (msg *basicRecoverOk) id() (uint16, uint16) { + return 60, 111 +} + +func (msg *basicRecoverOk) wait() bool { + return true +} + +func (msg *basicRecoverOk) write(w io.Writer) (err error) { + + return +} + +func (msg *basicRecoverOk) read(r io.Reader) (err error) { + + return +} + +type basicNack struct { + DeliveryTag uint64 + Multiple bool + Requeue bool +} + +func (msg *basicNack) id() (uint16, uint16) { + return 60, 120 +} + +func (msg *basicNack) wait() bool { + return false +} + +func (msg *basicNack) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.DeliveryTag); err != nil { + return + } + + if msg.Multiple { + 
bits |= 1 << 0 + } + + if msg.Requeue { + bits |= 1 << 1 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicNack) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.DeliveryTag); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Multiple = (bits&(1<<0) > 0) + msg.Requeue = (bits&(1<<1) > 0) + + return +} + +type txSelect struct { +} + +func (msg *txSelect) id() (uint16, uint16) { + return 90, 10 +} + +func (msg *txSelect) wait() bool { + return true +} + +func (msg *txSelect) write(w io.Writer) (err error) { + + return +} + +func (msg *txSelect) read(r io.Reader) (err error) { + + return +} + +type txSelectOk struct { +} + +func (msg *txSelectOk) id() (uint16, uint16) { + return 90, 11 +} + +func (msg *txSelectOk) wait() bool { + return true +} + +func (msg *txSelectOk) write(w io.Writer) (err error) { + + return +} + +func (msg *txSelectOk) read(r io.Reader) (err error) { + + return +} + +type txCommit struct { +} + +func (msg *txCommit) id() (uint16, uint16) { + return 90, 20 +} + +func (msg *txCommit) wait() bool { + return true +} + +func (msg *txCommit) write(w io.Writer) (err error) { + + return +} + +func (msg *txCommit) read(r io.Reader) (err error) { + + return +} + +type txCommitOk struct { +} + +func (msg *txCommitOk) id() (uint16, uint16) { + return 90, 21 +} + +func (msg *txCommitOk) wait() bool { + return true +} + +func (msg *txCommitOk) write(w io.Writer) (err error) { + + return +} + +func (msg *txCommitOk) read(r io.Reader) (err error) { + + return +} + +type txRollback struct { +} + +func (msg *txRollback) id() (uint16, uint16) { + return 90, 30 +} + +func (msg *txRollback) wait() bool { + return true +} + +func (msg *txRollback) write(w io.Writer) (err error) { + + return +} + +func (msg *txRollback) read(r io.Reader) (err error) { + + return +} + +type txRollbackOk struct 
{ +} + +func (msg *txRollbackOk) id() (uint16, uint16) { + return 90, 31 +} + +func (msg *txRollbackOk) wait() bool { + return true +} + +func (msg *txRollbackOk) write(w io.Writer) (err error) { + + return +} + +func (msg *txRollbackOk) read(r io.Reader) (err error) { + + return +} + +type confirmSelect struct { + Nowait bool +} + +func (msg *confirmSelect) id() (uint16, uint16) { + return 85, 10 +} + +func (msg *confirmSelect) wait() bool { + return true +} + +func (msg *confirmSelect) write(w io.Writer) (err error) { + var bits byte + + if msg.Nowait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *confirmSelect) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Nowait = (bits&(1<<0) > 0) + + return +} + +type confirmSelectOk struct { +} + +func (msg *confirmSelectOk) id() (uint16, uint16) { + return 85, 11 +} + +func (msg *confirmSelectOk) wait() bool { + return true +} + +func (msg *confirmSelectOk) write(w io.Writer) (err error) { + + return +} + +func (msg *confirmSelectOk) read(r io.Reader) (err error) { + + return +} + +func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err error) { + mf := &methodFrame{ + ChannelId: channel, + } + + if err = binary.Read(r.r, binary.BigEndian, &mf.ClassId); err != nil { + return + } + + if err = binary.Read(r.r, binary.BigEndian, &mf.MethodId); err != nil { + return + } + + switch mf.ClassId { + + case 10: // connection + switch mf.MethodId { + + case 10: // connection start + //fmt.Println("NextMethod: class:10 method:10") + method := &connectionStart{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 11: // connection start-ok + //fmt.Println("NextMethod: class:10 method:11") + method := &connectionStartOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 20: // 
connection secure + //fmt.Println("NextMethod: class:10 method:20") + method := &connectionSecure{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 21: // connection secure-ok + //fmt.Println("NextMethod: class:10 method:21") + method := &connectionSecureOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 30: // connection tune + //fmt.Println("NextMethod: class:10 method:30") + method := &connectionTune{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 31: // connection tune-ok + //fmt.Println("NextMethod: class:10 method:31") + method := &connectionTuneOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 40: // connection open + //fmt.Println("NextMethod: class:10 method:40") + method := &connectionOpen{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 41: // connection open-ok + //fmt.Println("NextMethod: class:10 method:41") + method := &connectionOpenOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 50: // connection close + //fmt.Println("NextMethod: class:10 method:50") + method := &connectionClose{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 51: // connection close-ok + //fmt.Println("NextMethod: class:10 method:51") + method := &connectionCloseOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 60: // connection blocked + //fmt.Println("NextMethod: class:10 method:60") + method := &connectionBlocked{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 61: // connection unblocked + //fmt.Println("NextMethod: class:10 method:61") + method := &connectionUnblocked{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class 
%d", mf.MethodId, mf.ClassId) + } + + case 20: // channel + switch mf.MethodId { + + case 10: // channel open + //fmt.Println("NextMethod: class:20 method:10") + method := &channelOpen{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 11: // channel open-ok + //fmt.Println("NextMethod: class:20 method:11") + method := &channelOpenOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 20: // channel flow + //fmt.Println("NextMethod: class:20 method:20") + method := &channelFlow{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 21: // channel flow-ok + //fmt.Println("NextMethod: class:20 method:21") + method := &channelFlowOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 40: // channel close + //fmt.Println("NextMethod: class:20 method:40") + method := &channelClose{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 41: // channel close-ok + //fmt.Println("NextMethod: class:20 method:41") + method := &channelCloseOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 40: // exchange + switch mf.MethodId { + + case 10: // exchange declare + //fmt.Println("NextMethod: class:40 method:10") + method := &exchangeDeclare{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 11: // exchange declare-ok + //fmt.Println("NextMethod: class:40 method:11") + method := &exchangeDeclareOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 20: // exchange delete + //fmt.Println("NextMethod: class:40 method:20") + method := &exchangeDelete{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 21: // exchange delete-ok + //fmt.Println("NextMethod: 
class:40 method:21") + method := &exchangeDeleteOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 30: // exchange bind + //fmt.Println("NextMethod: class:40 method:30") + method := &exchangeBind{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 31: // exchange bind-ok + //fmt.Println("NextMethod: class:40 method:31") + method := &exchangeBindOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 40: // exchange unbind + //fmt.Println("NextMethod: class:40 method:40") + method := &exchangeUnbind{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 51: // exchange unbind-ok + //fmt.Println("NextMethod: class:40 method:51") + method := &exchangeUnbindOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 50: // queue + switch mf.MethodId { + + case 10: // queue declare + //fmt.Println("NextMethod: class:50 method:10") + method := &queueDeclare{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 11: // queue declare-ok + //fmt.Println("NextMethod: class:50 method:11") + method := &queueDeclareOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 20: // queue bind + //fmt.Println("NextMethod: class:50 method:20") + method := &queueBind{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 21: // queue bind-ok + //fmt.Println("NextMethod: class:50 method:21") + method := &queueBindOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 50: // queue unbind + //fmt.Println("NextMethod: class:50 method:50") + method := &queueUnbind{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 51: // queue unbind-ok + 
//fmt.Println("NextMethod: class:50 method:51") + method := &queueUnbindOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 30: // queue purge + //fmt.Println("NextMethod: class:50 method:30") + method := &queuePurge{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 31: // queue purge-ok + //fmt.Println("NextMethod: class:50 method:31") + method := &queuePurgeOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 40: // queue delete + //fmt.Println("NextMethod: class:50 method:40") + method := &queueDelete{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 41: // queue delete-ok + //fmt.Println("NextMethod: class:50 method:41") + method := &queueDeleteOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 60: // basic + switch mf.MethodId { + + case 10: // basic qos + //fmt.Println("NextMethod: class:60 method:10") + method := &basicQos{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 11: // basic qos-ok + //fmt.Println("NextMethod: class:60 method:11") + method := &basicQosOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 20: // basic consume + //fmt.Println("NextMethod: class:60 method:20") + method := &basicConsume{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 21: // basic consume-ok + //fmt.Println("NextMethod: class:60 method:21") + method := &basicConsumeOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 30: // basic cancel + //fmt.Println("NextMethod: class:60 method:30") + method := &basicCancel{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 31: // basic cancel-ok + 
//fmt.Println("NextMethod: class:60 method:31") + method := &basicCancelOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 40: // basic publish + //fmt.Println("NextMethod: class:60 method:40") + method := &basicPublish{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 50: // basic return + //fmt.Println("NextMethod: class:60 method:50") + method := &basicReturn{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 60: // basic deliver + //fmt.Println("NextMethod: class:60 method:60") + method := &basicDeliver{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 70: // basic get + //fmt.Println("NextMethod: class:60 method:70") + method := &basicGet{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 71: // basic get-ok + //fmt.Println("NextMethod: class:60 method:71") + method := &basicGetOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 72: // basic get-empty + //fmt.Println("NextMethod: class:60 method:72") + method := &basicGetEmpty{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 80: // basic ack + //fmt.Println("NextMethod: class:60 method:80") + method := &basicAck{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 90: // basic reject + //fmt.Println("NextMethod: class:60 method:90") + method := &basicReject{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 100: // basic recover-async + //fmt.Println("NextMethod: class:60 method:100") + method := &basicRecoverAsync{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 110: // basic recover + //fmt.Println("NextMethod: class:60 method:110") + method := &basicRecover{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 
111: // basic recover-ok + //fmt.Println("NextMethod: class:60 method:111") + method := &basicRecoverOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 120: // basic nack + //fmt.Println("NextMethod: class:60 method:120") + method := &basicNack{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 90: // tx + switch mf.MethodId { + + case 10: // tx select + //fmt.Println("NextMethod: class:90 method:10") + method := &txSelect{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 11: // tx select-ok + //fmt.Println("NextMethod: class:90 method:11") + method := &txSelectOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 20: // tx commit + //fmt.Println("NextMethod: class:90 method:20") + method := &txCommit{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 21: // tx commit-ok + //fmt.Println("NextMethod: class:90 method:21") + method := &txCommitOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 30: // tx rollback + //fmt.Println("NextMethod: class:90 method:30") + method := &txRollback{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 31: // tx rollback-ok + //fmt.Println("NextMethod: class:90 method:31") + method := &txRollbackOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 85: // confirm + switch mf.MethodId { + + case 10: // confirm select + //fmt.Println("NextMethod: class:85 method:10") + method := &confirmSelect{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 11: // confirm select-ok + 
//fmt.Println("NextMethod: class:85 method:11") + method := &confirmSelectOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + default: + return nil, fmt.Errorf("Bad method frame, unknown class %d", mf.ClassId) + } + + return mf, nil +} diff --git a/vendor/github.com/streadway/amqp/tls_test.go b/vendor/github.com/streadway/amqp/tls_test.go new file mode 100644 index 0000000..f6021c9 --- /dev/null +++ b/vendor/github.com/streadway/amqp/tls_test.go @@ -0,0 +1,223 @@ +package amqp_test + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io" + "net" + "testing" + "time" + + "github.com/streadway/amqp" +) + +type tlsServer struct { + net.Listener + URL string + Config *tls.Config + Header chan []byte +} + +// Captures the header for each accepted connection +func (s *tlsServer) Serve() { + for { + c, err := s.Accept() + if err != nil { + return + } + + header := make([]byte, 4) + io.ReadFull(c, header) + s.Header <- header + c.Write([]byte{'A', 'M', 'Q', 'P', 0, 0, 0, 0}) + c.Close() + } +} + +func tlsConfig() *tls.Config { + cfg := new(tls.Config) + + cfg.ClientCAs = x509.NewCertPool() + cfg.ClientCAs.AppendCertsFromPEM([]byte(caCert)) + + cert, err := tls.X509KeyPair([]byte(serverCert), []byte(serverKey)) + if err != nil { + panic(err) + } + + cfg.Certificates = append(cfg.Certificates, cert) + cfg.ClientAuth = tls.RequireAndVerifyClientCert + + return cfg +} + +func startTlsServer() tlsServer { + cfg := tlsConfig() + + l, err := tls.Listen("tcp", "127.0.0.1:0", cfg) + if err != nil { + panic(err) + } + + s := tlsServer{ + Listener: l, + Config: cfg, + URL: fmt.Sprintf("amqps://%s/", l.Addr().String()), + Header: make(chan []byte, 1), + } + + go s.Serve() + return s +} + +// Tests that the server has handshaked the connection and seen the client +// protocol announcement. 
Does not nest that the connection.open is successful. +func TestTLSHandshake(t *testing.T) { + srv := startTlsServer() + defer srv.Close() + + cfg := new(tls.Config) + cfg.RootCAs = x509.NewCertPool() + cfg.RootCAs.AppendCertsFromPEM([]byte(caCert)) + + cert, _ := tls.X509KeyPair([]byte(clientCert), []byte(clientKey)) + cfg.Certificates = append(cfg.Certificates, cert) + + c, err := amqp.DialTLS(srv.URL, cfg) + + select { + case <-time.After(10 * time.Millisecond): + t.Fatalf("did not succeed to handshake the TLS connection after 10ms") + case header := <-srv.Header: + if string(header) != "AMQP" { + t.Fatalf("expected to handshake a TLS connection, got err: %v", err) + } + } + + if st := c.ConnectionState(); !st.HandshakeComplete { + t.Errorf("TLS handshake failed, TLS connection state: %+v", st) + } +} + +const caCert = ` +-----BEGIN CERTIFICATE----- +MIICxjCCAa6gAwIBAgIJANWuMWMQSxvdMA0GCSqGSIb3DQEBBQUAMBMxETAPBgNV +BAMTCE15VGVzdENBMB4XDTE0MDEyNzE5NTIyMloXDTI0MDEyNTE5NTIyMlowEzER +MA8GA1UEAxMITXlUZXN0Q0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQDBsIrkW4ob9Z/gzR2/Maa2stbutry6/vvz8eiJwIKIbaHGwqtFOUGiWeKw7H76 +IH3SjTAhNQY2hoKPyH41D36sDJkYBRyHFJTK/6ffvOhpyLnuXJAnoS62eKPSNUAx +5i/lkHj42ESutYAH9qbHCI/gBm9G4WmhGAyA16xzC1n07JObl6KFoY1PqHKl823z +mvF47I24DzemEfjdwC9nAAX/pGYOg9FA9nQv7NnhlsJMxueCx55RNU1ADRoqsbfE +T0CQTOT4ryugGrUp9J4Cwen6YbXZrS6+Kff5SQCAns0Qu8/bwj0DKkuBGLF+Mnwe +mq9bMzyZPUrPM3Gu48ao8YAfAgMBAAGjHTAbMAwGA1UdEwQFMAMBAf8wCwYDVR0P +BAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQCBwXGblRxIEOlEP6ANZ1C8AHWyG8lR +CQduFclc0tmyCCz5fnyLK0aGu9LhXXe6/HSKqgs4mJqeqYOojdjkfOme/YdwDzjK +WIf0kRYQHcB6NeyEZwW8C7subTP1Xw6zbAmjvQrtCGvRM+fi3/cs1sSSkd/EoRk4 +7GM9qQl/JIIoCOGncninf2NQm5YSpbit6/mOQD7EhqXsw+bX+IRh3DHC1Apv/PoA +HlDNeM4vjWaBxsmvRSndrIvew1czboFM18oRSSIqAkU7dKZ0SbC11grzmNxMG2aD +f9y8FIG6RK/SEaOZuc+uBGXx7tj7dczpE/2puqYcaVGwcv4kkrC/ZuRm +-----END CERTIFICATE----- +` + +const serverCert = ` +-----BEGIN CERTIFICATE----- +MIIC8zCCAdugAwIBAgIBATANBgkqhkiG9w0BAQUFADATMREwDwYDVQQDEwhNeVRl 
+c3RDQTAeFw0xNDAxMjcxOTUyMjNaFw0yNDAxMjUxOTUyMjNaMCUxEjAQBgNVBAMT +CTEyNy4wLjAuMTEPMA0GA1UEChMGc2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAxYAKbeGyg0gP0xwVsZsufzk/SUCtD44Gp3lQYQ9QumQ1IVZu +PmZWwPWrzI93a1Abruz6ZhXaB3jcL5QPAy1N44IiFgVN45CZXBsqkpJe/abzRFOV +DRnHxattPDHdgwML5d3nURKGUM/7+ACj5E4pZEDlM3RIjIKVd+doJsL7n6myO8FE +tIpt4vTz1MFp3F+ntPnHU3BZ/VZ1UjSlFWnCjT0CR0tnXsPmlIaC98HThS8x5zNB +fvvSN+Zln8RWdNLnEVHVdqYtOQ828QbCx8s1HfClGgaVoSDrzz+qQgtZFO4wW264 +2CWkNd8DSJUJ/HlPNXmbXsrRMgvGaL7YUz2yRQIDAQABo0AwPjAJBgNVHRMEAjAA +MAsGA1UdDwQEAwIFIDATBgNVHSUEDDAKBggrBgEFBQcDATAPBgNVHREECDAGhwR/ +AAABMA0GCSqGSIb3DQEBBQUAA4IBAQAE2g+wAFf9Xg5svcnb7+mfseYV16k9l5WG +onrmR3FLsbTxfbr4PZJMHrswPbi2NRk0+ETPUpcv1RP7pUB7wSEvuS1NPGcU92iP +58ycP3dYtLzmuu6BkgToZqwsCU8fC2zM0wt3+ifzPpDMffWWOioVuA3zdM9WPQYz ++Ofajd0XaZwFZS8uTI5WXgObz7Xqfmln4tF3Sq1CTyuJ44qK4p83XOKFq+L04aD0 +d0c8w3YQNUENny/vMP9mDu3FQ3SnDz2GKl1LSjGe2TUnkoMkDfdk4wSzndTz/ecb +QiCPKijwVPWNOWV3NDE2edMxDPxDoKoEm5F4UGfGjxSRnYCIoZLh +-----END CERTIFICATE----- +` + +const serverKey = ` +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAxYAKbeGyg0gP0xwVsZsufzk/SUCtD44Gp3lQYQ9QumQ1IVZu +PmZWwPWrzI93a1Abruz6ZhXaB3jcL5QPAy1N44IiFgVN45CZXBsqkpJe/abzRFOV +DRnHxattPDHdgwML5d3nURKGUM/7+ACj5E4pZEDlM3RIjIKVd+doJsL7n6myO8FE +tIpt4vTz1MFp3F+ntPnHU3BZ/VZ1UjSlFWnCjT0CR0tnXsPmlIaC98HThS8x5zNB +fvvSN+Zln8RWdNLnEVHVdqYtOQ828QbCx8s1HfClGgaVoSDrzz+qQgtZFO4wW264 +2CWkNd8DSJUJ/HlPNXmbXsrRMgvGaL7YUz2yRQIDAQABAoIBAGsyEvcPAGg3DbfE +z5WFp9gPx2TIAOanbL8rnlAAEw4H47qDgfTGcSHsdeHioKuTYGMyZrpP8/YISGJe +l0NfLJ5mfH+9Q0hXrJWMfS/u2DYOjo0wXH8u1fpZEEISwqsgVS3fonSjfFmSea1j +E5GQRvEONBkYbWQuYFgjNqmLPS2r5lKbWCQvc1MB/vvVBwOTiO0ON7m/EkM5RKt9 +cDT5ZhhVjBpdmd9HpVbKTdBj8Q0l5/ZHZUEgZA6FDZEwYxTd9l87Z4YT+5SR0z9t +k8/Z0CHd3x3Rv891t7m66ZJkaOda8NC65/432MQEQwJltmrKnc22dS8yI26rrmpp +g3tcbSUCgYEA5nMXdQKS4vF+Kp10l/HqvGz2sU8qQaWYZQIg7Th3QJPo6N52po/s +nn3UF0P5mT1laeZ5ZQJKx4gnmuPnIZ2ZtJQDyFhIbRPcZ+2hSNSuLYVcrumOC3EP +3OZyFtFE1THO73aFe5e1jEdtoOne3Bds/Hq6NF45fkVdL+M9e8pfXIsCgYEA22W8 
+zGjbWyrFOYvKknMQVtHnMx8BJEtsvWRknP6CWAv/8WyeZpE128Pve1m441AQnopS +CuOF5wFK0iUXBFbS3Pe1/1j3em6yfVznuUHqJ7Qc+dNzxVvkTK8jGB6x+vm+M9Hg +muHUM726IUxckoSNXbPNAVPIZab1NdSxam7F9m8CgYEAx55QZmIJXJ41XLKxqWC7 +peZ5NpPNlbncrTpPzUzJN94ntXfmrVckbxGt401VayEctMQYyZ9XqUlOjUP3FU5Q +M3S3Zhba/eljVX8o406fZf0MkNLs4QpZ5E6V6x/xEP+pMhKng6yhbVb+JpIPIvUD +yhyBKRWplbB+DRo5Sv685gsCgYA7l5m9h+m1DJv/cnn2Z2yTuHXtC8namuYRV1iA +0ByFX9UINXGc+GpBpCnDPm6ax5+MAJQiQwSW52H0TIDA+/hQbrQvhHHL/o9av8Zt +Kns4h5KrRQUYIUqUjamhnozHV9iS6LnyN87Usv8AlmY6oehoADN53dD702qdUYVT +HH2G3wKBgCdvqyw78FR/n8cUWesTPnxx5HCeWJ1J+2BESnUnPmKZ71CV1H7uweja +vPUxuuuGLKfNx84OKCfRDbtOgMOeyh9T1RmXry6Srz/7/udjlF0qmFiRXfBNAgoR +tNb0+Ri/vY0AHrQ7UnCbl12qPVaqhEXLr+kCGNEPFqpMJPPEeMK0 +-----END RSA PRIVATE KEY----- +` + +const clientCert = ` +-----BEGIN CERTIFICATE----- +MIIC4jCCAcqgAwIBAgIBAjANBgkqhkiG9w0BAQUFADATMREwDwYDVQQDEwhNeVRl +c3RDQTAeFw0xNDAxMjcxOTUyMjNaFw0yNDAxMjUxOTUyMjNaMCUxEjAQBgNVBAMT +CTEyNy4wLjAuMTEPMA0GA1UEChMGY2xpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAu7LMqd+agoH168Bsi0WJ36ulYqDypq+GZPF7uWOo2pE0raKH +B++31/hjnkt6yC5kLKVZZ0EfolBa9q4Cy6swfGaEMafy44ZCRneLnt1azL1N6Kfz ++U0KsOqyQDoMxYJG1gVTEZN19/U/ew2eazcxKyERI3oGCQ4SbpkxBTbfxtAFk49e +xIB3obsuMVUrmtXE4FkUkvG7NgpPUgrhp0yxYpj9zruZGzGGT1zNhcarbQ/4i7It +ZMbnv6pqQWtYDgnGX2TDRcEiXGeO+KrzhfpTRLfO3K4np8e8cmTyXM+4lMlWUgma +KrRdu1QXozGqRs47u2prGKGdSQWITpqNVCY8fQIDAQABoy8wLTAJBgNVHRMEAjAA +MAsGA1UdDwQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDAjANBgkqhkiG9w0BAQUF +AAOCAQEAhCuBCLznPc4O96hT3P8Fx19L3ltrWbc/pWrx8JjxUaGk8kNmjMjY+/Mt +JBbjUBx2kJwaY0EHMAfw7D1f1wcCeNycx/0dyb0E6xzhmPw5fY15GGNg8rzWwqSY ++i/1iqU0IRkmRHV7XCF+trd2H0Ec+V1Fd/61E2ccJfOL5aSAyWbMCUtWxS3QMnqH +FBfKdVEiY9WNht5hnvsXQBRaNhowJ6Cwa7/1/LZjmhcXiJ0xrc1Hggj3cvS+4vll +Ew+20a0tPKjD/v/2oSQL+qkeYKV4fhCGkaBHCpPlSJrqorb7B6NmPy3nS26ETKE/ +o2UCfZc5g2MU1ENa31kT1iuhKZapsA== +-----END CERTIFICATE----- +` + +const clientKey = ` +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAu7LMqd+agoH168Bsi0WJ36ulYqDypq+GZPF7uWOo2pE0raKH 
+B++31/hjnkt6yC5kLKVZZ0EfolBa9q4Cy6swfGaEMafy44ZCRneLnt1azL1N6Kfz ++U0KsOqyQDoMxYJG1gVTEZN19/U/ew2eazcxKyERI3oGCQ4SbpkxBTbfxtAFk49e +xIB3obsuMVUrmtXE4FkUkvG7NgpPUgrhp0yxYpj9zruZGzGGT1zNhcarbQ/4i7It +ZMbnv6pqQWtYDgnGX2TDRcEiXGeO+KrzhfpTRLfO3K4np8e8cmTyXM+4lMlWUgma +KrRdu1QXozGqRs47u2prGKGdSQWITpqNVCY8fQIDAQABAoIBAGSEn3hFyEAmCyYi +2b5IEksXaC2GlgxQKb/7Vs/0oCPU6YonZPsKFMFzQx4tu+ZiecEzF8rlJGTPdbdv +fw3FcuTcHeVd1QSmDO4h7UK5tnu40XVMJKsY6CXQun8M13QajYbmORNLjjypOULU +C0fNueYoAj6mhX7p61MRdSAev/5+0+bVQQG/tSVDQzdngvKpaCunOphiB2VW2Aa0 +7aYPOFCoPB2uo0DwUmBB0yfx9x4hXX9ovQI0YFou7bq6iYJ0vlZBvYQ9YrVdxjKL +avcz1N5xM3WFAkZJSVT/Ho5+uTbZx4RrJ8b5T+t2spOKmXyAjwS2rL/XMAh8YRZ1 +u44duoECgYEA4jpK2qshgQ0t49rjVHEDKX5x7ElEZefl0rHZ/2X/uHUDKpKj2fTq +3TQzHquiQ4Aof7OEB9UE3DGrtpvo/j/PYxL5Luu5VR4AIEJm+CA8GYuE96+uIL0Z +M2r3Lux6Bp30Z47Eit2KiY4fhrWs59WB3NHHoFxgzHSVbnuA02gcX2ECgYEA1GZw +iXIVYaK07ED+q/0ObyS5hD1cMhJ7ifSN9BxuG0qUpSigbkTGj09fUDS4Fqsz9dvz +F0P93fZvyia242TIfDUwJEsDQCgHk7SGa4Rx/p/3x/obIEERk7K76Hdg93U5NXhV +NvczvgL0HYxnb+qtumwMgGPzncB4lGcTnRyOfp0CgYBTIsDnYwRI/KLknUf1fCKB +WSpcfwBXwsS+jQVjygQTsUyclI8KResZp1kx6DkVPT+kzj+y8SF8GfTUgq844BJC +gnJ4P8A3+3JoaH6WqKHtcUxICZOgDF36e1CjOdwOGnX6qIipz4hdzJDhXFpSSDAV +CjKmR8x61k0j8NcC2buzgQKBgFr7eo9VwBTvpoJhIPY5UvqHB7S+uAR26FZi3H/J +wdyM6PmKWpaBfXCb9l8cBhMnyP0y94FqzY9L5fz48nSbkkmqWvHg9AaCXySFOuNJ +e68vhOszlnUNimLzOAzPPkkh/JyL7Cy8XXyyNTGHGDPXmg12BTDmH8/eR4iCUuOE +/QD9AoGBALQ/SkvfO3D5+k9e/aTHRuMJ0+PWdLUMTZ39oJQxUx+qj7/xpjDvWTBn +eDmF/wjnIAg+020oXyBYo6plEZfDz3EYJQZ+3kLLEU+O/A7VxCakPYPwCr7N/InL +Ccg/TVSIXxw/6uJnojoAjMIEU45NoP6RMp0mWYYb2OlteEv08Ovp +-----END RSA PRIVATE KEY----- +` diff --git a/vendor/github.com/streadway/amqp/types.go b/vendor/github.com/streadway/amqp/types.go new file mode 100644 index 0000000..d3ece70 --- /dev/null +++ b/vendor/github.com/streadway/amqp/types.go @@ -0,0 +1,428 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "fmt" + "io" + "time" +) + +// Constants for standard AMQP 0-9-1 exchange types. +const ( + ExchangeDirect = "direct" + ExchangeFanout = "fanout" + ExchangeTopic = "topic" + ExchangeHeaders = "headers" +) + +var ( + // ErrClosed is returned when the channel or connection is not open + ErrClosed = &Error{Code: ChannelError, Reason: "channel/connection is not open"} + + // ErrChannelMax is returned when Connection.Channel has been called enough + // times that all channel IDs have been exhausted in the client or the + // server. + ErrChannelMax = &Error{Code: ChannelError, Reason: "channel id space exhausted"} + + // ErrSASL is returned from Dial when the authentication mechanism could not + // be negoated. + ErrSASL = &Error{Code: AccessRefused, Reason: "SASL could not negotiate a shared mechanism"} + + // ErrCredentials is returned when the authenticated client is not authorized + // to any vhost. + ErrCredentials = &Error{Code: AccessRefused, Reason: "username or password not allowed"} + + // ErrVhost is returned when the authenticated user is not permitted to + // access the requested Vhost. + ErrVhost = &Error{Code: AccessRefused, Reason: "no access to this vhost"} + + // ErrSyntax is hard protocol error, indicating an unsupported protocol, + // implementation or encoding. + ErrSyntax = &Error{Code: SyntaxError, Reason: "invalid field or value inside of a frame"} + + // ErrFrame is returned when the protocol frame cannot be read from the + // server, indicating an unsupported protocol or unsupported frame type. + ErrFrame = &Error{Code: FrameError, Reason: "frame could not be parsed"} + + // ErrCommandInvalid is returned when the server sends an unexpected response + // to this requested message type. This indicates a bug in this client. 
+ ErrCommandInvalid = &Error{Code: CommandInvalid, Reason: "unexpected command received"} + + // ErrUnexpectedFrame is returned when something other than a method or + // heartbeat frame is delivered to the Connection, indicating a bug in the + // client. + ErrUnexpectedFrame = &Error{Code: UnexpectedFrame, Reason: "unexpected frame received"} + + // ErrFieldType is returned when writing a message containing a Go type unsupported by AMQP. + ErrFieldType = &Error{Code: SyntaxError, Reason: "unsupported table field type"} +) + +// Error captures the code and reason a channel or connection has been closed +// by the server. +type Error struct { + Code int // constant code from the specification + Reason string // description of the error + Server bool // true when initiated from the server, false when from this library + Recover bool // true when this error can be recovered by retrying later or with different parameters +} + +func newError(code uint16, text string) *Error { + return &Error{ + Code: int(code), + Reason: text, + Recover: isSoftExceptionCode(int(code)), + Server: true, + } +} + +func (e Error) Error() string { + return fmt.Sprintf("Exception (%d) Reason: %q", e.Code, e.Reason) +} + +// Used by header frames to capture routing and header information +type properties struct { + ContentType string // MIME content type + ContentEncoding string // MIME content encoding + Headers Table // Application or header exchange table + DeliveryMode uint8 // queue implementation use - Transient (1) or Persistent (2) + Priority uint8 // queue implementation use - 0 to 9 + CorrelationId string // application use - correlation identifier + ReplyTo string // application use - address to to reply to (ex: RPC) + Expiration string // implementation use - message expiration spec + MessageId string // application use - message identifier + Timestamp time.Time // application use - message timestamp + Type string // application use - message type name + UserId string // 
application use - creating user id + AppId string // application use - creating application + reserved1 string // was cluster-id - process for buffer consumption +} + +// DeliveryMode. Transient means higher throughput but messages will not be +// restored on broker restart. The delivery mode of publishings is unrelated +// to the durability of the queues they reside on. Transient messages will +// not be restored to durable queues, persistent messages will be restored to +// durable queues and lost on non-durable queues during server restart. +// +// This remains typed as uint8 to match Publishing.DeliveryMode. Other +// delivery modes specific to custom queue implementations are not enumerated +// here. +const ( + Transient uint8 = 1 + Persistent uint8 = 2 +) + +// The property flags are an array of bits that indicate the presence or +// absence of each property value in sequence. The bits are ordered from most +// high to low - bit 15 indicates the first property. +const ( + flagContentType = 0x8000 + flagContentEncoding = 0x4000 + flagHeaders = 0x2000 + flagDeliveryMode = 0x1000 + flagPriority = 0x0800 + flagCorrelationId = 0x0400 + flagReplyTo = 0x0200 + flagExpiration = 0x0100 + flagMessageId = 0x0080 + flagTimestamp = 0x0040 + flagType = 0x0020 + flagUserId = 0x0010 + flagAppId = 0x0008 + flagReserved1 = 0x0004 +) + +// Queue captures the current server state of the queue on the server returned +// from Channel.QueueDeclare or Channel.QueueInspect. +type Queue struct { + Name string // server confirmed or generated name + Messages int // count of messages not awaiting acknowledgment + Consumers int // number of consumers receiving deliveries +} + +// Publishing captures the client message sent to the server. The fields +// outside of the Headers table included in this struct mirror the underlying +// fields in the content frame. They use native types for convenience and +// efficiency. 
+type Publishing struct { + // Application or exchange specific fields, + // the headers exchange will inspect this field. + Headers Table + + // Properties + ContentType string // MIME content type + ContentEncoding string // MIME content encoding + DeliveryMode uint8 // Transient (0 or 1) or Persistent (2) + Priority uint8 // 0 to 9 + CorrelationId string // correlation identifier + ReplyTo string // address to to reply to (ex: RPC) + Expiration string // message expiration spec + MessageId string // message identifier + Timestamp time.Time // message timestamp + Type string // message type name + UserId string // creating user id - ex: "guest" + AppId string // creating application id + + // The application specific payload of the message + Body []byte +} + +// Blocking notifies the server's TCP flow control of the Connection. When a +// server hits a memory or disk alarm it will block all connections until the +// resources are reclaimed. Use NotifyBlock on the Connection to receive these +// events. +type Blocking struct { + Active bool // TCP pushback active/inactive on server + Reason string // Server reason for activation +} + +// Confirmation notifies the acknowledgment or negative acknowledgement of a +// publishing identified by its delivery tag. Use NotifyPublish on the Channel +// to consume these events. +type Confirmation struct { + DeliveryTag uint64 // A 1 based counter of publishings from when the channel was put in Confirm mode + Ack bool // True when the server successfully received the publishing +} + +// Decimal matches the AMQP decimal type. 
Scale is the number of decimal +// digits Scale == 2, Value == 12345, Decimal == 123.45 +type Decimal struct { + Scale uint8 + Value int32 +} + +// Table stores user supplied fields of the following types: +// +// bool +// byte +// float32 +// float64 +// int +// int16 +// int32 +// int64 +// nil +// string +// time.Time +// amqp.Decimal +// amqp.Table +// []byte +// []interface{} - containing above types +// +// Functions taking a table will immediately fail when the table contains a +// value of an unsupported type. +// +// The caller must be specific in which precision of integer it wishes to +// encode. +// +// Use a type assertion when reading values from a table for type conversion. +// +// RabbitMQ expects int32 for integer values. +// +type Table map[string]interface{} + +func validateField(f interface{}) error { + switch fv := f.(type) { + case nil, bool, byte, int, int16, int32, int64, float32, float64, string, []byte, Decimal, time.Time: + return nil + + case []interface{}: + for _, v := range fv { + if err := validateField(v); err != nil { + return fmt.Errorf("in array %s", err) + } + } + return nil + + case Table: + for k, v := range fv { + if err := validateField(v); err != nil { + return fmt.Errorf("table field %q %s", k, err) + } + } + return nil + } + + return fmt.Errorf("value %t not supported", f) +} + +// Validate returns and error if any Go types in the table are incompatible with AMQP types. 
+func (t Table) Validate() error { + return validateField(t) +} + +// Heap interface for maintaining delivery tags +type tagSet []uint64 + +func (set tagSet) Len() int { return len(set) } +func (set tagSet) Less(i, j int) bool { return (set)[i] < (set)[j] } +func (set tagSet) Swap(i, j int) { (set)[i], (set)[j] = (set)[j], (set)[i] } +func (set *tagSet) Push(tag interface{}) { *set = append(*set, tag.(uint64)) } +func (set *tagSet) Pop() interface{} { + val := (*set)[len(*set)-1] + *set = (*set)[:len(*set)-1] + return val +} + +type message interface { + id() (uint16, uint16) + wait() bool + read(io.Reader) error + write(io.Writer) error +} + +type messageWithContent interface { + message + getContent() (properties, []byte) + setContent(properties, []byte) +} + +/* +The base interface implemented as: + +2.3.5 frame Details + +All frames consist of a header (7 octets), a payload of arbitrary size, and a 'frame-end' octet that detects +malformed frames: + + 0 1 3 7 size+7 size+8 + +------+---------+-------------+ +------------+ +-----------+ + | type | channel | size | | payload | | frame-end | + +------+---------+-------------+ +------------+ +-----------+ + octet short long size octets octet + +To read a frame, we: + + 1. Read the header and check the frame type and channel. + 2. Depending on the frame type, we read the payload and process it. + 3. Read the frame end octet. + +In realistic implementations where performance is a concern, we would use +“read-ahead buffering” or “gathering reads” to avoid doing three separate +system calls to read a frame. 
+ +*/ +type frame interface { + write(io.Writer) error + channel() uint16 +} + +type reader struct { + r io.Reader +} + +type writer struct { + w io.Writer +} + +// Implements the frame interface for Connection RPC +type protocolHeader struct{} + +func (protocolHeader) write(w io.Writer) error { + _, err := w.Write([]byte{'A', 'M', 'Q', 'P', 0, 0, 9, 1}) + return err +} + +func (protocolHeader) channel() uint16 { + panic("only valid as initial handshake") +} + +/* +Method frames carry the high-level protocol commands (which we call "methods"). +One method frame carries one command. The method frame payload has this format: + + 0 2 4 + +----------+-----------+-------------- - - + | class-id | method-id | arguments... + +----------+-----------+-------------- - - + short short ... + +To process a method frame, we: + 1. Read the method frame payload. + 2. Unpack it into a structure. A given method always has the same structure, + so we can unpack the method rapidly. 3. Check that the method is allowed in + the current context. + 4. Check that the method arguments are valid. + 5. Execute the method. + +Method frame bodies are constructed as a list of AMQP data fields (bits, +integers, strings and string tables). The marshalling code is trivially +generated directly from the protocol specifications, and can be very rapid. +*/ +type methodFrame struct { + ChannelId uint16 + ClassId uint16 + MethodId uint16 + Method message +} + +func (f *methodFrame) channel() uint16 { return f.ChannelId } + +/* +Heartbeating is a technique designed to undo one of TCP/IP's features, namely +its ability to recover from a broken physical connection by closing only after +a quite long time-out. In some scenarios we need to know very rapidly if a +peer is disconnected or not responding for other reasons (e.g. it is looping). +Since heartbeating can be done at a low level, we implement this as a special +type of frame that peers exchange at the transport level, rather than as a +class method. 
+*/ +type heartbeatFrame struct { + ChannelId uint16 +} + +func (f *heartbeatFrame) channel() uint16 { return f.ChannelId } + +/* +Certain methods (such as Basic.Publish, Basic.Deliver, etc.) are formally +defined as carrying content. When a peer sends such a method frame, it always +follows it with a content header and zero or more content body frames. + +A content header frame has this format: + + 0 2 4 12 14 + +----------+--------+-----------+----------------+------------- - - + | class-id | weight | body size | property flags | property list... + +----------+--------+-----------+----------------+------------- - - + short short long long short remainder... + +We place content body in distinct frames (rather than including it in the +method) so that AMQP may support "zero copy" techniques in which content is +never marshalled or encoded. We place the content properties in their own +frame so that recipients can selectively discard contents they do not want to +process +*/ +type headerFrame struct { + ChannelId uint16 + ClassId uint16 + weight uint16 + Size uint64 + Properties properties +} + +func (f *headerFrame) channel() uint16 { return f.ChannelId } + +/* +Content is the application data we carry from client-to-client via the AMQP +server. Content is, roughly speaking, a set of properties plus a binary data +part. The set of allowed properties are defined by the Basic class, and these +form the "content header frame". The data can be any size, and MAY be broken +into several (or many) chunks, each forming a "content body frame". + +Looking at the frames for a specific channel, as they pass on the wire, we +might see something like this: + + [method] + [method] [header] [body] [body] + [method] + ... 
+*/ +type bodyFrame struct { + ChannelId uint16 + Body []byte +} + +func (f *bodyFrame) channel() uint16 { return f.ChannelId } diff --git a/vendor/github.com/streadway/amqp/uri.go b/vendor/github.com/streadway/amqp/uri.go new file mode 100644 index 0000000..e584715 --- /dev/null +++ b/vendor/github.com/streadway/amqp/uri.go @@ -0,0 +1,176 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "errors" + "net" + "net/url" + "strconv" + "strings" +) + +var errURIScheme = errors.New("AMQP scheme must be either 'amqp://' or 'amqps://'") +var errURIWhitespace = errors.New("URI must not contain whitespace") + +var schemePorts = map[string]int{ + "amqp": 5672, + "amqps": 5671, +} + +var defaultURI = URI{ + Scheme: "amqp", + Host: "localhost", + Port: 5672, + Username: "guest", + Password: "guest", + Vhost: "/", +} + +// URI represents a parsed AMQP URI string. +type URI struct { + Scheme string + Host string + Port int + Username string + Password string + Vhost string +} + +// ParseURI attempts to parse the given AMQP URI according to the spec. +// See http://www.rabbitmq.com/uri-spec.html. 
+// +// Default values for the fields are: +// +// Scheme: amqp +// Host: localhost +// Port: 5672 +// Username: guest +// Password: guest +// Vhost: / +// +func ParseURI(uri string) (URI, error) { + builder := defaultURI + + if strings.Contains(uri, " ") == true { + return builder, errURIWhitespace + } + + u, err := url.Parse(uri) + if err != nil { + return builder, err + } + + defaultPort, okScheme := schemePorts[u.Scheme] + + if okScheme { + builder.Scheme = u.Scheme + } else { + return builder, errURIScheme + } + + host := u.Hostname() + port := u.Port() + + if host != "" { + builder.Host = host + } + + if port != "" { + port32, err := strconv.ParseInt(port, 10, 32) + if err != nil { + return builder, err + } + builder.Port = int(port32) + } else { + builder.Port = defaultPort + } + + if u.User != nil { + builder.Username = u.User.Username() + if password, ok := u.User.Password(); ok { + builder.Password = password + } + } + + if u.Path != "" { + if strings.HasPrefix(u.Path, "/") { + if u.Host == "" && strings.HasPrefix(u.Path, "///") { + // net/url doesn't handle local context authorities and leaves that up + // to the scheme handler. In our case, we translate amqp:/// into the + // default host and whatever the vhost should be + if len(u.Path) > 3 { + builder.Vhost = u.Path[3:] + } + } else if len(u.Path) > 1 { + builder.Vhost = u.Path[1:] + } + } else { + builder.Vhost = u.Path + } + } + + return builder, nil +} + +// PlainAuth returns a PlainAuth structure based on the parsed URI's +// Username and Password fields. +func (uri URI) PlainAuth() *PlainAuth { + return &PlainAuth{ + Username: uri.Username, + Password: uri.Password, + } +} + +// AMQPlainAuth returns a PlainAuth structure based on the parsed URI's +// Username and Password fields. 
+func (uri URI) AMQPlainAuth() *AMQPlainAuth { + return &AMQPlainAuth{ + Username: uri.Username, + Password: uri.Password, + } +} + +func (uri URI) String() string { + authority, err := url.Parse("") + if err != nil { + return err.Error() + } + + authority.Scheme = uri.Scheme + + if uri.Username != defaultURI.Username || uri.Password != defaultURI.Password { + authority.User = url.User(uri.Username) + + if uri.Password != defaultURI.Password { + authority.User = url.UserPassword(uri.Username, uri.Password) + } + } + + authority.Host = net.JoinHostPort(uri.Host, strconv.Itoa(uri.Port)) + + if defaultPort, found := schemePorts[uri.Scheme]; !found || defaultPort != uri.Port { + authority.Host = net.JoinHostPort(uri.Host, strconv.Itoa(uri.Port)) + } else { + // JoinHostPort() automatically add brackets to the host if it's + // an IPv6 address. + // + // If not port is specified, JoinHostPort() return an IP address in the + // form of "[::1]:", so we use TrimSuffix() to remove the extra ":". + authority.Host = strings.TrimSuffix(net.JoinHostPort(uri.Host, ""), ":") + } + + if uri.Vhost != defaultURI.Vhost { + // Make sure net/url does not double escape, e.g. + // "%2F" does not become "%252F". 
+ authority.Path = uri.Vhost + authority.RawPath = url.QueryEscape(uri.Vhost) + } else { + authority.Path = "/" + } + + return authority.String() +} diff --git a/vendor/github.com/streadway/amqp/uri_test.go b/vendor/github.com/streadway/amqp/uri_test.go new file mode 100644 index 0000000..fc41b5d --- /dev/null +++ b/vendor/github.com/streadway/amqp/uri_test.go @@ -0,0 +1,365 @@ +package amqp + +import ( + "testing" +) + +// Test matrix defined on http://www.rabbitmq.com/uri-spec.html +type testURI struct { + url string + username string + password string + host string + port int + vhost string + canon string +} + +var uriTests = []testURI{ + { + url: "amqp://user:pass@host:10000/vhost", + username: "user", + password: "pass", + host: "host", + port: 10000, + vhost: "vhost", + canon: "amqp://user:pass@host:10000/vhost", + }, + + { + url: "amqp://", + username: defaultURI.Username, + password: defaultURI.Password, + host: defaultURI.Host, + port: defaultURI.Port, + vhost: defaultURI.Vhost, + canon: "amqp://localhost/", + }, + + { + url: "amqp://:@/", + username: "", + password: "", + host: defaultURI.Host, + port: defaultURI.Port, + vhost: defaultURI.Vhost, + canon: "amqp://:@localhost/", + }, + + { + url: "amqp://user@", + username: "user", + password: defaultURI.Password, + host: defaultURI.Host, + port: defaultURI.Port, + vhost: defaultURI.Vhost, + canon: "amqp://user@localhost/", + }, + + { + url: "amqp://user:pass@", + username: "user", + password: "pass", + host: defaultURI.Host, + port: defaultURI.Port, + vhost: defaultURI.Vhost, + canon: "amqp://user:pass@localhost/", + }, + + { + url: "amqp://guest:pass@", + username: "guest", + password: "pass", + host: defaultURI.Host, + port: defaultURI.Port, + vhost: defaultURI.Vhost, + canon: "amqp://guest:pass@localhost/", + }, + + { + url: "amqp://host", + username: defaultURI.Username, + password: defaultURI.Password, + host: "host", + port: defaultURI.Port, + vhost: defaultURI.Vhost, + canon: "amqp://host/", + }, + 
+ { + url: "amqp://:10000", + username: defaultURI.Username, + password: defaultURI.Password, + host: defaultURI.Host, + port: 10000, + vhost: defaultURI.Vhost, + canon: "amqp://localhost:10000/", + }, + + { + url: "amqp:///vhost", + username: defaultURI.Username, + password: defaultURI.Password, + host: defaultURI.Host, + port: defaultURI.Port, + vhost: "vhost", + canon: "amqp://localhost/vhost", + }, + + { + url: "amqp://host/", + username: defaultURI.Username, + password: defaultURI.Password, + host: "host", + port: defaultURI.Port, + vhost: defaultURI.Vhost, + canon: "amqp://host/", + }, + + { + url: "amqp://host/%2F", + username: defaultURI.Username, + password: defaultURI.Password, + host: "host", + port: defaultURI.Port, + vhost: "/", + canon: "amqp://host/", + }, + + { + url: "amqp://host/%2F%2F", + username: defaultURI.Username, + password: defaultURI.Password, + host: "host", + port: defaultURI.Port, + vhost: "//", + canon: "amqp://host/%2F%2F", + }, + + { + url: "amqp://host/%2Fslash%2F", + username: defaultURI.Username, + password: defaultURI.Password, + host: "host", + port: defaultURI.Port, + vhost: "/slash/", + canon: "amqp://host/%2Fslash%2F", + }, + + { + url: "amqp://192.168.1.1:1000/", + username: defaultURI.Username, + password: defaultURI.Password, + host: "192.168.1.1", + port: 1000, + vhost: defaultURI.Vhost, + canon: "amqp://192.168.1.1:1000/", + }, + + { + url: "amqp://[::1]", + username: defaultURI.Username, + password: defaultURI.Password, + host: "::1", + port: defaultURI.Port, + vhost: defaultURI.Vhost, + canon: "amqp://[::1]/", + }, + + { + url: "amqp://[::1]:1000", + username: defaultURI.Username, + password: defaultURI.Password, + host: "::1", + port: 1000, + vhost: defaultURI.Vhost, + canon: "amqp://[::1]:1000/", + }, + + { + url: "amqp://[fe80::1]", + username: defaultURI.Username, + password: defaultURI.Password, + host: "fe80::1", + port: defaultURI.Port, + vhost: defaultURI.Vhost, + canon: "amqp://[fe80::1]/", + }, + + { + url: 
"amqp://[fe80::1]", + username: defaultURI.Username, + password: defaultURI.Password, + host: "fe80::1", + port: defaultURI.Port, + vhost: defaultURI.Vhost, + canon: "amqp://[fe80::1]/", + }, + + { + url: "amqp://[fe80::1%25en0]", + username: defaultURI.Username, + password: defaultURI.Password, + host: "fe80::1%en0", + port: defaultURI.Port, + vhost: defaultURI.Vhost, + canon: "amqp://[fe80::1%25en0]/", + }, + + { + url: "amqp://[fe80::1]:5671", + username: defaultURI.Username, + password: defaultURI.Password, + host: "fe80::1", + port: 5671, + vhost: defaultURI.Vhost, + canon: "amqp://[fe80::1]:5671/", + }, + + { + url: "amqps:///", + username: defaultURI.Username, + password: defaultURI.Password, + host: defaultURI.Host, + port: schemePorts["amqps"], + vhost: defaultURI.Vhost, + canon: "amqps://localhost/", + }, + + { + url: "amqps://host:1000/", + username: defaultURI.Username, + password: defaultURI.Password, + host: "host", + port: 1000, + vhost: defaultURI.Vhost, + canon: "amqps://host:1000/", + }, +} + +func TestURISpec(t *testing.T) { + for _, test := range uriTests { + u, err := ParseURI(test.url) + if err != nil { + t.Fatal("Could not parse spec URI: ", test.url, " err: ", err) + } + + if test.username != u.Username { + t.Error("For: ", test.url, " usernames do not match. want: ", test.username, " got: ", u.Username) + } + + if test.password != u.Password { + t.Error("For: ", test.url, " passwords do not match. want: ", test.password, " got: ", u.Password) + } + + if test.host != u.Host { + t.Error("For: ", test.url, " hosts do not match. want: ", test.host, " got: ", u.Host) + } + + if test.port != u.Port { + t.Error("For: ", test.url, " ports do not match. want: ", test.port, " got: ", u.Port) + } + + if test.vhost != u.Vhost { + t.Error("For: ", test.url, " vhosts do not match. want: ", test.vhost, " got: ", u.Vhost) + } + + if test.canon != u.String() { + t.Error("For: ", test.url, " canonical string does not match. 
want: ", test.canon, " got: ", u.String()) + } + } +} + +func TestURIUnknownScheme(t *testing.T) { + if _, err := ParseURI("http://example.com/"); err == nil { + t.Fatal("Expected error when parsing non-amqp scheme") + } +} + +func TestURIScheme(t *testing.T) { + if _, err := ParseURI("amqp://example.com/"); err != nil { + t.Fatalf("Expected to parse amqp scheme, got %v", err) + } + + if _, err := ParseURI("amqps://example.com/"); err != nil { + t.Fatalf("Expected to parse amqps scheme, got %v", err) + } +} + +func TestURIWhitespace(t *testing.T) { + if _, err := ParseURI("amqp://admin:PASSWORD@rabbitmq-service/ -http_port=8080"); err == nil { + t.Fatal("Expected to fail if URI contains whitespace") + } +} + +func TestURIDefaults(t *testing.T) { + url := "amqp://" + uri, err := ParseURI(url) + if err != nil { + t.Fatal("Could not parse") + } + + if uri.String() != "amqp://localhost/" { + t.Fatal("Defaults not encoded properly got:", uri.String()) + } +} + +func TestURIComplete(t *testing.T) { + url := "amqp://bob:dobbs@foo.bar:5678/private" + uri, err := ParseURI(url) + if err != nil { + t.Fatal("Could not parse") + } + + if uri.String() != url { + t.Fatal("Defaults not encoded properly want:", url, " got:", uri.String()) + } +} + +func TestURIDefaultPortAmqpNotIncluded(t *testing.T) { + url := "amqp://foo.bar:5672/" + uri, err := ParseURI(url) + if err != nil { + t.Fatal("Could not parse") + } + + if uri.String() != "amqp://foo.bar/" { + t.Fatal("Defaults not encoded properly got:", uri.String()) + } +} + +func TestURIDefaultPortAmqp(t *testing.T) { + url := "amqp://foo.bar/" + uri, err := ParseURI(url) + if err != nil { + t.Fatal("Could not parse") + } + + if uri.Port != 5672 { + t.Fatal("Default port not correct for amqp, got:", uri.Port) + } +} + +func TestURIDefaultPortAmqpsNotIncludedInString(t *testing.T) { + url := "amqps://foo.bar:5671/" + uri, err := ParseURI(url) + if err != nil { + t.Fatal("Could not parse") + } + + if uri.String() != "amqps://foo.bar/" 
{ + t.Fatal("Defaults not encoded properly got:", uri.String()) + } +} + +func TestURIDefaultPortAmqps(t *testing.T) { + url := "amqps://foo.bar/" + uri, err := ParseURI(url) + if err != nil { + t.Fatal("Could not parse") + } + + if uri.Port != 5671 { + t.Fatal("Default port not correct for amqps, got:", uri.Port) + } +} diff --git a/vendor/github.com/streadway/amqp/write.go b/vendor/github.com/streadway/amqp/write.go new file mode 100644 index 0000000..94a46d1 --- /dev/null +++ b/vendor/github.com/streadway/amqp/write.go @@ -0,0 +1,416 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "bufio" + "bytes" + "encoding/binary" + "errors" + "io" + "math" + "time" +) + +func (w *writer) WriteFrame(frame frame) (err error) { + if err = frame.write(w.w); err != nil { + return + } + + if buf, ok := w.w.(*bufio.Writer); ok { + err = buf.Flush() + } + + return +} + +func (f *methodFrame) write(w io.Writer) (err error) { + var payload bytes.Buffer + + if f.Method == nil { + return errors.New("malformed frame: missing method") + } + + class, method := f.Method.id() + + if err = binary.Write(&payload, binary.BigEndian, class); err != nil { + return + } + + if err = binary.Write(&payload, binary.BigEndian, method); err != nil { + return + } + + if err = f.Method.write(&payload); err != nil { + return + } + + return writeFrame(w, frameMethod, f.ChannelId, payload.Bytes()) +} + +// Heartbeat +// +// Payload is empty +func (f *heartbeatFrame) write(w io.Writer) (err error) { + return writeFrame(w, frameHeartbeat, f.ChannelId, []byte{}) +} + +// CONTENT HEADER +// 0 2 4 12 14 +// +----------+--------+-----------+----------------+------------- - - +// | class-id | weight | body size | property flags | property list... 
+// +----------+--------+-----------+----------------+------------- - - +// short short long long short remainder... +// +func (f *headerFrame) write(w io.Writer) (err error) { + var payload bytes.Buffer + var zeroTime time.Time + + if err = binary.Write(&payload, binary.BigEndian, f.ClassId); err != nil { + return + } + + if err = binary.Write(&payload, binary.BigEndian, f.weight); err != nil { + return + } + + if err = binary.Write(&payload, binary.BigEndian, f.Size); err != nil { + return + } + + // First pass will build the mask to be serialized, second pass will serialize + // each of the fields that appear in the mask. + + var mask uint16 + + if len(f.Properties.ContentType) > 0 { + mask = mask | flagContentType + } + if len(f.Properties.ContentEncoding) > 0 { + mask = mask | flagContentEncoding + } + if f.Properties.Headers != nil && len(f.Properties.Headers) > 0 { + mask = mask | flagHeaders + } + if f.Properties.DeliveryMode > 0 { + mask = mask | flagDeliveryMode + } + if f.Properties.Priority > 0 { + mask = mask | flagPriority + } + if len(f.Properties.CorrelationId) > 0 { + mask = mask | flagCorrelationId + } + if len(f.Properties.ReplyTo) > 0 { + mask = mask | flagReplyTo + } + if len(f.Properties.Expiration) > 0 { + mask = mask | flagExpiration + } + if len(f.Properties.MessageId) > 0 { + mask = mask | flagMessageId + } + if f.Properties.Timestamp != zeroTime { + mask = mask | flagTimestamp + } + if len(f.Properties.Type) > 0 { + mask = mask | flagType + } + if len(f.Properties.UserId) > 0 { + mask = mask | flagUserId + } + if len(f.Properties.AppId) > 0 { + mask = mask | flagAppId + } + + if err = binary.Write(&payload, binary.BigEndian, mask); err != nil { + return + } + + if hasProperty(mask, flagContentType) { + if err = writeShortstr(&payload, f.Properties.ContentType); err != nil { + return + } + } + if hasProperty(mask, flagContentEncoding) { + if err = writeShortstr(&payload, f.Properties.ContentEncoding); err != nil { + return + } + } + if 
hasProperty(mask, flagHeaders) { + if err = writeTable(&payload, f.Properties.Headers); err != nil { + return + } + } + if hasProperty(mask, flagDeliveryMode) { + if err = binary.Write(&payload, binary.BigEndian, f.Properties.DeliveryMode); err != nil { + return + } + } + if hasProperty(mask, flagPriority) { + if err = binary.Write(&payload, binary.BigEndian, f.Properties.Priority); err != nil { + return + } + } + if hasProperty(mask, flagCorrelationId) { + if err = writeShortstr(&payload, f.Properties.CorrelationId); err != nil { + return + } + } + if hasProperty(mask, flagReplyTo) { + if err = writeShortstr(&payload, f.Properties.ReplyTo); err != nil { + return + } + } + if hasProperty(mask, flagExpiration) { + if err = writeShortstr(&payload, f.Properties.Expiration); err != nil { + return + } + } + if hasProperty(mask, flagMessageId) { + if err = writeShortstr(&payload, f.Properties.MessageId); err != nil { + return + } + } + if hasProperty(mask, flagTimestamp) { + if err = binary.Write(&payload, binary.BigEndian, uint64(f.Properties.Timestamp.Unix())); err != nil { + return + } + } + if hasProperty(mask, flagType) { + if err = writeShortstr(&payload, f.Properties.Type); err != nil { + return + } + } + if hasProperty(mask, flagUserId) { + if err = writeShortstr(&payload, f.Properties.UserId); err != nil { + return + } + } + if hasProperty(mask, flagAppId) { + if err = writeShortstr(&payload, f.Properties.AppId); err != nil { + return + } + } + + return writeFrame(w, frameHeader, f.ChannelId, payload.Bytes()) +} + +// Body +// +// Payload is one byterange from the full body who's size is declared in the +// Header frame +func (f *bodyFrame) write(w io.Writer) (err error) { + return writeFrame(w, frameBody, f.ChannelId, f.Body) +} + +func writeFrame(w io.Writer, typ uint8, channel uint16, payload []byte) (err error) { + end := []byte{frameEnd} + size := uint(len(payload)) + + _, err = w.Write([]byte{ + byte(typ), + byte((channel & 0xff00) >> 8), + byte((channel & 
0x00ff) >> 0), + byte((size & 0xff000000) >> 24), + byte((size & 0x00ff0000) >> 16), + byte((size & 0x0000ff00) >> 8), + byte((size & 0x000000ff) >> 0), + }) + + if err != nil { + return + } + + if _, err = w.Write(payload); err != nil { + return + } + + if _, err = w.Write(end); err != nil { + return + } + + return +} + +func writeShortstr(w io.Writer, s string) (err error) { + b := []byte(s) + + var length = uint8(len(b)) + + if err = binary.Write(w, binary.BigEndian, length); err != nil { + return + } + + if _, err = w.Write(b[:length]); err != nil { + return + } + + return +} + +func writeLongstr(w io.Writer, s string) (err error) { + b := []byte(s) + + var length = uint32(len(b)) + + if err = binary.Write(w, binary.BigEndian, length); err != nil { + return + } + + if _, err = w.Write(b[:length]); err != nil { + return + } + + return +} + +/* +'A': []interface{} +'D': Decimal +'F': Table +'I': int32 +'S': string +'T': time.Time +'V': nil +'b': byte +'d': float64 +'f': float32 +'l': int64 +'s': int16 +'t': bool +'x': []byte +*/ +func writeField(w io.Writer, value interface{}) (err error) { + var buf [9]byte + var enc []byte + + switch v := value.(type) { + case bool: + buf[0] = 't' + if v { + buf[1] = byte(1) + } else { + buf[1] = byte(0) + } + enc = buf[:2] + + case byte: + buf[0] = 'b' + buf[1] = byte(v) + enc = buf[:2] + + case int16: + buf[0] = 's' + binary.BigEndian.PutUint16(buf[1:3], uint16(v)) + enc = buf[:3] + + case int: + buf[0] = 'I' + binary.BigEndian.PutUint32(buf[1:5], uint32(v)) + enc = buf[:5] + + case int32: + buf[0] = 'I' + binary.BigEndian.PutUint32(buf[1:5], uint32(v)) + enc = buf[:5] + + case int64: + buf[0] = 'l' + binary.BigEndian.PutUint64(buf[1:9], uint64(v)) + enc = buf[:9] + + case float32: + buf[0] = 'f' + binary.BigEndian.PutUint32(buf[1:5], math.Float32bits(v)) + enc = buf[:5] + + case float64: + buf[0] = 'd' + binary.BigEndian.PutUint64(buf[1:9], math.Float64bits(v)) + enc = buf[:9] + + case Decimal: + buf[0] = 'D' + buf[1] = 
byte(v.Scale) + binary.BigEndian.PutUint32(buf[2:6], uint32(v.Value)) + enc = buf[:6] + + case string: + buf[0] = 'S' + binary.BigEndian.PutUint32(buf[1:5], uint32(len(v))) + enc = append(buf[:5], []byte(v)...) + + case []interface{}: // field-array + buf[0] = 'A' + + sec := new(bytes.Buffer) + for _, val := range v { + if err = writeField(sec, val); err != nil { + return + } + } + + binary.BigEndian.PutUint32(buf[1:5], uint32(sec.Len())) + if _, err = w.Write(buf[:5]); err != nil { + return + } + + if _, err = w.Write(sec.Bytes()); err != nil { + return + } + + return + + case time.Time: + buf[0] = 'T' + binary.BigEndian.PutUint64(buf[1:9], uint64(v.Unix())) + enc = buf[:9] + + case Table: + if _, err = w.Write([]byte{'F'}); err != nil { + return + } + return writeTable(w, v) + + case []byte: + buf[0] = 'x' + binary.BigEndian.PutUint32(buf[1:5], uint32(len(v))) + if _, err = w.Write(buf[0:5]); err != nil { + return + } + if _, err = w.Write(v); err != nil { + return + } + return + + case nil: + buf[0] = 'V' + enc = buf[:1] + + default: + return ErrFieldType + } + + _, err = w.Write(enc) + + return +} + +func writeTable(w io.Writer, table Table) (err error) { + var buf bytes.Buffer + + for key, val := range table { + if err = writeShortstr(&buf, key); err != nil { + return + } + if err = writeField(&buf, val); err != nil { + return + } + } + + return writeLongstr(w, string(buf.Bytes())) +} diff --git a/vendor/github.com/xdg/scram/.gitignore b/vendor/github.com/xdg/scram/.gitignore new file mode 100644 index 0000000..e69de29 diff --git a/vendor/github.com/xdg/scram/.travis.yml b/vendor/github.com/xdg/scram/.travis.yml new file mode 100644 index 0000000..f391327 --- /dev/null +++ b/vendor/github.com/xdg/scram/.travis.yml @@ -0,0 +1,11 @@ +language: go +sudo: false +go: + - "1.7" + - "1.8" + - "1.9" + - "1.10" + - master +matrix: + allow_failures: + - go: master diff --git a/vendor/github.com/xdg/scram/LICENSE b/vendor/github.com/xdg/scram/LICENSE new file mode 100644 
index 0000000..67db858 --- /dev/null +++ b/vendor/github.com/xdg/scram/LICENSE @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
diff --git a/vendor/github.com/xdg/scram/README.md b/vendor/github.com/xdg/scram/README.md new file mode 100644 index 0000000..6782d94 --- /dev/null +++ b/vendor/github.com/xdg/scram/README.md @@ -0,0 +1,71 @@ +[![GoDoc](https://godoc.org/github.com/xdg/scram?status.svg)](https://godoc.org/github.com/xdg/scram) +[![Build Status](https://travis-ci.org/xdg/scram.svg?branch=master)](https://travis-ci.org/xdg/scram) + +# scram – Go implementation of RFC-5802 + +## Description + +Package scram provides client and server implementations of the Salted +Challenge Response Authentication Mechanism (SCRAM) described in +[RFC-5802](https://tools.ietf.org/html/rfc5802) and +[RFC-7677](https://tools.ietf.org/html/rfc7677). + +It includes both client and server side support. + +Channel binding and extensions are not (yet) supported. + +## Examples + +### Client side + + package main + + import "github.com/xdg/scram" + + func main() { + // Get Client with username, password and (optional) authorization ID. + clientSHA1, err := scram.SHA1.NewClient("mulder", "trustno1", "") + if err != nil { + panic(err) + } + + // Prepare the authentication conversation. Use the empty string as the + // initial server message argument to start the conversation. + conv := clientSHA1.NewConversation() + var serverMsg string + + // Get the first message, send it and read the response. + firstMsg, err := conv.Step(serverMsg) + if err != nil { + panic(err) + } + serverMsg = sendClientMsg(firstMsg) + + // Get the second message, send it, and read the response. + secondMsg, err := conv.Step(serverMsg) + if err != nil { + panic(err) + } + serverMsg = sendClientMsg(secondMsg) + + // Validate the server's final message. We have no further message to + // send so ignore that return value. + _, err = conv.Step(serverMsg) + if err != nil { + panic(err) + } + + return + } + + func sendClientMsg(s string) string { + // A real implementation would send this to a server and read a reply. 
+ return "" + } + +## Copyright and License + +Copyright 2018 by David A. Golden. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"). You may +obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 diff --git a/vendor/github.com/xdg/scram/client.go b/vendor/github.com/xdg/scram/client.go new file mode 100644 index 0000000..ca0c4c7 --- /dev/null +++ b/vendor/github.com/xdg/scram/client.go @@ -0,0 +1,130 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package scram + +import ( + "sync" + + "golang.org/x/crypto/pbkdf2" +) + +// Client implements the client side of SCRAM authentication. It holds +// configuration values needed to initialize new client-side conversations for +// a specific username, password and authorization ID tuple. Client caches +// the computationally-expensive parts of a SCRAM conversation as described in +// RFC-5802. If repeated authentication conversations may be required for a +// user (e.g. disconnect/reconnect), the user's Client should be preserved. +// +// For security reasons, Clients have a default minimum PBKDF2 iteration count +// of 4096. If a server requests a smaller iteration count, an authentication +// conversation will error. +// +// A Client can also be used by a server application to construct the hashed +// authentication values to be stored for a new user. See StoredCredentials() +// for more. 
+type Client struct { + sync.RWMutex + username string + password string + authzID string + minIters int + nonceGen NonceGeneratorFcn + hashGen HashGeneratorFcn + cache map[KeyFactors]derivedKeys +} + +func newClient(username, password, authzID string, fcn HashGeneratorFcn) *Client { + return &Client{ + username: username, + password: password, + authzID: authzID, + minIters: 4096, + nonceGen: defaultNonceGenerator, + hashGen: fcn, + cache: make(map[KeyFactors]derivedKeys), + } +} + +// WithMinIterations changes minimum required PBKDF2 iteration count. +func (c *Client) WithMinIterations(n int) *Client { + c.Lock() + defer c.Unlock() + c.minIters = n + return c +} + +// WithNonceGenerator replaces the default nonce generator (base64 encoding of +// 24 bytes from crypto/rand) with a custom generator. This is provided for +// testing or for users with custom nonce requirements. +func (c *Client) WithNonceGenerator(ng NonceGeneratorFcn) *Client { + c.Lock() + defer c.Unlock() + c.nonceGen = ng + return c +} + +// NewConversation constructs a client-side authentication conversation. +// Conversations cannot be reused, so this must be called for each new +// authentication attempt. +func (c *Client) NewConversation() *ClientConversation { + c.RLock() + defer c.RUnlock() + return &ClientConversation{ + client: c, + nonceGen: c.nonceGen, + hashGen: c.hashGen, + minIters: c.minIters, + } +} + +func (c *Client) getDerivedKeys(kf KeyFactors) derivedKeys { + dk, ok := c.getCache(kf) + if !ok { + dk = c.computeKeys(kf) + c.setCache(kf, dk) + } + return dk +} + +// GetStoredCredentials takes a salt and iteration count structure and +// provides the values that must be stored by a server to authentication a +// user. These values are what the Server credential lookup function must +// return for a given username. 
+func (c *Client) GetStoredCredentials(kf KeyFactors) StoredCredentials { + dk := c.getDerivedKeys(kf) + return StoredCredentials{ + KeyFactors: kf, + StoredKey: dk.StoredKey, + ServerKey: dk.ServerKey, + } +} + +func (c *Client) computeKeys(kf KeyFactors) derivedKeys { + h := c.hashGen() + saltedPassword := pbkdf2.Key([]byte(c.password), []byte(kf.Salt), kf.Iters, h.Size(), c.hashGen) + clientKey := computeHMAC(c.hashGen, saltedPassword, []byte("Client Key")) + + return derivedKeys{ + ClientKey: clientKey, + StoredKey: computeHash(c.hashGen, clientKey), + ServerKey: computeHMAC(c.hashGen, saltedPassword, []byte("Server Key")), + } +} + +func (c *Client) getCache(kf KeyFactors) (derivedKeys, bool) { + c.RLock() + defer c.RUnlock() + dk, ok := c.cache[kf] + return dk, ok +} + +func (c *Client) setCache(kf KeyFactors, dk derivedKeys) { + c.Lock() + defer c.Unlock() + c.cache[kf] = dk + return +} diff --git a/vendor/github.com/xdg/scram/client_conv.go b/vendor/github.com/xdg/scram/client_conv.go new file mode 100644 index 0000000..8340568 --- /dev/null +++ b/vendor/github.com/xdg/scram/client_conv.go @@ -0,0 +1,149 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package scram + +import ( + "crypto/hmac" + "encoding/base64" + "errors" + "fmt" + "strings" +) + +type clientState int + +const ( + clientStarting clientState = iota + clientFirst + clientFinal + clientDone +) + +// ClientConversation implements the client-side of an authentication +// conversation with a server. A new conversation must be created for +// each authentication attempt. 
+type ClientConversation struct { + client *Client + nonceGen NonceGeneratorFcn + hashGen HashGeneratorFcn + minIters int + state clientState + valid bool + gs2 string + nonce string + c1b string + serveSig []byte +} + +// Step takes a string provided from a server (or just an empty string for the +// very first conversation step) and attempts to move the authentication +// conversation forward. It returns a string to be sent to the server or an +// error if the server message is invalid. Calling Step after a conversation +// completes is also an error. +func (cc *ClientConversation) Step(challenge string) (response string, err error) { + switch cc.state { + case clientStarting: + cc.state = clientFirst + response, err = cc.firstMsg() + case clientFirst: + cc.state = clientFinal + response, err = cc.finalMsg(challenge) + case clientFinal: + cc.state = clientDone + response, err = cc.validateServer(challenge) + default: + response, err = "", errors.New("Conversation already completed") + } + return +} + +// Done returns true if the conversation is completed or has errored. +func (cc *ClientConversation) Done() bool { + return cc.state == clientDone +} + +// Valid returns true if the conversation successfully authenticated with the +// server, including counter-validation that the server actually has the +// user's stored credentials. 
+func (cc *ClientConversation) Valid() bool { + return cc.valid +} + +func (cc *ClientConversation) firstMsg() (string, error) { + // Values are cached for use in final message parameters + cc.gs2 = cc.gs2Header() + cc.nonce = cc.client.nonceGen() + cc.c1b = fmt.Sprintf("n=%s,r=%s", encodeName(cc.client.username), cc.nonce) + + return cc.gs2 + cc.c1b, nil +} + +func (cc *ClientConversation) finalMsg(s1 string) (string, error) { + msg, err := parseServerFirst(s1) + if err != nil { + return "", err + } + + // Check nonce prefix and update + if !strings.HasPrefix(msg.nonce, cc.nonce) { + return "", errors.New("server nonce did not extend client nonce") + } + cc.nonce = msg.nonce + + // Check iteration count vs minimum + if msg.iters < cc.minIters { + return "", fmt.Errorf("server requested too few iterations (%d)", msg.iters) + } + + // Create client-final-message-without-proof + c2wop := fmt.Sprintf( + "c=%s,r=%s", + base64.StdEncoding.EncodeToString([]byte(cc.gs2)), + cc.nonce, + ) + + // Create auth message + authMsg := cc.c1b + "," + s1 + "," + c2wop + + // Get derived keys from client cache + dk := cc.client.getDerivedKeys(KeyFactors{Salt: string(msg.salt), Iters: msg.iters}) + + // Create proof as clientkey XOR clientsignature + clientSignature := computeHMAC(cc.hashGen, dk.StoredKey, []byte(authMsg)) + clientProof := xorBytes(dk.ClientKey, clientSignature) + proof := base64.StdEncoding.EncodeToString(clientProof) + + // Cache ServerSignature for later validation + cc.serveSig = computeHMAC(cc.hashGen, dk.ServerKey, []byte(authMsg)) + + return fmt.Sprintf("%s,p=%s", c2wop, proof), nil +} + +func (cc *ClientConversation) validateServer(s2 string) (string, error) { + msg, err := parseServerFinal(s2) + if err != nil { + return "", err + } + + if len(msg.err) > 0 { + return "", fmt.Errorf("server error: %s", msg.err) + } + + if !hmac.Equal(msg.verifier, cc.serveSig) { + return "", errors.New("server validation failed") + } + + cc.valid = true + return "", nil +} + 
+func (cc *ClientConversation) gs2Header() string { + if cc.client.authzID == "" { + return "n,," + } + return fmt.Sprintf("n,%s,", encodeName(cc.client.authzID)) +} diff --git a/vendor/github.com/xdg/scram/client_conv_test.go b/vendor/github.com/xdg/scram/client_conv_test.go new file mode 100644 index 0000000..67b7e2f --- /dev/null +++ b/vendor/github.com/xdg/scram/client_conv_test.go @@ -0,0 +1,79 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package scram + +import ( + "strconv" + "testing" +) + +func TestClientConv(t *testing.T) { + cases, err := getTestData("good", "bad-server") + if err != nil { + t.Fatal(err) + } + + for _, v := range cases { + t.Run(v.Label, genClientSubTest(v)) + } +} + +func genClientSubTest(c TestCase) func(t *testing.T) { + return func(t *testing.T) { + hgf, err := getHGF(c.Digest) + if err != nil { + t.Fatal(err) + } + + var client *Client + if c.SkipSASLprep { + client, err = hgf.NewClientUnprepped(c.User, c.Pass, c.AuthzID) + } else { + client, err = hgf.NewClient(c.User, c.Pass, c.AuthzID) + } + if err != nil { + t.Errorf("%s: expected no error from NewClient, but got '%v'", c.Label, err) + } + if c.ClientNonce != "" { + client = client.WithNonceGenerator(func() string { return c.ClientNonce }) + } + conv := client.NewConversation() + + for i, s := range clientSteps(c) { + if conv.Done() { + t.Errorf("%s: Premature end of conversation before step %d", c.Label, i+1) + return + } + got, err := conv.Step(s.Input) + if s.IsError && err == nil { + t.Errorf("%s: step %d: expected error but didn't get one", c.Label, i+1) + return + } else if !s.IsError && err != nil { + t.Errorf("%s: step %d: expected no error but got '%v'", c.Label, i+1, err) + return + } + if got != s.Expect { + t.Errorf("%s: 
step %d: incorrect step message; got %s, expected %s", + c.Label, + i+1, + strconv.QuoteToASCII(got), + strconv.QuoteToASCII(s.Expect), + ) + return + } + } + + if c.Valid != conv.Valid() { + t.Errorf("%s: Conversation Valid() incorrect: got '%v', expected '%v'", c.Label, conv.Valid(), c.Valid) + return + } + + if !conv.Done() { + t.Errorf("%s: Conversation not marked done after last step", c.Label) + } + } +} diff --git a/vendor/github.com/xdg/scram/common.go b/vendor/github.com/xdg/scram/common.go new file mode 100644 index 0000000..cb705cb --- /dev/null +++ b/vendor/github.com/xdg/scram/common.go @@ -0,0 +1,97 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package scram + +import ( + "crypto/hmac" + "crypto/rand" + "encoding/base64" + "strings" +) + +// NonceGeneratorFcn defines a function that returns a string of high-quality +// random printable ASCII characters EXCLUDING the comma (',') character. The +// default nonce generator provides Base64 encoding of 24 bytes from +// crypto/rand. +type NonceGeneratorFcn func() string + +// derivedKeys collects the three cryptographically derived values +// into one struct for caching. +type derivedKeys struct { + ClientKey []byte + StoredKey []byte + ServerKey []byte +} + +// KeyFactors represent the two server-provided factors needed to compute +// client credentials for authentication. Salt is decoded bytes (i.e. not +// base64), but in string form so that KeyFactors can be used as a map key for +// cached credentials. +type KeyFactors struct { + Salt string + Iters int +} + +// StoredCredentials are the values that a server must store for a given +// username to allow authentication. 
They include the salt and iteration +// count, plus the derived values to authenticate a client and for the server +// to authenticate itself back to the client. +// +// NOTE: these are specific to a given hash function. To allow a user to +// authenticate with either SCRAM-SHA-1 or SCRAM-SHA-256, two sets of +// StoredCredentials must be created and stored, one for each hash function. +type StoredCredentials struct { + KeyFactors + StoredKey []byte + ServerKey []byte +} + +// CredentialLookup is a callback to provide StoredCredentials for a given +// username. This is used to configure Server objects. +// +// NOTE: these are specific to a given hash function. The callback provided +// to a Server with a given hash function must provide the corresponding +// StoredCredentials. +type CredentialLookup func(string) (StoredCredentials, error) + +func defaultNonceGenerator() string { + raw := make([]byte, 24) + nonce := make([]byte, base64.StdEncoding.EncodedLen(len(raw))) + rand.Read(raw) + base64.StdEncoding.Encode(nonce, raw) + return string(nonce) +} + +func encodeName(s string) string { + return strings.Replace(strings.Replace(s, "=", "=3D", -1), ",", "=2C", -1) +} + +func decodeName(s string) (string, error) { + // TODO Check for = not followed by 2C or 3D + return strings.Replace(strings.Replace(s, "=2C", ",", -1), "=3D", "=", -1), nil +} + +func computeHash(hg HashGeneratorFcn, b []byte) []byte { + h := hg() + h.Write(b) + return h.Sum(nil) +} + +func computeHMAC(hg HashGeneratorFcn, key, data []byte) []byte { + mac := hmac.New(hg, key) + mac.Write(data) + return mac.Sum(nil) +} + +func xorBytes(a, b []byte) []byte { + // TODO check a & b are same length, or just xor to smallest + xor := make([]byte, len(a)) + for i := range a { + xor[i] = a[i] ^ b[i] + } + return xor +} diff --git a/vendor/github.com/xdg/scram/common_test.go b/vendor/github.com/xdg/scram/common_test.go new file mode 100644 index 0000000..655ecbd --- /dev/null +++ 
b/vendor/github.com/xdg/scram/common_test.go @@ -0,0 +1,27 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package scram + +import "testing" + +func TestEncodeName(t *testing.T) { + cases := []struct { + input string + expect string + }{ + {input: "arthur", expect: "arthur"}, + {input: "doe,jane", expect: "doe=2Cjane"}, + {input: "a,b,c,d", expect: "a=2Cb=2Cc=2Cd"}, + {input: "a,b=c,d=", expect: "a=2Cb=3Dc=2Cd=3D"}, + } + + for _, c := range cases { + if got := encodeName(c.input); got != c.expect { + t.Errorf("Failed encoding '%s', got '%s', expected '%s'", c.input, got, c.expect) + } + } +} diff --git a/vendor/github.com/xdg/scram/doc.go b/vendor/github.com/xdg/scram/doc.go new file mode 100644 index 0000000..d43bee6 --- /dev/null +++ b/vendor/github.com/xdg/scram/doc.go @@ -0,0 +1,24 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package scram provides client and server implementations of the Salted +// Challenge Response Authentication Mechanism (SCRAM) described in RFC-5802 +// and RFC-7677. +// +// Usage +// +// The scram package provides two variables, `SHA1` and `SHA256`, that are +// used to construct Client or Server objects. 
+// +// clientSHA1, err := scram.SHA1.NewClient(username, password, authID) +// clientSHA256, err := scram.SHA256.NewClient(username, password, authID) +// +// serverSHA1, err := scram.SHA1.NewServer(credentialLookupFcn) +// serverSHA256, err := scram.SHA256.NewServer(credentialLookupFcn) +// +// These objects are used to construct ClientConversation or +// ServerConversation objects that are used to carry out authentication. +package scram diff --git a/vendor/github.com/xdg/scram/doc_test.go b/vendor/github.com/xdg/scram/doc_test.go new file mode 100644 index 0000000..e9f6369 --- /dev/null +++ b/vendor/github.com/xdg/scram/doc_test.go @@ -0,0 +1,44 @@ +package scram_test + +import "github.com/xdg/scram" + +func Example() { + // Get Client with username, password and (optional) authorization ID. + clientSHA1, err := scram.SHA1.NewClient("mulder", "trustno1", "") + if err != nil { + panic(err) + } + + // Prepare the authentication conversation. Use the empty string as the + // initial server message argument to start the conversation. + conv := clientSHA1.NewConversation() + var serverMsg string + + // Get the first message, send it and read the response. + firstMsg, err := conv.Step(serverMsg) + if err != nil { + panic(err) + } + serverMsg = sendClientMsg(firstMsg) + + // Get the second message, send it, and read the response. + secondMsg, err := conv.Step(serverMsg) + if err != nil { + panic(err) + } + serverMsg = sendClientMsg(secondMsg) + + // Validate the server's final message. We have no further message to + // send so ignore that return value. + _, err = conv.Step(serverMsg) + if err != nil { + panic(err) + } + + return +} + +func sendClientMsg(s string) string { + // A real implementation would send this to a server and read a reply. 
+ return "" +} diff --git a/vendor/github.com/xdg/scram/parse.go b/vendor/github.com/xdg/scram/parse.go new file mode 100644 index 0000000..722f604 --- /dev/null +++ b/vendor/github.com/xdg/scram/parse.go @@ -0,0 +1,205 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package scram + +import ( + "encoding/base64" + "errors" + "fmt" + "strconv" + "strings" +) + +type c1Msg struct { + gs2Header string + authzID string + username string + nonce string + c1b string +} + +type c2Msg struct { + cbind []byte + nonce string + proof []byte + c2wop string +} + +type s1Msg struct { + nonce string + salt []byte + iters int +} + +type s2Msg struct { + verifier []byte + err string +} + +func parseField(s, k string) (string, error) { + t := strings.TrimPrefix(s, k+"=") + if t == s { + return "", fmt.Errorf("error parsing '%s' for field '%s'", s, k) + } + return t, nil +} + +func parseGS2Flag(s string) (string, error) { + if s[0] == 'p' { + return "", fmt.Errorf("channel binding requested but not supported") + } + + if s == "n" || s == "y" { + return s, nil + } + + return "", fmt.Errorf("error parsing '%s' for gs2 flag", s) +} + +func parseFieldBase64(s, k string) ([]byte, error) { + raw, err := parseField(s, k) + if err != nil { + return nil, err + } + + dec, err := base64.StdEncoding.DecodeString(raw) + if err != nil { + return nil, err + } + + return dec, nil +} + +func parseFieldInt(s, k string) (int, error) { + raw, err := parseField(s, k) + if err != nil { + return 0, err + } + + num, err := strconv.Atoi(raw) + if err != nil { + return 0, fmt.Errorf("error parsing field '%s': %v", k, err) + } + + return num, nil +} + +func parseClientFirst(c1 string) (msg c1Msg, err error) { + + fields := strings.Split(c1, ",") + if len(fields) < 4 { 
+ err = errors.New("not enough fields in first server message") + return + } + + gs2flag, err := parseGS2Flag(fields[0]) + if err != nil { + return + } + + // 'a' field is optional + if len(fields[1]) > 0 { + msg.authzID, err = parseField(fields[1], "a") + if err != nil { + return + } + } + + // Recombine and save the gs2 header + msg.gs2Header = gs2flag + "," + msg.authzID + "," + + // Check for unsupported extensions field "m". + if strings.HasPrefix(fields[2], "m=") { + err = errors.New("SCRAM message extensions are not supported") + return + } + + msg.username, err = parseField(fields[2], "n") + if err != nil { + return + } + + msg.nonce, err = parseField(fields[3], "r") + if err != nil { + return + } + + msg.c1b = strings.Join(fields[2:], ",") + + return +} + +func parseClientFinal(c2 string) (msg c2Msg, err error) { + fields := strings.Split(c2, ",") + if len(fields) < 3 { + err = errors.New("not enough fields in first server message") + return + } + + msg.cbind, err = parseFieldBase64(fields[0], "c") + if err != nil { + return + } + + msg.nonce, err = parseField(fields[1], "r") + if err != nil { + return + } + + // Extension fields may come between nonce and proof, so we + // grab the *last* fields as proof. + msg.proof, err = parseFieldBase64(fields[len(fields)-1], "p") + if err != nil { + return + } + + msg.c2wop = c2[:strings.LastIndex(c2, ",")] + + return +} + +func parseServerFirst(s1 string) (msg s1Msg, err error) { + + // Check for unsupported extensions field "m". 
+ if strings.HasPrefix(s1, "m=") { + err = errors.New("SCRAM message extensions are not supported") + return + } + + fields := strings.Split(s1, ",") + if len(fields) < 3 { + err = errors.New("not enough fields in first server message") + return + } + + msg.nonce, err = parseField(fields[0], "r") + if err != nil { + return + } + + msg.salt, err = parseFieldBase64(fields[1], "s") + if err != nil { + return + } + + msg.iters, err = parseFieldInt(fields[2], "i") + + return +} + +func parseServerFinal(s2 string) (msg s2Msg, err error) { + fields := strings.Split(s2, ",") + + msg.verifier, err = parseFieldBase64(fields[0], "v") + if err == nil { + return + } + + msg.err, err = parseField(fields[0], "e") + + return +} diff --git a/vendor/github.com/xdg/scram/scram.go b/vendor/github.com/xdg/scram/scram.go new file mode 100644 index 0000000..9e9836a --- /dev/null +++ b/vendor/github.com/xdg/scram/scram.go @@ -0,0 +1,66 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package scram + +import ( + "crypto/sha1" + "crypto/sha256" + "fmt" + "hash" + + "github.com/xdg/stringprep" +) + +// HashGeneratorFcn abstracts a factory function that returns a hash.Hash +// value to be used for SCRAM operations. Generally, one would use the +// provided package variables, `scram.SHA1` and `scram.SHA256`, for the most +// common forms of SCRAM. +type HashGeneratorFcn func() hash.Hash + +// SHA1 is a function that returns a crypto/sha1 hasher and should be used to +// create Client objects configured for SHA-1 hashing. +var SHA1 HashGeneratorFcn = func() hash.Hash { return sha1.New() } + +// SHA256 is a function that returns a crypto/sha256 hasher and should be used +// to create Client objects configured for SHA-256 hashing. 
+var SHA256 HashGeneratorFcn = func() hash.Hash { return sha256.New() } + +// NewClient constructs a SCRAM client component based on a given hash.Hash +// factory receiver. This constructor will normalize the username, password +// and authzID via the SASLprep algorithm, as recommended by RFC-5802. If +// SASLprep fails, the method returns an error. +func (f HashGeneratorFcn) NewClient(username, password, authzID string) (*Client, error) { + var userprep, passprep, authprep string + var err error + + if userprep, err = stringprep.SASLprep.Prepare(username); err != nil { + return nil, fmt.Errorf("Error SASLprepping username '%s': %v", username, err) + } + if passprep, err = stringprep.SASLprep.Prepare(password); err != nil { + return nil, fmt.Errorf("Error SASLprepping password '%s': %v", password, err) + } + if authprep, err = stringprep.SASLprep.Prepare(authzID); err != nil { + return nil, fmt.Errorf("Error SASLprepping authzID '%s': %v", authzID, err) + } + + return newClient(userprep, passprep, authprep, f), nil +} + +// NewClientUnprepped acts like NewClient, except none of the arguments will +// be normalized via SASLprep. This is not generally recommended, but is +// provided for users that may have custom normalization needs. +func (f HashGeneratorFcn) NewClientUnprepped(username, password, authzID string) (*Client, error) { + return newClient(username, password, authzID, f), nil +} + +// NewServer constructs a SCRAM server component based on a given hash.Hash +// factory receiver. To be maximally generic, it uses dependency injection to +// handle credential lookup, which is the process of turning a username string +// into a struct with stored credentials for authentication. 
+func (f HashGeneratorFcn) NewServer(cl CredentialLookup) (*Server, error) { + return newServer(cl, f) +} diff --git a/vendor/github.com/xdg/scram/server.go b/vendor/github.com/xdg/scram/server.go new file mode 100644 index 0000000..b119b36 --- /dev/null +++ b/vendor/github.com/xdg/scram/server.go @@ -0,0 +1,50 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package scram + +import "sync" + +// Server implements the server side of SCRAM authentication. It holds +// configuration values needed to initialize new server-side conversations. +// Generally, this can be persistent within an application. +type Server struct { + sync.RWMutex + credentialCB CredentialLookup + nonceGen NonceGeneratorFcn + hashGen HashGeneratorFcn +} + +func newServer(cl CredentialLookup, fcn HashGeneratorFcn) (*Server, error) { + return &Server{ + credentialCB: cl, + nonceGen: defaultNonceGenerator, + hashGen: fcn, + }, nil +} + +// WithNonceGenerator replaces the default nonce generator (base64 encoding of +// 24 bytes from crypto/rand) with a custom generator. This is provided for +// testing or for users with custom nonce requirements. +func (s *Server) WithNonceGenerator(ng NonceGeneratorFcn) *Server { + s.Lock() + defer s.Unlock() + s.nonceGen = ng + return s +} + +// NewConversation constructs a server-side authentication conversation. +// Conversations cannot be reused, so this must be called for each new +// authentication attempt. 
+func (s *Server) NewConversation() *ServerConversation { + s.RLock() + defer s.RUnlock() + return &ServerConversation{ + nonceGen: s.nonceGen, + hashGen: s.hashGen, + credentialCB: s.credentialCB, + } +} diff --git a/vendor/github.com/xdg/scram/server_conv.go b/vendor/github.com/xdg/scram/server_conv.go new file mode 100644 index 0000000..9c8838c --- /dev/null +++ b/vendor/github.com/xdg/scram/server_conv.go @@ -0,0 +1,151 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package scram + +import ( + "crypto/hmac" + "encoding/base64" + "errors" + "fmt" +) + +type serverState int + +const ( + serverFirst serverState = iota + serverFinal + serverDone +) + +// ServerConversation implements the server-side of an authentication +// conversation with a client. A new conversation must be created for +// each authentication attempt. +type ServerConversation struct { + nonceGen NonceGeneratorFcn + hashGen HashGeneratorFcn + credentialCB CredentialLookup + state serverState + credential StoredCredentials + valid bool + gs2Header string + username string + authzID string + nonce string + c1b string + s1 string +} + +// Step takes a string provided from a client and attempts to move the +// authentication conversation forward. It returns a string to be sent to the +// client or an error if the client message is invalid. Calling Step after a +// conversation completes is also an error. 
+func (sc *ServerConversation) Step(challenge string) (response string, err error) { + switch sc.state { + case serverFirst: + sc.state = serverFinal + response, err = sc.firstMsg(challenge) + case serverFinal: + sc.state = serverDone + response, err = sc.finalMsg(challenge) + default: + response, err = "", errors.New("Conversation already completed") + } + return +} + +// Done returns true if the conversation is completed or has errored. +func (sc *ServerConversation) Done() bool { + return sc.state == serverDone +} + +// Valid returns true if the conversation successfully authenticated the +// client. +func (sc *ServerConversation) Valid() bool { + return sc.valid +} + +// Username returns the client-provided username. This is valid to call +// if the first conversation Step() is successful. +func (sc *ServerConversation) Username() string { + return sc.username +} + +// AuthzID returns the (optional) client-provided authorization identity, if +// any. If one was not provided, it returns the empty string. This is valid +// to call if the first conversation Step() is successful. +func (sc *ServerConversation) AuthzID() string { + return sc.authzID +} + +func (sc *ServerConversation) firstMsg(c1 string) (string, error) { + msg, err := parseClientFirst(c1) + if err != nil { + sc.state = serverDone + return "", err + } + + sc.gs2Header = msg.gs2Header + sc.username = msg.username + sc.authzID = msg.authzID + + sc.credential, err = sc.credentialCB(msg.username) + if err != nil { + sc.state = serverDone + return "e=unknown-user", err + } + + sc.nonce = msg.nonce + sc.nonceGen() + sc.c1b = msg.c1b + sc.s1 = fmt.Sprintf("r=%s,s=%s,i=%d", + sc.nonce, + base64.StdEncoding.EncodeToString([]byte(sc.credential.Salt)), + sc.credential.Iters, + ) + + return sc.s1, nil +} + +// For errors, returns server error message as well as non-nil error. Callers +// can choose whether to send server error or not. 
+func (sc *ServerConversation) finalMsg(c2 string) (string, error) { + msg, err := parseClientFinal(c2) + if err != nil { + return "", err + } + + // Check channel binding matches what we expect; in this case, we expect + // just the gs2 header we received as we don't support channel binding + // with a data payload. If we add binding, we need to independently + // compute the header to match here. + if string(msg.cbind) != sc.gs2Header { + return "e=channel-bindings-dont-match", fmt.Errorf("channel binding received '%s' doesn't match expected '%s'", msg.cbind, sc.gs2Header) + } + + // Check nonce received matches what we sent + if msg.nonce != sc.nonce { + return "e=other-error", errors.New("nonce received did not match nonce sent") + } + + // Create auth message + authMsg := sc.c1b + "," + sc.s1 + "," + msg.c2wop + + // Retrieve ClientKey from proof and verify it + clientSignature := computeHMAC(sc.hashGen, sc.credential.StoredKey, []byte(authMsg)) + clientKey := xorBytes([]byte(msg.proof), clientSignature) + storedKey := computeHash(sc.hashGen, clientKey) + + // Compare with constant-time function + if !hmac.Equal(storedKey, sc.credential.StoredKey) { + return "e=invalid-proof", errors.New("challenge proof invalid") + } + + sc.valid = true + + // Compute and return server verifier + serverSignature := computeHMAC(sc.hashGen, sc.credential.ServerKey, []byte(authMsg)) + return "v=" + base64.StdEncoding.EncodeToString(serverSignature), nil +} diff --git a/vendor/github.com/xdg/scram/server_conv_test.go b/vendor/github.com/xdg/scram/server_conv_test.go new file mode 100644 index 0000000..cebd2a4 --- /dev/null +++ b/vendor/github.com/xdg/scram/server_conv_test.go @@ -0,0 +1,137 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package scram + +import ( + "encoding/base64" + "fmt" + "strconv" + "testing" + + "github.com/xdg/stringprep" +) + +func TestServerConv(t *testing.T) { + cases, err := getTestData("good", "bad-client") + if err != nil { + t.Fatal(err) + } + + for _, v := range cases { + t.Run(v.Label, genServerSubTest(v)) + } +} + +// Prep user credential callback for the case from Client +func genServerCallback(c TestCase) (CredentialLookup, error) { + salt, err := base64.StdEncoding.DecodeString(c.Salt64) + if err != nil { + return nil, fmt.Errorf("error decoding salt: %v", err) + } + + hgf, err := getHGF(c.Digest) + if err != nil { + return nil, fmt.Errorf("error getting digest for credential callback: %v", err) + } + + kf := KeyFactors{Salt: string(salt), Iters: c.Iters} + + var client *Client + var userprep string + if c.SkipSASLprep { + client, err = hgf.NewClientUnprepped(c.User, c.Pass, c.AuthzID) + userprep = c.User + } else { + client, err = hgf.NewClient(c.User, c.Pass, c.AuthzID) + if userprep, err = stringprep.SASLprep.Prepare(c.User); err != nil { + return nil, fmt.Errorf("Error SASLprepping username '%s': %v", c.User, err) + } + } + if err != nil { + return nil, fmt.Errorf("error generating client for credential callback: %v", err) + } + + stored := client.GetStoredCredentials(kf) + + cbFcn := func(s string) (StoredCredentials, error) { + if s == userprep { + return stored, nil + } + return StoredCredentials{}, fmt.Errorf("Unknown user %s", s) + } + + return cbFcn, nil +} + +func genServerSubTest(c TestCase) func(t *testing.T) { + return func(t *testing.T) { + hgf, err := getHGF(c.Digest) + if err != nil { + t.Fatal(err) + } + + cbFcn, err := genServerCallback(c) + if err != nil { + t.Fatal(err) + } + + server, err := hgf.NewServer(cbFcn) + if err != nil { + t.Fatalf("%s: expected no error from NewServer, but got '%v'", c.Label, err) + } + if c.ServerNonce != "" { + server = 
server.WithNonceGenerator(func() string { return c.ServerNonce }) + } + conv := server.NewConversation() + + for i, s := range serverSteps(c) { + if conv.Done() { + t.Errorf("%s: Premature end of conversation before step %d", c.Label, i+1) + return + } + got, err := conv.Step(s.Input) + if s.IsError && err == nil { + t.Errorf("%s: step %d: expected error but didn't get one", c.Label, i+1) + return + } else if !s.IsError && err != nil { + t.Errorf("%s: step %d: expected no error but got '%v'", c.Label, i+1, err) + return + } + if got != s.Expect { + t.Errorf("%s: step %d: incorrect step message; got %s, expected %s", + c.Label, + i+1, + strconv.QuoteToASCII(got), + strconv.QuoteToASCII(s.Expect), + ) + return + } + } + + if c.Valid != conv.Valid() { + t.Errorf("%s: Conversation Valid() incorrect: got '%v', expected '%v'", c.Label, conv.Valid(), c.Valid) + return + } + + if !conv.Done() { + t.Errorf("%s: Conversation not marked done after last step", c.Label) + } + + var expectedUser string + if c.SkipSASLprep { + expectedUser = c.User + } else { + if expectedUser, err = stringprep.SASLprep.Prepare(c.User); err != nil { + t.Errorf("Error SASLprepping username '%s': %v", c.User, err) + } + } + + if conv.Valid() && conv.Username() != expectedUser { + t.Errorf("%s: Conversation didn't record proper username: got '%s', expected '%s'", c.Label, conv.Username(), expectedUser) + } + } +} diff --git a/vendor/github.com/xdg/scram/testdata/bad-client/bad-user.json b/vendor/github.com/xdg/scram/testdata/bad-client/bad-user.json new file mode 100644 index 0000000..9c0ae03 --- /dev/null +++ b/vendor/github.com/xdg/scram/testdata/bad-client/bad-user.json @@ -0,0 +1,17 @@ +{ + "label": "unknown user", + "digest": "SHA-1", + "user": "user", + "pass": "pencil", + "authID": "", + "skipSASLprep": false, + "salt64": "QSXCR+Q6sek8bf92", + "iters": 4096, + "clientNonce": "fyko+d2lbbFgONRv9qkxdawL", + "serverNonce": "3rfcNHYJY1ZVvWVs7j", + "valid": false, + "steps" : [ + 
"n,,n=doesntexist,r=fyko+d2lbbFgONRv9qkxdawL", + "e=unknown-user" + ] +} diff --git a/vendor/github.com/xdg/scram/testdata/bad-client/rfc5802-bad-proof.json b/vendor/github.com/xdg/scram/testdata/bad-client/rfc5802-bad-proof.json new file mode 100644 index 0000000..3f021ad --- /dev/null +++ b/vendor/github.com/xdg/scram/testdata/bad-client/rfc5802-bad-proof.json @@ -0,0 +1,19 @@ +{ + "label": "RFC 5802 example with bad proof", + "digest": "SHA-1", + "user": "user", + "pass": "pencil", + "authID": "", + "skipSASLprep": false, + "salt64": "QSXCR+Q6sek8bf92", + "iters": 4096, + "clientNonce": "fyko+d2lbbFgONRv9qkxdawL", + "serverNonce": "3rfcNHYJY1ZVvWVs7j", + "valid": false, + "steps" : [ + "n,,n=user,r=fyko+d2lbbFgONRv9qkxdawL", + "r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096", + "c=biws,r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,p=AAAAAAAAAAAAAAAAAAAAAAAAAAA=", + "e=invalid-proof" + ] +} diff --git a/vendor/github.com/xdg/scram/testdata/bad-client/rfc7677-bad-proof.json b/vendor/github.com/xdg/scram/testdata/bad-client/rfc7677-bad-proof.json new file mode 100644 index 0000000..60c0bd0 --- /dev/null +++ b/vendor/github.com/xdg/scram/testdata/bad-client/rfc7677-bad-proof.json @@ -0,0 +1,19 @@ +{ + "label": "RFC 7677 example with bad proof", + "digest": "SHA-256", + "user": "user", + "pass": "pencil", + "authID": "", + "skipSASLprep": false, + "salt64": "W22ZaJ0SNY7soEsUEjb6gQ==", + "iters": 4096, + "clientNonce": "rOprNGfwEbeRWgbNEkqO", + "serverNonce": "%hvYDpWUa2RaTCAfuxFIlj)hNlF$k0", + "valid": false, + "steps" : [ + "n,,n=user,r=rOprNGfwEbeRWgbNEkqO", + "r=rOprNGfwEbeRWgbNEkqO%hvYDpWUa2RaTCAfuxFIlj)hNlF$k0,s=W22ZaJ0SNY7soEsUEjb6gQ==,i=4096", + "c=biws,r=rOprNGfwEbeRWgbNEkqO%hvYDpWUa2RaTCAfuxFIlj)hNlF$k0,p=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=", + "e=invalid-proof" + ] +} diff --git a/vendor/github.com/xdg/scram/testdata/bad-server/rfc5802-bad-validator.json 
b/vendor/github.com/xdg/scram/testdata/bad-server/rfc5802-bad-validator.json new file mode 100644 index 0000000..467cbe6 --- /dev/null +++ b/vendor/github.com/xdg/scram/testdata/bad-server/rfc5802-bad-validator.json @@ -0,0 +1,19 @@ +{ + "label": "RFC 5802 example with bad validation", + "digest": "SHA-1", + "user": "user", + "pass": "pencil", + "authID": "", + "skipSASLprep": false, + "salt64": "QSXCR+Q6sek8bf92", + "iters": 4096, + "clientNonce": "fyko+d2lbbFgONRv9qkxdawL", + "serverNonce": "3rfcNHYJY1ZVvWVs7j", + "valid": false, + "steps" : [ + "n,,n=user,r=fyko+d2lbbFgONRv9qkxdawL", + "r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096", + "c=biws,r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,p=v0X8v3Bz2T0CJGbJQyF0X+HI4Ts=", + "v=AAAAAAAAAAAAAAAAAAAAAAAAAAA=" + ] +} diff --git a/vendor/github.com/xdg/scram/testdata/bad-server/rfc7677-bad-validator.json b/vendor/github.com/xdg/scram/testdata/bad-server/rfc7677-bad-validator.json new file mode 100644 index 0000000..57c287b --- /dev/null +++ b/vendor/github.com/xdg/scram/testdata/bad-server/rfc7677-bad-validator.json @@ -0,0 +1,19 @@ +{ + "label": "RFC 7677 example with bad validation", + "digest": "SHA-256", + "user": "user", + "pass": "pencil", + "authID": "", + "skipSASLprep": false, + "salt64": "W22ZaJ0SNY7soEsUEjb6gQ==", + "iters": 4096, + "clientNonce": "rOprNGfwEbeRWgbNEkqO", + "serverNonce": "%hvYDpWUa2RaTCAfuxFIlj)hNlF$k0", + "valid": false, + "steps" : [ + "n,,n=user,r=rOprNGfwEbeRWgbNEkqO", + "r=rOprNGfwEbeRWgbNEkqO%hvYDpWUa2RaTCAfuxFIlj)hNlF$k0,s=W22ZaJ0SNY7soEsUEjb6gQ==,i=4096", + "c=biws,r=rOprNGfwEbeRWgbNEkqO%hvYDpWUa2RaTCAfuxFIlj)hNlF$k0,p=dHzbZapWIk4jUhN+Ute9ytag9zjfMHgsqmmiz7AndVQ=", + "v=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + ] +} diff --git a/vendor/github.com/xdg/scram/testdata/good/rfc5802.json b/vendor/github.com/xdg/scram/testdata/good/rfc5802.json new file mode 100644 index 0000000..121761d --- /dev/null +++ 
b/vendor/github.com/xdg/scram/testdata/good/rfc5802.json @@ -0,0 +1,20 @@ +{ + "label": "RFC 5802 example", + "digest": "SHA-1", + "user": "user", + "pass": "pencil", + "authID": "", + "skipSASLprep": false, + "salt64": "QSXCR+Q6sek8bf92", + "iters": 4096, + "clientNonce": "fyko+d2lbbFgONRv9qkxdawL", + "serverNonce": "3rfcNHYJY1ZVvWVs7j", + "valid": true, + "steps" : [ + "n,,n=user,r=fyko+d2lbbFgONRv9qkxdawL", + "r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096", + "c=biws,r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,p=v0X8v3Bz2T0CJGbJQyF0X+HI4Ts=", + "v=rmF9pqV8S7suAoZWja4dJRkFsKQ=", + "" + ] +} diff --git a/vendor/github.com/xdg/scram/testdata/good/rfc7677.json b/vendor/github.com/xdg/scram/testdata/good/rfc7677.json new file mode 100644 index 0000000..af2a008 --- /dev/null +++ b/vendor/github.com/xdg/scram/testdata/good/rfc7677.json @@ -0,0 +1,20 @@ +{ + "label": "RFC 7677 example", + "digest": "SHA-256", + "user": "user", + "pass": "pencil", + "authID": "", + "skipSASLprep": false, + "salt64": "W22ZaJ0SNY7soEsUEjb6gQ==", + "iters": 4096, + "clientNonce": "rOprNGfwEbeRWgbNEkqO", + "serverNonce": "%hvYDpWUa2RaTCAfuxFIlj)hNlF$k0", + "valid": true, + "steps" : [ + "n,,n=user,r=rOprNGfwEbeRWgbNEkqO", + "r=rOprNGfwEbeRWgbNEkqO%hvYDpWUa2RaTCAfuxFIlj)hNlF$k0,s=W22ZaJ0SNY7soEsUEjb6gQ==,i=4096", + "c=biws,r=rOprNGfwEbeRWgbNEkqO%hvYDpWUa2RaTCAfuxFIlj)hNlF$k0,p=dHzbZapWIk4jUhN+Ute9ytag9zjfMHgsqmmiz7AndVQ=", + "v=6rriTRBi23WpRR/wtup+mMhUZUn/dB5nLTJRsjl95G4=", + "" + ] +} diff --git a/vendor/github.com/xdg/scram/testdata/good/sha-1-ascii-pass.json b/vendor/github.com/xdg/scram/testdata/good/sha-1-ascii-pass.json new file mode 100644 index 0000000..ab86b03 --- /dev/null +++ b/vendor/github.com/xdg/scram/testdata/good/sha-1-ascii-pass.json @@ -0,0 +1,20 @@ +{ + "label" : "SHA-1 ASCII pass", + "digest" : "SHA-1", + "user" : "ram\u00f5n", + "pass" : "pencil", + "authID" : "", + "skipSASLprep" : false, + "salt64" : "c2FsdFNBTFRzYWx0\n", + "iters" : 4096, + 
"clientNonce" : "clientNONCE", + "serverNonce" : "serverNONCE", + "valid" : true, + "steps" : [ + "n,,n=ram\u00f5n,r=clientNONCE", + "r=clientNONCEserverNONCE,s=c2FsdFNBTFRzYWx0,i=4096", + "c=biws,r=clientNONCEserverNONCE,p=kvH02DJiH7oHwk+SKpN4plfpF04=", + "v=BoA2mAPlV/b9A5WPDbHmHZi3EGc=", + "" + ] +} diff --git a/vendor/github.com/xdg/scram/testdata/good/sha-1-ascii-user.json b/vendor/github.com/xdg/scram/testdata/good/sha-1-ascii-user.json new file mode 100644 index 0000000..d383104 --- /dev/null +++ b/vendor/github.com/xdg/scram/testdata/good/sha-1-ascii-user.json @@ -0,0 +1,20 @@ +{ + "label" : "SHA-1 ASCII user", + "digest" : "SHA-1", + "user" : "user", + "pass" : "p\u00e8ncil", + "authID" : "", + "skipSASLprep" : false, + "salt64" : "c2FsdFNBTFRzYWx0\n", + "iters" : 4096, + "clientNonce" : "clientNONCE", + "serverNonce" : "serverNONCE", + "valid" : true, + "steps" : [ + "n,,n=user,r=clientNONCE", + "r=clientNONCEserverNONCE,s=c2FsdFNBTFRzYWx0,i=4096", + "c=biws,r=clientNONCEserverNONCE,p=yn797N2/XhIwZBB29LhEs6D6XVw=", + "v=a6QRQikpGygizEM4/rCOvkgdglI=", + "" + ] +} diff --git a/vendor/github.com/xdg/scram/testdata/good/sha-1-ascii.json b/vendor/github.com/xdg/scram/testdata/good/sha-1-ascii.json new file mode 100644 index 0000000..d24a443 --- /dev/null +++ b/vendor/github.com/xdg/scram/testdata/good/sha-1-ascii.json @@ -0,0 +1,20 @@ +{ + "label" : "SHA-1 ASCII", + "digest" : "SHA-1", + "user" : "user", + "pass" : "pencil", + "authID" : "", + "skipSASLprep" : false, + "salt64" : "c2FsdFNBTFRzYWx0\n", + "iters" : 4096, + "clientNonce" : "clientNONCE", + "serverNonce" : "serverNONCE", + "valid" : true, + "steps" : [ + "n,,n=user,r=clientNONCE", + "r=clientNONCEserverNONCE,s=c2FsdFNBTFRzYWx0,i=4096", + "c=biws,r=clientNONCEserverNONCE,p=I4oktcY7BOL0Agn0NlWRXlRP1mg=", + "v=oKPvB1bE/9ydptJ+kohMgL+NdM0=", + "" + ] +} diff --git a/vendor/github.com/xdg/scram/testdata/good/sha-1-no-saslprep.json b/vendor/github.com/xdg/scram/testdata/good/sha-1-no-saslprep.json new 
file mode 100644 index 0000000..a593087 --- /dev/null +++ b/vendor/github.com/xdg/scram/testdata/good/sha-1-no-saslprep.json @@ -0,0 +1,20 @@ +{ + "label" : "SHA-1 no-SASLprep", + "digest" : "SHA-1", + "user" : "ramo\u0301n", + "pass" : "p\u212bssword", + "authID" : "", + "skipSASLprep" : true, + "salt64" : "c2FsdFNBTFRzYWx0\n", + "iters" : 4096, + "clientNonce" : "clientNONCE", + "serverNonce" : "serverNONCE", + "valid" : true, + "steps" : [ + "n,,n=ramo\u0301n,r=clientNONCE", + "r=clientNONCEserverNONCE,s=c2FsdFNBTFRzYWx0,i=4096", + "c=biws,r=clientNONCEserverNONCE,p=zLg8AlljNXeGOwWk0G2ay6a6qiM=", + "v=sVH5eR1tapz4QrMVCIGAlrUCAfc=", + "" + ] +} diff --git a/vendor/github.com/xdg/scram/testdata/good/sha-1-saslprep-non-normal.json b/vendor/github.com/xdg/scram/testdata/good/sha-1-saslprep-non-normal.json new file mode 100644 index 0000000..4e0aef1 --- /dev/null +++ b/vendor/github.com/xdg/scram/testdata/good/sha-1-saslprep-non-normal.json @@ -0,0 +1,20 @@ +{ + "label" : "SHA-1 SASLprep non-normal", + "digest" : "SHA-1", + "user" : "ramo\u0301n", + "pass" : "p\u212bssword", + "authID" : "", + "skipSASLprep" : false, + "salt64" : "c2FsdFNBTFRzYWx0\n", + "iters" : 4096, + "clientNonce" : "clientNONCE", + "serverNonce" : "serverNONCE", + "valid" : true, + "steps" : [ + "n,,n=ram\u00f3n,r=clientNONCE", + "r=clientNONCEserverNONCE,s=c2FsdFNBTFRzYWx0,i=4096", + "c=biws,r=clientNONCEserverNONCE,p=92sLIo0pB5IdEBOhBXx+t6Ew4pA=", + "v=xS0F7g5YU4fvigpFAb8jTE8/S0E=", + "" + ] +} diff --git a/vendor/github.com/xdg/scram/testdata/good/sha-1-saslprep-normal.json b/vendor/github.com/xdg/scram/testdata/good/sha-1-saslprep-normal.json new file mode 100644 index 0000000..80b8138 --- /dev/null +++ b/vendor/github.com/xdg/scram/testdata/good/sha-1-saslprep-normal.json @@ -0,0 +1,20 @@ +{ + "label" : "SHA-1 SASLprep normal", + "digest" : "SHA-1", + "user" : "ram\u00f5n", + "pass" : "p\u00c5assword", + "authID" : "", + "skipSASLprep" : false, + "salt64" : "c2FsdFNBTFRzYWx0\n", + "iters" : 
4096, + "clientNonce" : "clientNONCE", + "serverNonce" : "serverNONCE", + "valid" : true, + "steps" : [ + "n,,n=ram\u00f5n,r=clientNONCE", + "r=clientNONCEserverNONCE,s=c2FsdFNBTFRzYWx0,i=4096", + "c=biws,r=clientNONCEserverNONCE,p=A1/CIzRGDxwgLpXqQ0CHSSOKX08=", + "v=aCt2W88clBMnoAQauVf677Rjpho=", + "" + ] +} diff --git a/vendor/github.com/xdg/scram/testdata/good/sha-256-ascii-pass.json b/vendor/github.com/xdg/scram/testdata/good/sha-256-ascii-pass.json new file mode 100644 index 0000000..864f85f --- /dev/null +++ b/vendor/github.com/xdg/scram/testdata/good/sha-256-ascii-pass.json @@ -0,0 +1,20 @@ +{ + "label" : "SHA-256 ASCII pass", + "digest" : "SHA-256", + "user" : "ram\u00f5n", + "pass" : "pencil", + "authID" : "", + "skipSASLprep" : false, + "salt64" : "c2FsdFNBTFRzYWx0\n", + "iters" : 4096, + "clientNonce" : "clientNONCE", + "serverNonce" : "serverNONCE", + "valid" : true, + "steps" : [ + "n,,n=ram\u00f5n,r=clientNONCE", + "r=clientNONCEserverNONCE,s=c2FsdFNBTFRzYWx0,i=4096", + "c=biws,r=clientNONCEserverNONCE,p=vRdD7SqiY5kMyAFX2enPOJK9BL+3YIVyuzCt1H2qc4o=", + "v=sh7QPwVuquMatYobYpYOaPiNS+lqwTCmy3rdexRDDkE=", + "" + ] +} diff --git a/vendor/github.com/xdg/scram/testdata/good/sha-256-ascii-user.json b/vendor/github.com/xdg/scram/testdata/good/sha-256-ascii-user.json new file mode 100644 index 0000000..29e8d06 --- /dev/null +++ b/vendor/github.com/xdg/scram/testdata/good/sha-256-ascii-user.json @@ -0,0 +1,20 @@ +{ + "label" : "SHA-256 ASCII user", + "digest" : "SHA-256", + "user" : "user", + "pass" : "p\u00e8ncil", + "authID" : "", + "skipSASLprep" : false, + "salt64" : "c2FsdFNBTFRzYWx0\n", + "iters" : 4096, + "clientNonce" : "clientNONCE", + "serverNonce" : "serverNONCE", + "valid" : true, + "steps" : [ + "n,,n=user,r=clientNONCE", + "r=clientNONCEserverNONCE,s=c2FsdFNBTFRzYWx0,i=4096", + "c=biws,r=clientNONCEserverNONCE,p=o6rKPfQCKSGHClFxHjdSeiVCPA6K53++gpY3XlP8lI8=", + "v=rsyNAwnHfclZKxAKx1tKfInH3xPVAzCy237DQo5n/N8=", + "" + ] +} diff --git 
a/vendor/github.com/xdg/scram/testdata/good/sha-256-ascii.json b/vendor/github.com/xdg/scram/testdata/good/sha-256-ascii.json new file mode 100644 index 0000000..6c20a3e --- /dev/null +++ b/vendor/github.com/xdg/scram/testdata/good/sha-256-ascii.json @@ -0,0 +1,20 @@ +{ + "label" : "SHA-256 ASCII", + "digest" : "SHA-256", + "user" : "user", + "pass" : "pencil", + "authID" : "", + "skipSASLprep" : false, + "salt64" : "c2FsdFNBTFRzYWx0\n", + "iters" : 4096, + "clientNonce" : "clientNONCE", + "serverNonce" : "serverNONCE", + "valid" : true, + "steps" : [ + "n,,n=user,r=clientNONCE", + "r=clientNONCEserverNONCE,s=c2FsdFNBTFRzYWx0,i=4096", + "c=biws,r=clientNONCEserverNONCE,p=ItXnHvCDW7VGij6H+4rv2o93HvkLwrQaLkfVjeSMfrc=", + "v=P61v8wxOu6B9J7Uij+Sk4zewSK1e6en6f5rCFO4OUNE=", + "" + ] +} diff --git a/vendor/github.com/xdg/scram/testdata/good/sha-256-no-saslprep.json b/vendor/github.com/xdg/scram/testdata/good/sha-256-no-saslprep.json new file mode 100644 index 0000000..663a066 --- /dev/null +++ b/vendor/github.com/xdg/scram/testdata/good/sha-256-no-saslprep.json @@ -0,0 +1,20 @@ +{ + "label" : "SHA-256 no-SASLprep", + "digest" : "SHA-256", + "user" : "ramo\u0301n", + "pass" : "p\u212bssword", + "authID" : "", + "skipSASLprep" : true, + "salt64" : "c2FsdFNBTFRzYWx0\n", + "iters" : 4096, + "clientNonce" : "clientNONCE", + "serverNonce" : "serverNONCE", + "valid" : true, + "steps" : [ + "n,,n=ramo\u0301n,r=clientNONCE", + "r=clientNONCEserverNONCE,s=c2FsdFNBTFRzYWx0,i=4096", + "c=biws,r=clientNONCEserverNONCE,p=oTfTL+YxW2HglmsPRO5VLdQk+oVt48HHrKppt+kYP2Y=", + "v=mtXS1UbPSI9Ks9flMJwHBDfnmwcUwjpI8A/NlAT5c98=", + "" + ] +} diff --git a/vendor/github.com/xdg/scram/testdata/good/sha-256-saslprep-non-normal.json b/vendor/github.com/xdg/scram/testdata/good/sha-256-saslprep-non-normal.json new file mode 100644 index 0000000..5bea5e6 --- /dev/null +++ b/vendor/github.com/xdg/scram/testdata/good/sha-256-saslprep-non-normal.json @@ -0,0 +1,20 @@ +{ + "label" : "SHA-256 SASLprep 
non-normal", + "digest" : "SHA-256", + "user" : "ramo\u0301n", + "pass" : "p\u212bssword", + "authID" : "", + "skipSASLprep" : false, + "salt64" : "c2FsdFNBTFRzYWx0\n", + "iters" : 4096, + "clientNonce" : "clientNONCE", + "serverNonce" : "serverNONCE", + "valid" : true, + "steps" : [ + "n,,n=ram\u00f3n,r=clientNONCE", + "r=clientNONCEserverNONCE,s=c2FsdFNBTFRzYWx0,i=4096", + "c=biws,r=clientNONCEserverNONCE,p=KXgIc8B+d5k3zx1P4rfs4TiybIlv11O85Jl1TrzEsfI=", + "v=zG9u+MI5GPTROhnW/W1PUCKV4Uvp2SHzwFOZV9Hth/c=", + "" + ] +} diff --git a/vendor/github.com/xdg/scram/testdata/good/sha-256-saslprep-normal.json b/vendor/github.com/xdg/scram/testdata/good/sha-256-saslprep-normal.json new file mode 100644 index 0000000..cddf783 --- /dev/null +++ b/vendor/github.com/xdg/scram/testdata/good/sha-256-saslprep-normal.json @@ -0,0 +1,20 @@ +{ + "label" : "SHA-256 SASLprep normal", + "digest" : "SHA-256", + "user" : "ram\u00f5n", + "pass" : "p\u00c5assword", + "authID" : "", + "skipSASLprep" : false, + "salt64" : "c2FsdFNBTFRzYWx0\n", + "iters" : 4096, + "clientNonce" : "clientNONCE", + "serverNonce" : "serverNONCE", + "valid" : true, + "steps" : [ + "n,,n=ram\u00f5n,r=clientNONCE", + "r=clientNONCEserverNONCE,s=c2FsdFNBTFRzYWx0,i=4096", + "c=biws,r=clientNONCEserverNONCE,p=Km2zqmf/GbLdkItzscNI5D0c1f+GmLDi2fScTPm6d4k=", + "v=30soY0l2BiInoDyrHxIuamz2LBvci1lFKo/tOMpqo98=", + "" + ] +} diff --git a/vendor/github.com/xdg/scram/testdata_test.go b/vendor/github.com/xdg/scram/testdata_test.go new file mode 100644 index 0000000..03a1f8f --- /dev/null +++ b/vendor/github.com/xdg/scram/testdata_test.go @@ -0,0 +1,162 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package scram + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "path/filepath" + "strings" +) + +type TestCase struct { + Label string + Digest string + User string + Pass string + AuthzID string + SkipSASLprep bool + Salt64 string + Iters int + ClientNonce string + ServerNonce string + Valid bool + Steps []string +} + +type testStep struct { + Input string + Expect string + IsError bool +} + +func getHGF(s string) (HashGeneratorFcn, error) { + switch s { + case "SHA-1": + return SHA1, nil + case "SHA-256": + return SHA256, nil + default: + panic(fmt.Sprintf("Unknown hash function '%s'", s)) + } +} + +func decodeFile(s string) (TestCase, error) { + var tc TestCase + + data, err := ioutil.ReadFile(s) + if err != nil { + return tc, err + } + + err = json.Unmarshal(data, &tc) + if err != nil { + return tc, fmt.Errorf("error unmarshaling '%s': %v", s, err) + } + + return tc, nil +} + +func getTestFiles(dir string) ([]string, error) { + subdir := filepath.Join("testdata", dir) + files, err := ioutil.ReadDir(subdir) + if err != nil { + return nil, err + } + + filenames := make([]string, len(files)) + for i, v := range files { + filenames[i] = filepath.Join(subdir, v.Name()) + } + + return filenames, nil +} + +func getTestData(dirs ...string) ([]TestCase, error) { + var err error + filenames := make([]string, 0) + for _, v := range dirs { + names, err := getTestFiles(v) + if err != nil { + return nil, err + } + filenames = append(filenames, names...) + } + + cases := make([]TestCase, len(filenames)) + for i, v := range filenames { + cases[i], err = decodeFile(v) + if err != nil { + return nil, err + } + } + + return cases, nil +} + +// Even steps are client messages; odd steps are server responses. 
+func clientSteps(c TestCase) []testStep { + n := len(c.Steps) + + // Test case requires at least two steps: the first client step + // (which cannot fail) and the first server response -- after which + // an error would prevent further client steps. + if n < 2 { + panic("Incomplete conversation for this test case") + } + + // First step needs empty input. + steps := []testStep{{Input: "", Expect: c.Steps[0]}} + + // From i==1 until end, construct conversations from pairs of steps. We + // know that (n >= 2). If the last pair is incomplete (no client Expect) + // that indicates error. + last := n - 1 + for i := 1; i <= last; i += 2 { + steps = append(steps, assembleStep(c, i, last)) + } + + return steps +} + +// Even steps are client messages; odd steps are server responses. +func serverSteps(c TestCase) []testStep { + n := len(c.Steps) + + // Test case requires at least one step: the first client step + // after which an error would prevent further server steps. + if n == 0 { + panic("Incomplete conversation for this test case") + } + + steps := make([]testStep, 0, 1) + + // From i==0 until end, construct conversations from pairs of steps. We + // know that (n >= 1). If the last pair is incomplete (no server Expect) + // that indicates error. 
+ last := n - 1 + for i := 0; i < last; i += 2 { + ts := assembleStep(c, i, last) + steps = append(steps, ts) + } + + return steps +} + +func assembleStep(c TestCase, i int, last int) testStep { + ts := testStep{Input: c.Steps[i]} + if i == last { + ts.IsError = true + } else { + ts.Expect = c.Steps[i+1] + if strings.HasPrefix(ts.Expect, "e=") { + ts.IsError = true + } + } + return ts +} diff --git a/vendor/github.com/xdg/stringprep/.gitignore b/vendor/github.com/xdg/stringprep/.gitignore new file mode 100644 index 0000000..e69de29 diff --git a/vendor/github.com/xdg/stringprep/.travis.yml b/vendor/github.com/xdg/stringprep/.travis.yml new file mode 100644 index 0000000..f391327 --- /dev/null +++ b/vendor/github.com/xdg/stringprep/.travis.yml @@ -0,0 +1,11 @@ +language: go +sudo: false +go: + - "1.7" + - "1.8" + - "1.9" + - "1.10" + - master +matrix: + allow_failures: + - go: master diff --git a/vendor/github.com/xdg/stringprep/LICENSE b/vendor/github.com/xdg/stringprep/LICENSE new file mode 100644 index 0000000..67db858 --- /dev/null +++ b/vendor/github.com/xdg/stringprep/LICENSE @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
diff --git a/vendor/github.com/xdg/stringprep/README.md b/vendor/github.com/xdg/stringprep/README.md new file mode 100644 index 0000000..87279e3 --- /dev/null +++ b/vendor/github.com/xdg/stringprep/README.md @@ -0,0 +1,27 @@ +[![GoDoc](https://godoc.org/github.com/xdg/stringprep?status.svg)](https://godoc.org/github.com/xdg/stringprep) +[![Build Status](https://travis-ci.org/xdg/stringprep.svg?branch=master)](https://travis-ci.org/xdg/stringprep) + +# stringprep – Go implementation of RFC-3454 stringprep and RFC-4013 SASLprep + +## Synopsis + +``` + import "github.com/xdg/stringprep" + + prepped := stringprep.SASLprep.Prepare("TrustNô1") + +``` + +## Description + +This library provides an implementation of the stringprep algorithm +(RFC-3454) in Go, including all data tables. + +A pre-built SASLprep (RFC-4013) profile is provided as well. + +## Copyright and License + +Copyright 2018 by David A. Golden. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"). You may +obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 diff --git a/vendor/github.com/xdg/stringprep/bidi.go b/vendor/github.com/xdg/stringprep/bidi.go new file mode 100644 index 0000000..6f6d321 --- /dev/null +++ b/vendor/github.com/xdg/stringprep/bidi.go @@ -0,0 +1,73 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0

package stringprep

// Messages for bidirectional-text (BiDi) rule violations; wrapped in the
// package's Error type together with the offending rune.
var errHasLCat = "BiDi string can't have runes from category L"
var errFirstRune = "BiDi string first rune must have category R or AL"
var errLastRune = "BiDi string last rune must have category R or AL"

// Check for prohibited characters from table C.8
func checkBiDiProhibitedRune(s string) error {
	for _, r := range s {
		if TableC8.Contains(r) {
			return Error{Msg: errProhibited, Rune: r}
		}
	}
	return nil
}

// Check for LCat characters from table D.2
func checkBiDiLCat(s string) error {
	for _, r := range s {
		if TableD2.Contains(r) {
			return Error{Msg: errHasLCat, Rune: r}
		}
	}
	return nil
}

// Check first and last characters are in table D.1; requires non-empty string
// (indexing rs[0] and rs[n] would panic on an empty string; the only caller,
// passesBiDiRules, guards against that).
func checkBadFirstAndLastRandALCat(s string) error {
	rs := []rune(s)
	if !TableD1.Contains(rs[0]) {
		return Error{Msg: errFirstRune, Rune: rs[0]}
	}
	n := len(rs) - 1
	if !TableD1.Contains(rs[n]) {
		return Error{Msg: errLastRune, Rune: rs[n]}
	}
	return nil
}

// Look for RandALCat characters from table D.1
func hasBiDiRandALCat(s string) bool {
	for _, r := range s {
		if TableD1.Contains(r) {
			return true
		}
	}
	return false
}

// Check that BiDi rules are satisfied ; let empty string pass this rule
func passesBiDiRules(s string) error {
	if len(s) == 0 {
		return nil
	}
	if err := checkBiDiProhibitedRune(s); err != nil {
		return err
	}
	// The no-LCat and first/last-rune checks apply only when the string
	// contains at least one RandALCat (right-to-left) rune from table D.1.
	if hasBiDiRandALCat(s) {
		if err := checkBiDiLCat(s); err != nil {
			return err
		}
		if err := checkBadFirstAndLastRandALCat(s); err != nil {
			return err
		}
	}
	return nil
}
diff --git a/vendor/github.com/xdg/stringprep/doc.go b/vendor/github.com/xdg/stringprep/doc.go new file mode 100644 index 0000000..b319e08 --- /dev/null +++ b/vendor/github.com/xdg/stringprep/doc.go @@ -0,0 +1,10 @@ +// Copyright 2018 by David A. Golden. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0

// Package stringprep provides data tables and algorithms for RFC-3454,
// including errata (as of 2018-02). It also provides a profile for
// SASLprep as defined in RFC-4013.
package stringprep
diff --git a/vendor/github.com/xdg/stringprep/error.go b/vendor/github.com/xdg/stringprep/error.go new file mode 100644 index 0000000..7403e49 --- /dev/null +++ b/vendor/github.com/xdg/stringprep/error.go @@ -0,0 +1,14 @@ +package stringprep

import "fmt"

// Error describes problems encountered during stringprep, including what rune
// was problematic.
type Error struct {
	Msg  string // description of the rule that was violated
	Rune rune   // the offending rune
}

// Error implements the error interface, formatting the rune as '\uXXXX'.
func (e Error) Error() string {
	return fmt.Sprintf("%s (rune: '\\u%04x')", e.Msg, e.Rune)
}
diff --git a/vendor/github.com/xdg/stringprep/examples_test.go b/vendor/github.com/xdg/stringprep/examples_test.go new file mode 100644 index 0000000..7de1ed1 --- /dev/null +++ b/vendor/github.com/xdg/stringprep/examples_test.go @@ -0,0 +1,39 @@ +package stringprep_test

import (
	"fmt"

	"github.com/xdg/stringprep"
)

// Example_customProfile demonstrates assembling a custom stringprep profile
// from the exported mapping and prohibition tables.
func Example_customProfile() {
	customProfile := stringprep.Profile{
		Mappings: []stringprep.Mapping{
			stringprep.TableB1,
			stringprep.TableB2,
		},
		Normalize: true,
		Prohibits: []stringprep.Set{
			stringprep.TableC1_1,
			stringprep.TableC1_2,
		},
		CheckBiDi: true,
	}

	prepped, err := customProfile.Prepare("TrustNô1")
	if err != nil {
		panic("stringprep failed")
	}

	fmt.Print(prepped)
	// Output: trustnô1
}

// Example_sASLprep demonstrates the prebuilt SASLprep profile.
func Example_sASLprep() {
	prepped, err := stringprep.SASLprep.Prepare("TrustNô1")
	if err != nil {
		panic("SASLprep failed")
	}
	fmt.Print(prepped)
	// Output: TrustNô1
}
diff --git a/vendor/github.com/xdg/stringprep/map.go b/vendor/github.com/xdg/stringprep/map.go new file mode 100644 index 0000000..e56a0dd --- /dev/null +++ b/vendor/github.com/xdg/stringprep/map.go @@ -0,0 +1,21 @@ +// Copyright 2018 by David A. Golden. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0

package stringprep

// Mapping represents a stringprep mapping, from a single rune to zero or more
// runes.
type Mapping map[rune][]rune

// Map maps a rune to a (possibly empty) rune slice via a stringprep Mapping.
// The ok return value is false if the rune was not found.
func (m Mapping) Map(r rune) (replacement []rune, ok bool) {
	rs, ok := m[r]
	if !ok {
		return nil, false
	}
	return rs, true
}
diff --git a/vendor/github.com/xdg/stringprep/map_test.go b/vendor/github.com/xdg/stringprep/map_test.go new file mode 100644 index 0000000..6969c99 --- /dev/null +++ b/vendor/github.com/xdg/stringprep/map_test.go @@ -0,0 +1,51 @@ +package stringprep

import (
	"fmt"
	"reflect"
	"testing"
)

func TestMapping(t *testing.T) {
	mappingTests := []struct {
		label  string
		table  Mapping
		in     rune
		exists bool
		out    []rune
	}{
		// Table B1
		{label: "B1", table: TableB1, in: 0x00AD, exists: true, out: []rune{}},
		{label: "B1", table: TableB1, in: 0x0040, exists: false, out: nil},
		// Table B2
		{label: "B2", table: TableB2, in: 0x0043, exists: true, out: []rune{0x0063}},
		{label: "B2", table: TableB2, in: 0x00DF, exists: true, out: []rune{0x0073, 0x0073}},
		{label: "B2", table: TableB2, in: 0x1F56, exists: true, out: []rune{0x03C5, 0x0313, 0x0342}},
		{label: "B2", table: TableB2, in: 0x0040, exists: false, out: nil},
		// Table B3
		{label: "B3", table: TableB3, in: 0x1FF7, exists: true, out: []rune{0x03C9, 0x0342, 0x03B9}},
		{label: "B3", table: TableB3, in: 0x0040, exists: false, out: nil},
	}

	for _, c := range mappingTests {
		t.Run(fmt.Sprintf("%s 
0x%04x", c.label, c.in), func(t *testing.T) { + got, ok := c.table.Map(c.in) + switch c.exists { + case true: + if !ok { + t.Errorf("input '0x%04x' was not found, but should have been", c.in) + } + if !reflect.DeepEqual(got, c.out) { + t.Errorf("input '0x%04x' was %v, expected %v", c.in, got, c.out) + } + case false: + if ok { + t.Errorf("input '0x%04x' was found, but should not have been", c.in) + } + if got != nil { + t.Errorf("input '0x%04x' was %v, expected %v", c.in, got, c.out) + } + } + }) + } +} diff --git a/vendor/github.com/xdg/stringprep/profile.go b/vendor/github.com/xdg/stringprep/profile.go new file mode 100644 index 0000000..5a73be9 --- /dev/null +++ b/vendor/github.com/xdg/stringprep/profile.go @@ -0,0 +1,75 @@ +package stringprep + +import ( + "golang.org/x/text/unicode/norm" +) + +// Profile represents a stringprep profile. +type Profile struct { + Mappings []Mapping + Normalize bool + Prohibits []Set + CheckBiDi bool +} + +var errProhibited = "prohibited character" + +// Prepare transforms an input string to an output string following +// the rules defined in the profile as defined by RFC-3454. +func (p Profile) Prepare(s string) (string, error) { + // Optimistically, assume output will be same length as input + temp := make([]rune, 0, len(s)) + + // Apply maps + for _, r := range s { + rs, ok := p.applyMaps(r) + if ok { + temp = append(temp, rs...) 
+ } else { + temp = append(temp, r) + } + } + + // Normalize + var out string + if p.Normalize { + out = norm.NFKC.String(string(temp)) + } else { + out = string(temp) + } + + // Check prohibited + for _, r := range out { + if p.runeIsProhibited(r) { + return "", Error{Msg: errProhibited, Rune: r} + } + } + + // Check BiDi allowed + if p.CheckBiDi { + if err := passesBiDiRules(out); err != nil { + return "", err + } + } + + return out, nil +} + +func (p Profile) applyMaps(r rune) ([]rune, bool) { + for _, m := range p.Mappings { + rs, ok := m.Map(r) + if ok { + return rs, true + } + } + return nil, false +} + +func (p Profile) runeIsProhibited(r rune) bool { + for _, s := range p.Prohibits { + if s.Contains(r) { + return true + } + } + return false +} diff --git a/vendor/github.com/xdg/stringprep/profile_test.go b/vendor/github.com/xdg/stringprep/profile_test.go new file mode 100644 index 0000000..a9ee813 --- /dev/null +++ b/vendor/github.com/xdg/stringprep/profile_test.go @@ -0,0 +1,107 @@ +package stringprep + +import ( + "reflect" + "strconv" + "testing" +) + +func TestProfiles(t *testing.T) { + p1 := Profile{ + Mappings: []Mapping{TableB1, TableB2}, + Normalize: true, + Prohibits: []Set{TableC1_1, TableC1_2, TableC6}, + CheckBiDi: true, + } + + p2 := Profile{ + Mappings: []Mapping{}, + Normalize: false, + Prohibits: []Set{}, + CheckBiDi: false, + } + + profileTests := []struct { + label string + profile Profile + in string + out string + err error + }{ + {label: "p1: empty", profile: p1, in: "", out: "", err: nil}, + {label: "p1: ascii", profile: p1, in: "user", out: "user", err: nil}, + {label: "p1: zws", profile: p1, in: "u\u200Ber", out: "uer", err: nil}, + {label: "p1: sharp", profile: p1, in: "u\u00DFer", out: "usser", err: nil}, + {label: "p1: nfkc", profile: p1, in: "ua\u030Aer", out: "u\u00e5er", err: nil}, + { + label: "p1: replacement", + profile: p1, + in: "u\uFFFDer", + out: "", + err: Error{Msg: errProhibited, Rune: '\uFFFD'}, + }, + { + label: 
"p1: bidi ok", + profile: p1, + in: "\u0627\u0031\u0628", + out: "\u0627\u0031\u0628", + err: nil, + }, + { + label: "p1: bidi not ok RandAL with L", + profile: p1, + in: "\u0627\u0589\u0628", + out: "", + err: Error{Msg: errHasLCat, Rune: '\u0589'}, + }, + { + label: "p1: bidi bad first rune", + profile: p1, + in: "\u0031\u0627", + out: "", + err: Error{Msg: errFirstRune, Rune: '\u0031'}, + }, + { + label: "p1: bidi bad last rune", + profile: p1, + in: "\u0627\u0031", + out: "", + err: Error{Msg: errLastRune, Rune: '\u0031'}, + }, + { + label: "p1: bidi bad chars", + profile: p1, + in: "\u206D", + out: "", + err: Error{Msg: errProhibited, Rune: '\u206D'}, + }, + { + label: "p2: bidi not checked", + profile: p2, + in: "\u0627\u0031", + out: "\u0627\u0031", + err: nil, + }, + {label: "p2: no nfkc", profile: p2, in: "ua\u030Aer", out: "ua\u030Aer", err: nil}, + } + + for _, c := range profileTests { + t.Run(c.label, func(t *testing.T) { + got, err := c.profile.Prepare(c.in) + if c.err == nil { + if got != c.out { + t.Errorf("input '%s': got '%s', expected '%s'", + strconv.QuoteToASCII(c.in), + strconv.QuoteToASCII(got), + strconv.QuoteToASCII(c.out)) + } + } else { + if !reflect.DeepEqual(err, c.err) { + t.Errorf("input '%s': got error '%v', expected '%v'", + strconv.QuoteToASCII(c.in), err, c.err) + } + } + + }) + } +} diff --git a/vendor/github.com/xdg/stringprep/saslprep.go b/vendor/github.com/xdg/stringprep/saslprep.go new file mode 100644 index 0000000..4001348 --- /dev/null +++ b/vendor/github.com/xdg/stringprep/saslprep.go @@ -0,0 +1,52 @@ +package stringprep + +var mapNonASCIISpaceToASCIISpace = Mapping{ + 0x00A0: []rune{0x0020}, + 0x1680: []rune{0x0020}, + 0x2000: []rune{0x0020}, + 0x2001: []rune{0x0020}, + 0x2002: []rune{0x0020}, + 0x2003: []rune{0x0020}, + 0x2004: []rune{0x0020}, + 0x2005: []rune{0x0020}, + 0x2006: []rune{0x0020}, + 0x2007: []rune{0x0020}, + 0x2008: []rune{0x0020}, + 0x2009: []rune{0x0020}, + 0x200A: []rune{0x0020}, + 0x200B: 
[]rune{0x0020}, + 0x202F: []rune{0x0020}, + 0x205F: []rune{0x0020}, + 0x3000: []rune{0x0020}, +} + +// SASLprep is a pre-defined stringprep profile for user names and passwords +// as described in RFC-4013. +// +// Because the stringprep distinction between query and stored strings was +// intended for compatibility across profile versions, but SASLprep was never +// updated and is now deprecated, this profile only operates in stored +// strings mode, prohibiting unassigned code points. +var SASLprep Profile = saslprep + +var saslprep = Profile{ + Mappings: []Mapping{ + TableB1, + mapNonASCIISpaceToASCIISpace, + }, + Normalize: true, + Prohibits: []Set{ + TableA1, + TableC1_2, + TableC2_1, + TableC2_2, + TableC3, + TableC4, + TableC5, + TableC6, + TableC7, + TableC8, + TableC9, + }, + CheckBiDi: true, +} diff --git a/vendor/github.com/xdg/stringprep/saslprep_test.go b/vendor/github.com/xdg/stringprep/saslprep_test.go new file mode 100644 index 0000000..92db001 --- /dev/null +++ b/vendor/github.com/xdg/stringprep/saslprep_test.go @@ -0,0 +1,56 @@ +package stringprep + +import ( + "reflect" + "strconv" + "testing" +) + +func TestSASLprep(t *testing.T) { + saslTests := []struct { + label string + in string + out string + err error + }{ + {label: "soft hyphen", in: "I\u00ADX", out: "IX", err: nil}, + {label: "non ASCII space", in: "I\u2000X", out: "I X", err: nil}, + {label: "no transform", in: "user", out: "user", err: nil}, + {label: "case preserve", in: "USER", out: "USER", err: nil}, + {label: "8859-1 to NFKC", in: "\u00AA", out: "a", err: nil}, + {label: "NFKC", in: "\u2168", out: "IX", err: nil}, + { + label: "prohibited", + in: "\u0007", + out: "", + err: Error{Msg: errProhibited, Rune: '\u0007'}, + }, + { + label: "bidi not ok", + in: "\u0627\u0031", + out: "", + err: Error{Msg: errLastRune, Rune: '\u0031'}, + }, + } + + for _, c := range saslTests { + t.Run(c.label, func(t *testing.T) { + got, err := SASLprep.Prepare(c.in) + t.Logf("err is '%v'", err) + if 
c.err == nil { + if got != c.out { + t.Errorf("input '%s': got '%s', expected '%s'", + strconv.QuoteToASCII(c.in), + strconv.QuoteToASCII(got), + strconv.QuoteToASCII(c.out)) + } + } else { + if !reflect.DeepEqual(err, c.err) { + t.Errorf("input '%s': got error '%v', expected '%v'", + strconv.QuoteToASCII(c.in), err, c.err) + } + } + + }) + } +} diff --git a/vendor/github.com/xdg/stringprep/set.go b/vendor/github.com/xdg/stringprep/set.go new file mode 100644 index 0000000..c837e28 --- /dev/null +++ b/vendor/github.com/xdg/stringprep/set.go @@ -0,0 +1,36 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package stringprep + +import "sort" + +// RuneRange represents a close-ended range of runes: [N,M]. For a range +// consisting of a single rune, N and M will be equal. +type RuneRange [2]rune + +// Contains returns true if a rune is within the bounds of the RuneRange. +func (rr RuneRange) Contains(r rune) bool { + return rr[0] <= r && r <= rr[1] +} + +func (rr RuneRange) isAbove(r rune) bool { + return r <= rr[0] +} + +// Set represents a stringprep data table used to identify runes of a +// particular type. +type Set []RuneRange + +// Contains returns true if a rune is within any of the RuneRanges in the +// Set. 
+func (s Set) Contains(r rune) bool { + i := sort.Search(len(s), func(i int) bool { return s[i].Contains(r) || s[i].isAbove(r) }) + if i < len(s) && s[i].Contains(r) { + return true + } + return false +} diff --git a/vendor/github.com/xdg/stringprep/set_test.go b/vendor/github.com/xdg/stringprep/set_test.go new file mode 100644 index 0000000..139bdc3 --- /dev/null +++ b/vendor/github.com/xdg/stringprep/set_test.go @@ -0,0 +1,85 @@ +package stringprep + +import ( + "fmt" + "testing" +) + +func TestRanges(t *testing.T) { + rangeTests := []struct { + label string + table Set + in rune + out bool + }{ + // Table A.1 + {label: "A1", table: TableA1, in: 0x0221, out: true}, + {label: "A1", table: TableA1, in: 0x0955, out: true}, + {label: "A1", table: TableA1, in: 0x0956, out: true}, + {label: "A1", table: TableA1, in: 0x0957, out: true}, + {label: "A1", table: TableA1, in: 0x0020, out: false}, + // Table C.1.1 + {label: "C1.1", table: TableC1_1, in: 0x0020, out: true}, + {label: "C1.1", table: TableC1_1, in: 0x0040, out: false}, + // Table C.1.2 + {label: "C1.2", table: TableC1_2, in: 0x200A, out: true}, + {label: "C1.2", table: TableC1_2, in: 0x0040, out: false}, + // Table C.2.1 + {label: "C2.1", table: TableC2_1, in: 0x0000, out: true}, + {label: "C2.1", table: TableC2_1, in: 0x0010, out: true}, + {label: "C2.1", table: TableC2_1, in: 0x001F, out: true}, + {label: "C2.1", table: TableC2_1, in: 0x007F, out: true}, + {label: "C2.1", table: TableC2_1, in: 0x0040, out: false}, + // Table C.2.2 + {label: "C2.2", table: TableC2_2, in: 0x0080, out: true}, + {label: "C2.2", table: TableC2_2, in: 0x0090, out: true}, + {label: "C2.2", table: TableC2_2, in: 0x009F, out: true}, + {label: "C2.2", table: TableC2_2, in: 0x2028, out: true}, + {label: "C2.2", table: TableC2_2, in: 0xFEFF, out: true}, + {label: "C2.2", table: TableC2_2, in: 0x1D173, out: true}, + {label: "C2.2", table: TableC2_2, in: 0x1D17A, out: true}, + {label: "C2.2", table: TableC2_2, in: 0x0040, out: false}, + // 
Table C.3 + {label: "C3", table: TableC3, in: 0xE000, out: true}, + {label: "C3", table: TableC3, in: 0xF000, out: true}, + {label: "C3", table: TableC3, in: 0xF8FF, out: true}, + {label: "C3", table: TableC3, in: 0xF0000, out: true}, + {label: "C3", table: TableC3, in: 0xF1000, out: true}, + {label: "C3", table: TableC3, in: 0xFFFFD, out: true}, + {label: "C3", table: TableC3, in: 0x100000, out: true}, + {label: "C3", table: TableC3, in: 0x10ABCD, out: true}, + {label: "C3", table: TableC3, in: 0x10FFFD, out: true}, + {label: "C3", table: TableC3, in: 0x0040, out: false}, + // Table C.4 + {label: "C4", table: TableC4, in: 0xFDD0, out: true}, + {label: "C4", table: TableC4, in: 0xFFFF, out: true}, + {label: "C4", table: TableC4, in: 0x0040, out: false}, + // Table C.5 + {label: "C5", table: TableC5, in: 0xD801, out: true}, + {label: "C5", table: TableC5, in: 0x0040, out: false}, + // Table C.6 + {label: "C6", table: TableC6, in: 0xFFFA, out: true}, + {label: "C6", table: TableC6, in: 0x0040, out: false}, + // Table C.7 + {label: "C7", table: TableC7, in: 0x2FFB, out: true}, + {label: "C7", table: TableC7, in: 0x0040, out: false}, + // Table C.8 + {label: "C8", table: TableC8, in: 0x0341, out: true}, + {label: "C8", table: TableC8, in: 0x0040, out: false}, + // Table C.9 + {label: "C9", table: TableC9, in: 0xE0001, out: true}, + {label: "C9", table: TableC9, in: 0xE007E, out: true}, + {label: "C9", table: TableC9, in: 0x0040, out: false}, + // Table D.1 + {label: "D1", table: TableD1, in: 0x200F, out: true}, + {label: "D1", table: TableD1, in: 0x0040, out: false}, + } + + for _, c := range rangeTests { + t.Run(fmt.Sprintf("%s 0x%04x", c.label, c.in), func(t *testing.T) { + if got := c.table.Contains(c.in); got != c.out { + t.Errorf("input '0x%04x' was %v, expected %v", c.in, got, c.out) + } + }) + } +} diff --git a/vendor/github.com/xdg/stringprep/tables.go b/vendor/github.com/xdg/stringprep/tables.go new file mode 100644 index 0000000..c3fc1fa --- /dev/null +++ 
b/vendor/github.com/xdg/stringprep/tables.go @@ -0,0 +1,3215 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package stringprep + +var tableA1 = Set{ + RuneRange{0x0221, 0x0221}, + RuneRange{0x0234, 0x024F}, + RuneRange{0x02AE, 0x02AF}, + RuneRange{0x02EF, 0x02FF}, + RuneRange{0x0350, 0x035F}, + RuneRange{0x0370, 0x0373}, + RuneRange{0x0376, 0x0379}, + RuneRange{0x037B, 0x037D}, + RuneRange{0x037F, 0x0383}, + RuneRange{0x038B, 0x038B}, + RuneRange{0x038D, 0x038D}, + RuneRange{0x03A2, 0x03A2}, + RuneRange{0x03CF, 0x03CF}, + RuneRange{0x03F7, 0x03FF}, + RuneRange{0x0487, 0x0487}, + RuneRange{0x04CF, 0x04CF}, + RuneRange{0x04F6, 0x04F7}, + RuneRange{0x04FA, 0x04FF}, + RuneRange{0x0510, 0x0530}, + RuneRange{0x0557, 0x0558}, + RuneRange{0x0560, 0x0560}, + RuneRange{0x0588, 0x0588}, + RuneRange{0x058B, 0x0590}, + RuneRange{0x05A2, 0x05A2}, + RuneRange{0x05BA, 0x05BA}, + RuneRange{0x05C5, 0x05CF}, + RuneRange{0x05EB, 0x05EF}, + RuneRange{0x05F5, 0x060B}, + RuneRange{0x060D, 0x061A}, + RuneRange{0x061C, 0x061E}, + RuneRange{0x0620, 0x0620}, + RuneRange{0x063B, 0x063F}, + RuneRange{0x0656, 0x065F}, + RuneRange{0x06EE, 0x06EF}, + RuneRange{0x06FF, 0x06FF}, + RuneRange{0x070E, 0x070E}, + RuneRange{0x072D, 0x072F}, + RuneRange{0x074B, 0x077F}, + RuneRange{0x07B2, 0x0900}, + RuneRange{0x0904, 0x0904}, + RuneRange{0x093A, 0x093B}, + RuneRange{0x094E, 0x094F}, + RuneRange{0x0955, 0x0957}, + RuneRange{0x0971, 0x0980}, + RuneRange{0x0984, 0x0984}, + RuneRange{0x098D, 0x098E}, + RuneRange{0x0991, 0x0992}, + RuneRange{0x09A9, 0x09A9}, + RuneRange{0x09B1, 0x09B1}, + RuneRange{0x09B3, 0x09B5}, + RuneRange{0x09BA, 0x09BB}, + RuneRange{0x09BD, 0x09BD}, + RuneRange{0x09C5, 0x09C6}, + RuneRange{0x09C9, 0x09CA}, + RuneRange{0x09CE, 0x09D6}, + 
RuneRange{0x09D8, 0x09DB}, + RuneRange{0x09DE, 0x09DE}, + RuneRange{0x09E4, 0x09E5}, + RuneRange{0x09FB, 0x0A01}, + RuneRange{0x0A03, 0x0A04}, + RuneRange{0x0A0B, 0x0A0E}, + RuneRange{0x0A11, 0x0A12}, + RuneRange{0x0A29, 0x0A29}, + RuneRange{0x0A31, 0x0A31}, + RuneRange{0x0A34, 0x0A34}, + RuneRange{0x0A37, 0x0A37}, + RuneRange{0x0A3A, 0x0A3B}, + RuneRange{0x0A3D, 0x0A3D}, + RuneRange{0x0A43, 0x0A46}, + RuneRange{0x0A49, 0x0A4A}, + RuneRange{0x0A4E, 0x0A58}, + RuneRange{0x0A5D, 0x0A5D}, + RuneRange{0x0A5F, 0x0A65}, + RuneRange{0x0A75, 0x0A80}, + RuneRange{0x0A84, 0x0A84}, + RuneRange{0x0A8C, 0x0A8C}, + RuneRange{0x0A8E, 0x0A8E}, + RuneRange{0x0A92, 0x0A92}, + RuneRange{0x0AA9, 0x0AA9}, + RuneRange{0x0AB1, 0x0AB1}, + RuneRange{0x0AB4, 0x0AB4}, + RuneRange{0x0ABA, 0x0ABB}, + RuneRange{0x0AC6, 0x0AC6}, + RuneRange{0x0ACA, 0x0ACA}, + RuneRange{0x0ACE, 0x0ACF}, + RuneRange{0x0AD1, 0x0ADF}, + RuneRange{0x0AE1, 0x0AE5}, + RuneRange{0x0AF0, 0x0B00}, + RuneRange{0x0B04, 0x0B04}, + RuneRange{0x0B0D, 0x0B0E}, + RuneRange{0x0B11, 0x0B12}, + RuneRange{0x0B29, 0x0B29}, + RuneRange{0x0B31, 0x0B31}, + RuneRange{0x0B34, 0x0B35}, + RuneRange{0x0B3A, 0x0B3B}, + RuneRange{0x0B44, 0x0B46}, + RuneRange{0x0B49, 0x0B4A}, + RuneRange{0x0B4E, 0x0B55}, + RuneRange{0x0B58, 0x0B5B}, + RuneRange{0x0B5E, 0x0B5E}, + RuneRange{0x0B62, 0x0B65}, + RuneRange{0x0B71, 0x0B81}, + RuneRange{0x0B84, 0x0B84}, + RuneRange{0x0B8B, 0x0B8D}, + RuneRange{0x0B91, 0x0B91}, + RuneRange{0x0B96, 0x0B98}, + RuneRange{0x0B9B, 0x0B9B}, + RuneRange{0x0B9D, 0x0B9D}, + RuneRange{0x0BA0, 0x0BA2}, + RuneRange{0x0BA5, 0x0BA7}, + RuneRange{0x0BAB, 0x0BAD}, + RuneRange{0x0BB6, 0x0BB6}, + RuneRange{0x0BBA, 0x0BBD}, + RuneRange{0x0BC3, 0x0BC5}, + RuneRange{0x0BC9, 0x0BC9}, + RuneRange{0x0BCE, 0x0BD6}, + RuneRange{0x0BD8, 0x0BE6}, + RuneRange{0x0BF3, 0x0C00}, + RuneRange{0x0C04, 0x0C04}, + RuneRange{0x0C0D, 0x0C0D}, + RuneRange{0x0C11, 0x0C11}, + RuneRange{0x0C29, 0x0C29}, + RuneRange{0x0C34, 0x0C34}, + RuneRange{0x0C3A, 0x0C3D}, 
+ RuneRange{0x0C45, 0x0C45}, + RuneRange{0x0C49, 0x0C49}, + RuneRange{0x0C4E, 0x0C54}, + RuneRange{0x0C57, 0x0C5F}, + RuneRange{0x0C62, 0x0C65}, + RuneRange{0x0C70, 0x0C81}, + RuneRange{0x0C84, 0x0C84}, + RuneRange{0x0C8D, 0x0C8D}, + RuneRange{0x0C91, 0x0C91}, + RuneRange{0x0CA9, 0x0CA9}, + RuneRange{0x0CB4, 0x0CB4}, + RuneRange{0x0CBA, 0x0CBD}, + RuneRange{0x0CC5, 0x0CC5}, + RuneRange{0x0CC9, 0x0CC9}, + RuneRange{0x0CCE, 0x0CD4}, + RuneRange{0x0CD7, 0x0CDD}, + RuneRange{0x0CDF, 0x0CDF}, + RuneRange{0x0CE2, 0x0CE5}, + RuneRange{0x0CF0, 0x0D01}, + RuneRange{0x0D04, 0x0D04}, + RuneRange{0x0D0D, 0x0D0D}, + RuneRange{0x0D11, 0x0D11}, + RuneRange{0x0D29, 0x0D29}, + RuneRange{0x0D3A, 0x0D3D}, + RuneRange{0x0D44, 0x0D45}, + RuneRange{0x0D49, 0x0D49}, + RuneRange{0x0D4E, 0x0D56}, + RuneRange{0x0D58, 0x0D5F}, + RuneRange{0x0D62, 0x0D65}, + RuneRange{0x0D70, 0x0D81}, + RuneRange{0x0D84, 0x0D84}, + RuneRange{0x0D97, 0x0D99}, + RuneRange{0x0DB2, 0x0DB2}, + RuneRange{0x0DBC, 0x0DBC}, + RuneRange{0x0DBE, 0x0DBF}, + RuneRange{0x0DC7, 0x0DC9}, + RuneRange{0x0DCB, 0x0DCE}, + RuneRange{0x0DD5, 0x0DD5}, + RuneRange{0x0DD7, 0x0DD7}, + RuneRange{0x0DE0, 0x0DF1}, + RuneRange{0x0DF5, 0x0E00}, + RuneRange{0x0E3B, 0x0E3E}, + RuneRange{0x0E5C, 0x0E80}, + RuneRange{0x0E83, 0x0E83}, + RuneRange{0x0E85, 0x0E86}, + RuneRange{0x0E89, 0x0E89}, + RuneRange{0x0E8B, 0x0E8C}, + RuneRange{0x0E8E, 0x0E93}, + RuneRange{0x0E98, 0x0E98}, + RuneRange{0x0EA0, 0x0EA0}, + RuneRange{0x0EA4, 0x0EA4}, + RuneRange{0x0EA6, 0x0EA6}, + RuneRange{0x0EA8, 0x0EA9}, + RuneRange{0x0EAC, 0x0EAC}, + RuneRange{0x0EBA, 0x0EBA}, + RuneRange{0x0EBE, 0x0EBF}, + RuneRange{0x0EC5, 0x0EC5}, + RuneRange{0x0EC7, 0x0EC7}, + RuneRange{0x0ECE, 0x0ECF}, + RuneRange{0x0EDA, 0x0EDB}, + RuneRange{0x0EDE, 0x0EFF}, + RuneRange{0x0F48, 0x0F48}, + RuneRange{0x0F6B, 0x0F70}, + RuneRange{0x0F8C, 0x0F8F}, + RuneRange{0x0F98, 0x0F98}, + RuneRange{0x0FBD, 0x0FBD}, + RuneRange{0x0FCD, 0x0FCE}, + RuneRange{0x0FD0, 0x0FFF}, + RuneRange{0x1022, 
0x1022}, + RuneRange{0x1028, 0x1028}, + RuneRange{0x102B, 0x102B}, + RuneRange{0x1033, 0x1035}, + RuneRange{0x103A, 0x103F}, + RuneRange{0x105A, 0x109F}, + RuneRange{0x10C6, 0x10CF}, + RuneRange{0x10F9, 0x10FA}, + RuneRange{0x10FC, 0x10FF}, + RuneRange{0x115A, 0x115E}, + RuneRange{0x11A3, 0x11A7}, + RuneRange{0x11FA, 0x11FF}, + RuneRange{0x1207, 0x1207}, + RuneRange{0x1247, 0x1247}, + RuneRange{0x1249, 0x1249}, + RuneRange{0x124E, 0x124F}, + RuneRange{0x1257, 0x1257}, + RuneRange{0x1259, 0x1259}, + RuneRange{0x125E, 0x125F}, + RuneRange{0x1287, 0x1287}, + RuneRange{0x1289, 0x1289}, + RuneRange{0x128E, 0x128F}, + RuneRange{0x12AF, 0x12AF}, + RuneRange{0x12B1, 0x12B1}, + RuneRange{0x12B6, 0x12B7}, + RuneRange{0x12BF, 0x12BF}, + RuneRange{0x12C1, 0x12C1}, + RuneRange{0x12C6, 0x12C7}, + RuneRange{0x12CF, 0x12CF}, + RuneRange{0x12D7, 0x12D7}, + RuneRange{0x12EF, 0x12EF}, + RuneRange{0x130F, 0x130F}, + RuneRange{0x1311, 0x1311}, + RuneRange{0x1316, 0x1317}, + RuneRange{0x131F, 0x131F}, + RuneRange{0x1347, 0x1347}, + RuneRange{0x135B, 0x1360}, + RuneRange{0x137D, 0x139F}, + RuneRange{0x13F5, 0x1400}, + RuneRange{0x1677, 0x167F}, + RuneRange{0x169D, 0x169F}, + RuneRange{0x16F1, 0x16FF}, + RuneRange{0x170D, 0x170D}, + RuneRange{0x1715, 0x171F}, + RuneRange{0x1737, 0x173F}, + RuneRange{0x1754, 0x175F}, + RuneRange{0x176D, 0x176D}, + RuneRange{0x1771, 0x1771}, + RuneRange{0x1774, 0x177F}, + RuneRange{0x17DD, 0x17DF}, + RuneRange{0x17EA, 0x17FF}, + RuneRange{0x180F, 0x180F}, + RuneRange{0x181A, 0x181F}, + RuneRange{0x1878, 0x187F}, + RuneRange{0x18AA, 0x1DFF}, + RuneRange{0x1E9C, 0x1E9F}, + RuneRange{0x1EFA, 0x1EFF}, + RuneRange{0x1F16, 0x1F17}, + RuneRange{0x1F1E, 0x1F1F}, + RuneRange{0x1F46, 0x1F47}, + RuneRange{0x1F4E, 0x1F4F}, + RuneRange{0x1F58, 0x1F58}, + RuneRange{0x1F5A, 0x1F5A}, + RuneRange{0x1F5C, 0x1F5C}, + RuneRange{0x1F5E, 0x1F5E}, + RuneRange{0x1F7E, 0x1F7F}, + RuneRange{0x1FB5, 0x1FB5}, + RuneRange{0x1FC5, 0x1FC5}, + RuneRange{0x1FD4, 0x1FD5}, + 
RuneRange{0x1FDC, 0x1FDC}, + RuneRange{0x1FF0, 0x1FF1}, + RuneRange{0x1FF5, 0x1FF5}, + RuneRange{0x1FFF, 0x1FFF}, + RuneRange{0x2053, 0x2056}, + RuneRange{0x2058, 0x205E}, + RuneRange{0x2064, 0x2069}, + RuneRange{0x2072, 0x2073}, + RuneRange{0x208F, 0x209F}, + RuneRange{0x20B2, 0x20CF}, + RuneRange{0x20EB, 0x20FF}, + RuneRange{0x213B, 0x213C}, + RuneRange{0x214C, 0x2152}, + RuneRange{0x2184, 0x218F}, + RuneRange{0x23CF, 0x23FF}, + RuneRange{0x2427, 0x243F}, + RuneRange{0x244B, 0x245F}, + RuneRange{0x24FF, 0x24FF}, + RuneRange{0x2614, 0x2615}, + RuneRange{0x2618, 0x2618}, + RuneRange{0x267E, 0x267F}, + RuneRange{0x268A, 0x2700}, + RuneRange{0x2705, 0x2705}, + RuneRange{0x270A, 0x270B}, + RuneRange{0x2728, 0x2728}, + RuneRange{0x274C, 0x274C}, + RuneRange{0x274E, 0x274E}, + RuneRange{0x2753, 0x2755}, + RuneRange{0x2757, 0x2757}, + RuneRange{0x275F, 0x2760}, + RuneRange{0x2795, 0x2797}, + RuneRange{0x27B0, 0x27B0}, + RuneRange{0x27BF, 0x27CF}, + RuneRange{0x27EC, 0x27EF}, + RuneRange{0x2B00, 0x2E7F}, + RuneRange{0x2E9A, 0x2E9A}, + RuneRange{0x2EF4, 0x2EFF}, + RuneRange{0x2FD6, 0x2FEF}, + RuneRange{0x2FFC, 0x2FFF}, + RuneRange{0x3040, 0x3040}, + RuneRange{0x3097, 0x3098}, + RuneRange{0x3100, 0x3104}, + RuneRange{0x312D, 0x3130}, + RuneRange{0x318F, 0x318F}, + RuneRange{0x31B8, 0x31EF}, + RuneRange{0x321D, 0x321F}, + RuneRange{0x3244, 0x3250}, + RuneRange{0x327C, 0x327E}, + RuneRange{0x32CC, 0x32CF}, + RuneRange{0x32FF, 0x32FF}, + RuneRange{0x3377, 0x337A}, + RuneRange{0x33DE, 0x33DF}, + RuneRange{0x33FF, 0x33FF}, + RuneRange{0x4DB6, 0x4DFF}, + RuneRange{0x9FA6, 0x9FFF}, + RuneRange{0xA48D, 0xA48F}, + RuneRange{0xA4C7, 0xABFF}, + RuneRange{0xD7A4, 0xD7FF}, + RuneRange{0xFA2E, 0xFA2F}, + RuneRange{0xFA6B, 0xFAFF}, + RuneRange{0xFB07, 0xFB12}, + RuneRange{0xFB18, 0xFB1C}, + RuneRange{0xFB37, 0xFB37}, + RuneRange{0xFB3D, 0xFB3D}, + RuneRange{0xFB3F, 0xFB3F}, + RuneRange{0xFB42, 0xFB42}, + RuneRange{0xFB45, 0xFB45}, + RuneRange{0xFBB2, 0xFBD2}, + RuneRange{0xFD40, 0xFD4F}, 
+ RuneRange{0xFD90, 0xFD91}, + RuneRange{0xFDC8, 0xFDCF}, + RuneRange{0xFDFD, 0xFDFF}, + RuneRange{0xFE10, 0xFE1F}, + RuneRange{0xFE24, 0xFE2F}, + RuneRange{0xFE47, 0xFE48}, + RuneRange{0xFE53, 0xFE53}, + RuneRange{0xFE67, 0xFE67}, + RuneRange{0xFE6C, 0xFE6F}, + RuneRange{0xFE75, 0xFE75}, + RuneRange{0xFEFD, 0xFEFE}, + RuneRange{0xFF00, 0xFF00}, + RuneRange{0xFFBF, 0xFFC1}, + RuneRange{0xFFC8, 0xFFC9}, + RuneRange{0xFFD0, 0xFFD1}, + RuneRange{0xFFD8, 0xFFD9}, + RuneRange{0xFFDD, 0xFFDF}, + RuneRange{0xFFE7, 0xFFE7}, + RuneRange{0xFFEF, 0xFFF8}, + RuneRange{0x10000, 0x102FF}, + RuneRange{0x1031F, 0x1031F}, + RuneRange{0x10324, 0x1032F}, + RuneRange{0x1034B, 0x103FF}, + RuneRange{0x10426, 0x10427}, + RuneRange{0x1044E, 0x1CFFF}, + RuneRange{0x1D0F6, 0x1D0FF}, + RuneRange{0x1D127, 0x1D129}, + RuneRange{0x1D1DE, 0x1D3FF}, + RuneRange{0x1D455, 0x1D455}, + RuneRange{0x1D49D, 0x1D49D}, + RuneRange{0x1D4A0, 0x1D4A1}, + RuneRange{0x1D4A3, 0x1D4A4}, + RuneRange{0x1D4A7, 0x1D4A8}, + RuneRange{0x1D4AD, 0x1D4AD}, + RuneRange{0x1D4BA, 0x1D4BA}, + RuneRange{0x1D4BC, 0x1D4BC}, + RuneRange{0x1D4C1, 0x1D4C1}, + RuneRange{0x1D4C4, 0x1D4C4}, + RuneRange{0x1D506, 0x1D506}, + RuneRange{0x1D50B, 0x1D50C}, + RuneRange{0x1D515, 0x1D515}, + RuneRange{0x1D51D, 0x1D51D}, + RuneRange{0x1D53A, 0x1D53A}, + RuneRange{0x1D53F, 0x1D53F}, + RuneRange{0x1D545, 0x1D545}, + RuneRange{0x1D547, 0x1D549}, + RuneRange{0x1D551, 0x1D551}, + RuneRange{0x1D6A4, 0x1D6A7}, + RuneRange{0x1D7CA, 0x1D7CD}, + RuneRange{0x1D800, 0x1FFFD}, + RuneRange{0x2A6D7, 0x2F7FF}, + RuneRange{0x2FA1E, 0x2FFFD}, + RuneRange{0x30000, 0x3FFFD}, + RuneRange{0x40000, 0x4FFFD}, + RuneRange{0x50000, 0x5FFFD}, + RuneRange{0x60000, 0x6FFFD}, + RuneRange{0x70000, 0x7FFFD}, + RuneRange{0x80000, 0x8FFFD}, + RuneRange{0x90000, 0x9FFFD}, + RuneRange{0xA0000, 0xAFFFD}, + RuneRange{0xB0000, 0xBFFFD}, + RuneRange{0xC0000, 0xCFFFD}, + RuneRange{0xD0000, 0xDFFFD}, + RuneRange{0xE0000, 0xE0000}, + RuneRange{0xE0002, 0xE001F}, + RuneRange{0xE0080, 
0xEFFFD}, +} + +// TableA1 represents RFC-3454 Table A.1. +var TableA1 Set = tableA1 + +var tableB1 = Mapping{ + 0x00AD: []rune{}, // Map to nothing + 0x034F: []rune{}, // Map to nothing + 0x180B: []rune{}, // Map to nothing + 0x180C: []rune{}, // Map to nothing + 0x180D: []rune{}, // Map to nothing + 0x200B: []rune{}, // Map to nothing + 0x200C: []rune{}, // Map to nothing + 0x200D: []rune{}, // Map to nothing + 0x2060: []rune{}, // Map to nothing + 0xFE00: []rune{}, // Map to nothing + 0xFE01: []rune{}, // Map to nothing + 0xFE02: []rune{}, // Map to nothing + 0xFE03: []rune{}, // Map to nothing + 0xFE04: []rune{}, // Map to nothing + 0xFE05: []rune{}, // Map to nothing + 0xFE06: []rune{}, // Map to nothing + 0xFE07: []rune{}, // Map to nothing + 0xFE08: []rune{}, // Map to nothing + 0xFE09: []rune{}, // Map to nothing + 0xFE0A: []rune{}, // Map to nothing + 0xFE0B: []rune{}, // Map to nothing + 0xFE0C: []rune{}, // Map to nothing + 0xFE0D: []rune{}, // Map to nothing + 0xFE0E: []rune{}, // Map to nothing + 0xFE0F: []rune{}, // Map to nothing + 0xFEFF: []rune{}, // Map to nothing +} + +// TableB1 represents RFC-3454 Table B.1. 
+var TableB1 Mapping = tableB1 + +var tableB2 = Mapping{ + 0x0041: []rune{0x0061}, // Case map + 0x0042: []rune{0x0062}, // Case map + 0x0043: []rune{0x0063}, // Case map + 0x0044: []rune{0x0064}, // Case map + 0x0045: []rune{0x0065}, // Case map + 0x0046: []rune{0x0066}, // Case map + 0x0047: []rune{0x0067}, // Case map + 0x0048: []rune{0x0068}, // Case map + 0x0049: []rune{0x0069}, // Case map + 0x004A: []rune{0x006A}, // Case map + 0x004B: []rune{0x006B}, // Case map + 0x004C: []rune{0x006C}, // Case map + 0x004D: []rune{0x006D}, // Case map + 0x004E: []rune{0x006E}, // Case map + 0x004F: []rune{0x006F}, // Case map + 0x0050: []rune{0x0070}, // Case map + 0x0051: []rune{0x0071}, // Case map + 0x0052: []rune{0x0072}, // Case map + 0x0053: []rune{0x0073}, // Case map + 0x0054: []rune{0x0074}, // Case map + 0x0055: []rune{0x0075}, // Case map + 0x0056: []rune{0x0076}, // Case map + 0x0057: []rune{0x0077}, // Case map + 0x0058: []rune{0x0078}, // Case map + 0x0059: []rune{0x0079}, // Case map + 0x005A: []rune{0x007A}, // Case map + 0x00B5: []rune{0x03BC}, // Case map + 0x00C0: []rune{0x00E0}, // Case map + 0x00C1: []rune{0x00E1}, // Case map + 0x00C2: []rune{0x00E2}, // Case map + 0x00C3: []rune{0x00E3}, // Case map + 0x00C4: []rune{0x00E4}, // Case map + 0x00C5: []rune{0x00E5}, // Case map + 0x00C6: []rune{0x00E6}, // Case map + 0x00C7: []rune{0x00E7}, // Case map + 0x00C8: []rune{0x00E8}, // Case map + 0x00C9: []rune{0x00E9}, // Case map + 0x00CA: []rune{0x00EA}, // Case map + 0x00CB: []rune{0x00EB}, // Case map + 0x00CC: []rune{0x00EC}, // Case map + 0x00CD: []rune{0x00ED}, // Case map + 0x00CE: []rune{0x00EE}, // Case map + 0x00CF: []rune{0x00EF}, // Case map + 0x00D0: []rune{0x00F0}, // Case map + 0x00D1: []rune{0x00F1}, // Case map + 0x00D2: []rune{0x00F2}, // Case map + 0x00D3: []rune{0x00F3}, // Case map + 0x00D4: []rune{0x00F4}, // Case map + 0x00D5: []rune{0x00F5}, // Case map + 0x00D6: []rune{0x00F6}, // Case map + 0x00D8: []rune{0x00F8}, // Case map + 
0x00D9: []rune{0x00F9}, // Case map + 0x00DA: []rune{0x00FA}, // Case map + 0x00DB: []rune{0x00FB}, // Case map + 0x00DC: []rune{0x00FC}, // Case map + 0x00DD: []rune{0x00FD}, // Case map + 0x00DE: []rune{0x00FE}, // Case map + 0x00DF: []rune{0x0073, 0x0073}, // Case map + 0x0100: []rune{0x0101}, // Case map + 0x0102: []rune{0x0103}, // Case map + 0x0104: []rune{0x0105}, // Case map + 0x0106: []rune{0x0107}, // Case map + 0x0108: []rune{0x0109}, // Case map + 0x010A: []rune{0x010B}, // Case map + 0x010C: []rune{0x010D}, // Case map + 0x010E: []rune{0x010F}, // Case map + 0x0110: []rune{0x0111}, // Case map + 0x0112: []rune{0x0113}, // Case map + 0x0114: []rune{0x0115}, // Case map + 0x0116: []rune{0x0117}, // Case map + 0x0118: []rune{0x0119}, // Case map + 0x011A: []rune{0x011B}, // Case map + 0x011C: []rune{0x011D}, // Case map + 0x011E: []rune{0x011F}, // Case map + 0x0120: []rune{0x0121}, // Case map + 0x0122: []rune{0x0123}, // Case map + 0x0124: []rune{0x0125}, // Case map + 0x0126: []rune{0x0127}, // Case map + 0x0128: []rune{0x0129}, // Case map + 0x012A: []rune{0x012B}, // Case map + 0x012C: []rune{0x012D}, // Case map + 0x012E: []rune{0x012F}, // Case map + 0x0130: []rune{0x0069, 0x0307}, // Case map + 0x0132: []rune{0x0133}, // Case map + 0x0134: []rune{0x0135}, // Case map + 0x0136: []rune{0x0137}, // Case map + 0x0139: []rune{0x013A}, // Case map + 0x013B: []rune{0x013C}, // Case map + 0x013D: []rune{0x013E}, // Case map + 0x013F: []rune{0x0140}, // Case map + 0x0141: []rune{0x0142}, // Case map + 0x0143: []rune{0x0144}, // Case map + 0x0145: []rune{0x0146}, // Case map + 0x0147: []rune{0x0148}, // Case map + 0x0149: []rune{0x02BC, 0x006E}, // Case map + 0x014A: []rune{0x014B}, // Case map + 0x014C: []rune{0x014D}, // Case map + 0x014E: []rune{0x014F}, // Case map + 0x0150: []rune{0x0151}, // Case map + 0x0152: []rune{0x0153}, // Case map + 0x0154: []rune{0x0155}, // Case map + 0x0156: []rune{0x0157}, // Case map + 0x0158: []rune{0x0159}, // Case map + 
0x015A: []rune{0x015B}, // Case map + 0x015C: []rune{0x015D}, // Case map + 0x015E: []rune{0x015F}, // Case map + 0x0160: []rune{0x0161}, // Case map + 0x0162: []rune{0x0163}, // Case map + 0x0164: []rune{0x0165}, // Case map + 0x0166: []rune{0x0167}, // Case map + 0x0168: []rune{0x0169}, // Case map + 0x016A: []rune{0x016B}, // Case map + 0x016C: []rune{0x016D}, // Case map + 0x016E: []rune{0x016F}, // Case map + 0x0170: []rune{0x0171}, // Case map + 0x0172: []rune{0x0173}, // Case map + 0x0174: []rune{0x0175}, // Case map + 0x0176: []rune{0x0177}, // Case map + 0x0178: []rune{0x00FF}, // Case map + 0x0179: []rune{0x017A}, // Case map + 0x017B: []rune{0x017C}, // Case map + 0x017D: []rune{0x017E}, // Case map + 0x017F: []rune{0x0073}, // Case map + 0x0181: []rune{0x0253}, // Case map + 0x0182: []rune{0x0183}, // Case map + 0x0184: []rune{0x0185}, // Case map + 0x0186: []rune{0x0254}, // Case map + 0x0187: []rune{0x0188}, // Case map + 0x0189: []rune{0x0256}, // Case map + 0x018A: []rune{0x0257}, // Case map + 0x018B: []rune{0x018C}, // Case map + 0x018E: []rune{0x01DD}, // Case map + 0x018F: []rune{0x0259}, // Case map + 0x0190: []rune{0x025B}, // Case map + 0x0191: []rune{0x0192}, // Case map + 0x0193: []rune{0x0260}, // Case map + 0x0194: []rune{0x0263}, // Case map + 0x0196: []rune{0x0269}, // Case map + 0x0197: []rune{0x0268}, // Case map + 0x0198: []rune{0x0199}, // Case map + 0x019C: []rune{0x026F}, // Case map + 0x019D: []rune{0x0272}, // Case map + 0x019F: []rune{0x0275}, // Case map + 0x01A0: []rune{0x01A1}, // Case map + 0x01A2: []rune{0x01A3}, // Case map + 0x01A4: []rune{0x01A5}, // Case map + 0x01A6: []rune{0x0280}, // Case map + 0x01A7: []rune{0x01A8}, // Case map + 0x01A9: []rune{0x0283}, // Case map + 0x01AC: []rune{0x01AD}, // Case map + 0x01AE: []rune{0x0288}, // Case map + 0x01AF: []rune{0x01B0}, // Case map + 0x01B1: []rune{0x028A}, // Case map + 0x01B2: []rune{0x028B}, // Case map + 0x01B3: []rune{0x01B4}, // Case map + 0x01B5: []rune{0x01B6}, 
// Case map + 0x01B7: []rune{0x0292}, // Case map + 0x01B8: []rune{0x01B9}, // Case map + 0x01BC: []rune{0x01BD}, // Case map + 0x01C4: []rune{0x01C6}, // Case map + 0x01C5: []rune{0x01C6}, // Case map + 0x01C7: []rune{0x01C9}, // Case map + 0x01C8: []rune{0x01C9}, // Case map + 0x01CA: []rune{0x01CC}, // Case map + 0x01CB: []rune{0x01CC}, // Case map + 0x01CD: []rune{0x01CE}, // Case map + 0x01CF: []rune{0x01D0}, // Case map + 0x01D1: []rune{0x01D2}, // Case map + 0x01D3: []rune{0x01D4}, // Case map + 0x01D5: []rune{0x01D6}, // Case map + 0x01D7: []rune{0x01D8}, // Case map + 0x01D9: []rune{0x01DA}, // Case map + 0x01DB: []rune{0x01DC}, // Case map + 0x01DE: []rune{0x01DF}, // Case map + 0x01E0: []rune{0x01E1}, // Case map + 0x01E2: []rune{0x01E3}, // Case map + 0x01E4: []rune{0x01E5}, // Case map + 0x01E6: []rune{0x01E7}, // Case map + 0x01E8: []rune{0x01E9}, // Case map + 0x01EA: []rune{0x01EB}, // Case map + 0x01EC: []rune{0x01ED}, // Case map + 0x01EE: []rune{0x01EF}, // Case map + 0x01F0: []rune{0x006A, 0x030C}, // Case map + 0x01F1: []rune{0x01F3}, // Case map + 0x01F2: []rune{0x01F3}, // Case map + 0x01F4: []rune{0x01F5}, // Case map + 0x01F6: []rune{0x0195}, // Case map + 0x01F7: []rune{0x01BF}, // Case map + 0x01F8: []rune{0x01F9}, // Case map + 0x01FA: []rune{0x01FB}, // Case map + 0x01FC: []rune{0x01FD}, // Case map + 0x01FE: []rune{0x01FF}, // Case map + 0x0200: []rune{0x0201}, // Case map + 0x0202: []rune{0x0203}, // Case map + 0x0204: []rune{0x0205}, // Case map + 0x0206: []rune{0x0207}, // Case map + 0x0208: []rune{0x0209}, // Case map + 0x020A: []rune{0x020B}, // Case map + 0x020C: []rune{0x020D}, // Case map + 0x020E: []rune{0x020F}, // Case map + 0x0210: []rune{0x0211}, // Case map + 0x0212: []rune{0x0213}, // Case map + 0x0214: []rune{0x0215}, // Case map + 0x0216: []rune{0x0217}, // Case map + 0x0218: []rune{0x0219}, // Case map + 0x021A: []rune{0x021B}, // Case map + 0x021C: []rune{0x021D}, // Case map + 0x021E: []rune{0x021F}, // Case map + 
0x0220: []rune{0x019E}, // Case map + 0x0222: []rune{0x0223}, // Case map + 0x0224: []rune{0x0225}, // Case map + 0x0226: []rune{0x0227}, // Case map + 0x0228: []rune{0x0229}, // Case map + 0x022A: []rune{0x022B}, // Case map + 0x022C: []rune{0x022D}, // Case map + 0x022E: []rune{0x022F}, // Case map + 0x0230: []rune{0x0231}, // Case map + 0x0232: []rune{0x0233}, // Case map + 0x0345: []rune{0x03B9}, // Case map + 0x037A: []rune{0x0020, 0x03B9}, // Additional folding + 0x0386: []rune{0x03AC}, // Case map + 0x0388: []rune{0x03AD}, // Case map + 0x0389: []rune{0x03AE}, // Case map + 0x038A: []rune{0x03AF}, // Case map + 0x038C: []rune{0x03CC}, // Case map + 0x038E: []rune{0x03CD}, // Case map + 0x038F: []rune{0x03CE}, // Case map + 0x0390: []rune{0x03B9, 0x0308, 0x0301}, // Case map + 0x0391: []rune{0x03B1}, // Case map + 0x0392: []rune{0x03B2}, // Case map + 0x0393: []rune{0x03B3}, // Case map + 0x0394: []rune{0x03B4}, // Case map + 0x0395: []rune{0x03B5}, // Case map + 0x0396: []rune{0x03B6}, // Case map + 0x0397: []rune{0x03B7}, // Case map + 0x0398: []rune{0x03B8}, // Case map + 0x0399: []rune{0x03B9}, // Case map + 0x039A: []rune{0x03BA}, // Case map + 0x039B: []rune{0x03BB}, // Case map + 0x039C: []rune{0x03BC}, // Case map + 0x039D: []rune{0x03BD}, // Case map + 0x039E: []rune{0x03BE}, // Case map + 0x039F: []rune{0x03BF}, // Case map + 0x03A0: []rune{0x03C0}, // Case map + 0x03A1: []rune{0x03C1}, // Case map + 0x03A3: []rune{0x03C3}, // Case map + 0x03A4: []rune{0x03C4}, // Case map + 0x03A5: []rune{0x03C5}, // Case map + 0x03A6: []rune{0x03C6}, // Case map + 0x03A7: []rune{0x03C7}, // Case map + 0x03A8: []rune{0x03C8}, // Case map + 0x03A9: []rune{0x03C9}, // Case map + 0x03AA: []rune{0x03CA}, // Case map + 0x03AB: []rune{0x03CB}, // Case map + 0x03B0: []rune{0x03C5, 0x0308, 0x0301}, // Case map + 0x03C2: []rune{0x03C3}, // Case map + 0x03D0: []rune{0x03B2}, // Case map + 0x03D1: []rune{0x03B8}, // Case map + 0x03D2: []rune{0x03C5}, // Additional folding + 
0x03D3: []rune{0x03CD}, // Additional folding + 0x03D4: []rune{0x03CB}, // Additional folding + 0x03D5: []rune{0x03C6}, // Case map + 0x03D6: []rune{0x03C0}, // Case map + 0x03D8: []rune{0x03D9}, // Case map + 0x03DA: []rune{0x03DB}, // Case map + 0x03DC: []rune{0x03DD}, // Case map + 0x03DE: []rune{0x03DF}, // Case map + 0x03E0: []rune{0x03E1}, // Case map + 0x03E2: []rune{0x03E3}, // Case map + 0x03E4: []rune{0x03E5}, // Case map + 0x03E6: []rune{0x03E7}, // Case map + 0x03E8: []rune{0x03E9}, // Case map + 0x03EA: []rune{0x03EB}, // Case map + 0x03EC: []rune{0x03ED}, // Case map + 0x03EE: []rune{0x03EF}, // Case map + 0x03F0: []rune{0x03BA}, // Case map + 0x03F1: []rune{0x03C1}, // Case map + 0x03F2: []rune{0x03C3}, // Case map + 0x03F4: []rune{0x03B8}, // Case map + 0x03F5: []rune{0x03B5}, // Case map + 0x0400: []rune{0x0450}, // Case map + 0x0401: []rune{0x0451}, // Case map + 0x0402: []rune{0x0452}, // Case map + 0x0403: []rune{0x0453}, // Case map + 0x0404: []rune{0x0454}, // Case map + 0x0405: []rune{0x0455}, // Case map + 0x0406: []rune{0x0456}, // Case map + 0x0407: []rune{0x0457}, // Case map + 0x0408: []rune{0x0458}, // Case map + 0x0409: []rune{0x0459}, // Case map + 0x040A: []rune{0x045A}, // Case map + 0x040B: []rune{0x045B}, // Case map + 0x040C: []rune{0x045C}, // Case map + 0x040D: []rune{0x045D}, // Case map + 0x040E: []rune{0x045E}, // Case map + 0x040F: []rune{0x045F}, // Case map + 0x0410: []rune{0x0430}, // Case map + 0x0411: []rune{0x0431}, // Case map + 0x0412: []rune{0x0432}, // Case map + 0x0413: []rune{0x0433}, // Case map + 0x0414: []rune{0x0434}, // Case map + 0x0415: []rune{0x0435}, // Case map + 0x0416: []rune{0x0436}, // Case map + 0x0417: []rune{0x0437}, // Case map + 0x0418: []rune{0x0438}, // Case map + 0x0419: []rune{0x0439}, // Case map + 0x041A: []rune{0x043A}, // Case map + 0x041B: []rune{0x043B}, // Case map + 0x041C: []rune{0x043C}, // Case map + 0x041D: []rune{0x043D}, // Case map + 0x041E: []rune{0x043E}, // Case map + 
0x041F: []rune{0x043F}, // Case map + 0x0420: []rune{0x0440}, // Case map + 0x0421: []rune{0x0441}, // Case map + 0x0422: []rune{0x0442}, // Case map + 0x0423: []rune{0x0443}, // Case map + 0x0424: []rune{0x0444}, // Case map + 0x0425: []rune{0x0445}, // Case map + 0x0426: []rune{0x0446}, // Case map + 0x0427: []rune{0x0447}, // Case map + 0x0428: []rune{0x0448}, // Case map + 0x0429: []rune{0x0449}, // Case map + 0x042A: []rune{0x044A}, // Case map + 0x042B: []rune{0x044B}, // Case map + 0x042C: []rune{0x044C}, // Case map + 0x042D: []rune{0x044D}, // Case map + 0x042E: []rune{0x044E}, // Case map + 0x042F: []rune{0x044F}, // Case map + 0x0460: []rune{0x0461}, // Case map + 0x0462: []rune{0x0463}, // Case map + 0x0464: []rune{0x0465}, // Case map + 0x0466: []rune{0x0467}, // Case map + 0x0468: []rune{0x0469}, // Case map + 0x046A: []rune{0x046B}, // Case map + 0x046C: []rune{0x046D}, // Case map + 0x046E: []rune{0x046F}, // Case map + 0x0470: []rune{0x0471}, // Case map + 0x0472: []rune{0x0473}, // Case map + 0x0474: []rune{0x0475}, // Case map + 0x0476: []rune{0x0477}, // Case map + 0x0478: []rune{0x0479}, // Case map + 0x047A: []rune{0x047B}, // Case map + 0x047C: []rune{0x047D}, // Case map + 0x047E: []rune{0x047F}, // Case map + 0x0480: []rune{0x0481}, // Case map + 0x048A: []rune{0x048B}, // Case map + 0x048C: []rune{0x048D}, // Case map + 0x048E: []rune{0x048F}, // Case map + 0x0490: []rune{0x0491}, // Case map + 0x0492: []rune{0x0493}, // Case map + 0x0494: []rune{0x0495}, // Case map + 0x0496: []rune{0x0497}, // Case map + 0x0498: []rune{0x0499}, // Case map + 0x049A: []rune{0x049B}, // Case map + 0x049C: []rune{0x049D}, // Case map + 0x049E: []rune{0x049F}, // Case map + 0x04A0: []rune{0x04A1}, // Case map + 0x04A2: []rune{0x04A3}, // Case map + 0x04A4: []rune{0x04A5}, // Case map + 0x04A6: []rune{0x04A7}, // Case map + 0x04A8: []rune{0x04A9}, // Case map + 0x04AA: []rune{0x04AB}, // Case map + 0x04AC: []rune{0x04AD}, // Case map + 0x04AE: []rune{0x04AF}, 
// Case map + 0x04B0: []rune{0x04B1}, // Case map + 0x04B2: []rune{0x04B3}, // Case map + 0x04B4: []rune{0x04B5}, // Case map + 0x04B6: []rune{0x04B7}, // Case map + 0x04B8: []rune{0x04B9}, // Case map + 0x04BA: []rune{0x04BB}, // Case map + 0x04BC: []rune{0x04BD}, // Case map + 0x04BE: []rune{0x04BF}, // Case map + 0x04C1: []rune{0x04C2}, // Case map + 0x04C3: []rune{0x04C4}, // Case map + 0x04C5: []rune{0x04C6}, // Case map + 0x04C7: []rune{0x04C8}, // Case map + 0x04C9: []rune{0x04CA}, // Case map + 0x04CB: []rune{0x04CC}, // Case map + 0x04CD: []rune{0x04CE}, // Case map + 0x04D0: []rune{0x04D1}, // Case map + 0x04D2: []rune{0x04D3}, // Case map + 0x04D4: []rune{0x04D5}, // Case map + 0x04D6: []rune{0x04D7}, // Case map + 0x04D8: []rune{0x04D9}, // Case map + 0x04DA: []rune{0x04DB}, // Case map + 0x04DC: []rune{0x04DD}, // Case map + 0x04DE: []rune{0x04DF}, // Case map + 0x04E0: []rune{0x04E1}, // Case map + 0x04E2: []rune{0x04E3}, // Case map + 0x04E4: []rune{0x04E5}, // Case map + 0x04E6: []rune{0x04E7}, // Case map + 0x04E8: []rune{0x04E9}, // Case map + 0x04EA: []rune{0x04EB}, // Case map + 0x04EC: []rune{0x04ED}, // Case map + 0x04EE: []rune{0x04EF}, // Case map + 0x04F0: []rune{0x04F1}, // Case map + 0x04F2: []rune{0x04F3}, // Case map + 0x04F4: []rune{0x04F5}, // Case map + 0x04F8: []rune{0x04F9}, // Case map + 0x0500: []rune{0x0501}, // Case map + 0x0502: []rune{0x0503}, // Case map + 0x0504: []rune{0x0505}, // Case map + 0x0506: []rune{0x0507}, // Case map + 0x0508: []rune{0x0509}, // Case map + 0x050A: []rune{0x050B}, // Case map + 0x050C: []rune{0x050D}, // Case map + 0x050E: []rune{0x050F}, // Case map + 0x0531: []rune{0x0561}, // Case map + 0x0532: []rune{0x0562}, // Case map + 0x0533: []rune{0x0563}, // Case map + 0x0534: []rune{0x0564}, // Case map + 0x0535: []rune{0x0565}, // Case map + 0x0536: []rune{0x0566}, // Case map + 0x0537: []rune{0x0567}, // Case map + 0x0538: []rune{0x0568}, // Case map + 0x0539: []rune{0x0569}, // Case map + 0x053A: 
[]rune{0x056A}, // Case map + 0x053B: []rune{0x056B}, // Case map + 0x053C: []rune{0x056C}, // Case map + 0x053D: []rune{0x056D}, // Case map + 0x053E: []rune{0x056E}, // Case map + 0x053F: []rune{0x056F}, // Case map + 0x0540: []rune{0x0570}, // Case map + 0x0541: []rune{0x0571}, // Case map + 0x0542: []rune{0x0572}, // Case map + 0x0543: []rune{0x0573}, // Case map + 0x0544: []rune{0x0574}, // Case map + 0x0545: []rune{0x0575}, // Case map + 0x0546: []rune{0x0576}, // Case map + 0x0547: []rune{0x0577}, // Case map + 0x0548: []rune{0x0578}, // Case map + 0x0549: []rune{0x0579}, // Case map + 0x054A: []rune{0x057A}, // Case map + 0x054B: []rune{0x057B}, // Case map + 0x054C: []rune{0x057C}, // Case map + 0x054D: []rune{0x057D}, // Case map + 0x054E: []rune{0x057E}, // Case map + 0x054F: []rune{0x057F}, // Case map + 0x0550: []rune{0x0580}, // Case map + 0x0551: []rune{0x0581}, // Case map + 0x0552: []rune{0x0582}, // Case map + 0x0553: []rune{0x0583}, // Case map + 0x0554: []rune{0x0584}, // Case map + 0x0555: []rune{0x0585}, // Case map + 0x0556: []rune{0x0586}, // Case map + 0x0587: []rune{0x0565, 0x0582}, // Case map + 0x1E00: []rune{0x1E01}, // Case map + 0x1E02: []rune{0x1E03}, // Case map + 0x1E04: []rune{0x1E05}, // Case map + 0x1E06: []rune{0x1E07}, // Case map + 0x1E08: []rune{0x1E09}, // Case map + 0x1E0A: []rune{0x1E0B}, // Case map + 0x1E0C: []rune{0x1E0D}, // Case map + 0x1E0E: []rune{0x1E0F}, // Case map + 0x1E10: []rune{0x1E11}, // Case map + 0x1E12: []rune{0x1E13}, // Case map + 0x1E14: []rune{0x1E15}, // Case map + 0x1E16: []rune{0x1E17}, // Case map + 0x1E18: []rune{0x1E19}, // Case map + 0x1E1A: []rune{0x1E1B}, // Case map + 0x1E1C: []rune{0x1E1D}, // Case map + 0x1E1E: []rune{0x1E1F}, // Case map + 0x1E20: []rune{0x1E21}, // Case map + 0x1E22: []rune{0x1E23}, // Case map + 0x1E24: []rune{0x1E25}, // Case map + 0x1E26: []rune{0x1E27}, // Case map + 0x1E28: []rune{0x1E29}, // Case map + 0x1E2A: []rune{0x1E2B}, // Case map + 0x1E2C: []rune{0x1E2D}, 
// Case map + 0x1E2E: []rune{0x1E2F}, // Case map + 0x1E30: []rune{0x1E31}, // Case map + 0x1E32: []rune{0x1E33}, // Case map + 0x1E34: []rune{0x1E35}, // Case map + 0x1E36: []rune{0x1E37}, // Case map + 0x1E38: []rune{0x1E39}, // Case map + 0x1E3A: []rune{0x1E3B}, // Case map + 0x1E3C: []rune{0x1E3D}, // Case map + 0x1E3E: []rune{0x1E3F}, // Case map + 0x1E40: []rune{0x1E41}, // Case map + 0x1E42: []rune{0x1E43}, // Case map + 0x1E44: []rune{0x1E45}, // Case map + 0x1E46: []rune{0x1E47}, // Case map + 0x1E48: []rune{0x1E49}, // Case map + 0x1E4A: []rune{0x1E4B}, // Case map + 0x1E4C: []rune{0x1E4D}, // Case map + 0x1E4E: []rune{0x1E4F}, // Case map + 0x1E50: []rune{0x1E51}, // Case map + 0x1E52: []rune{0x1E53}, // Case map + 0x1E54: []rune{0x1E55}, // Case map + 0x1E56: []rune{0x1E57}, // Case map + 0x1E58: []rune{0x1E59}, // Case map + 0x1E5A: []rune{0x1E5B}, // Case map + 0x1E5C: []rune{0x1E5D}, // Case map + 0x1E5E: []rune{0x1E5F}, // Case map + 0x1E60: []rune{0x1E61}, // Case map + 0x1E62: []rune{0x1E63}, // Case map + 0x1E64: []rune{0x1E65}, // Case map + 0x1E66: []rune{0x1E67}, // Case map + 0x1E68: []rune{0x1E69}, // Case map + 0x1E6A: []rune{0x1E6B}, // Case map + 0x1E6C: []rune{0x1E6D}, // Case map + 0x1E6E: []rune{0x1E6F}, // Case map + 0x1E70: []rune{0x1E71}, // Case map + 0x1E72: []rune{0x1E73}, // Case map + 0x1E74: []rune{0x1E75}, // Case map + 0x1E76: []rune{0x1E77}, // Case map + 0x1E78: []rune{0x1E79}, // Case map + 0x1E7A: []rune{0x1E7B}, // Case map + 0x1E7C: []rune{0x1E7D}, // Case map + 0x1E7E: []rune{0x1E7F}, // Case map + 0x1E80: []rune{0x1E81}, // Case map + 0x1E82: []rune{0x1E83}, // Case map + 0x1E84: []rune{0x1E85}, // Case map + 0x1E86: []rune{0x1E87}, // Case map + 0x1E88: []rune{0x1E89}, // Case map + 0x1E8A: []rune{0x1E8B}, // Case map + 0x1E8C: []rune{0x1E8D}, // Case map + 0x1E8E: []rune{0x1E8F}, // Case map + 0x1E90: []rune{0x1E91}, // Case map + 0x1E92: []rune{0x1E93}, // Case map + 0x1E94: []rune{0x1E95}, // Case map + 0x1E96: 
[]rune{0x0068, 0x0331}, // Case map + 0x1E97: []rune{0x0074, 0x0308}, // Case map + 0x1E98: []rune{0x0077, 0x030A}, // Case map + 0x1E99: []rune{0x0079, 0x030A}, // Case map + 0x1E9A: []rune{0x0061, 0x02BE}, // Case map + 0x1E9B: []rune{0x1E61}, // Case map + 0x1EA0: []rune{0x1EA1}, // Case map + 0x1EA2: []rune{0x1EA3}, // Case map + 0x1EA4: []rune{0x1EA5}, // Case map + 0x1EA6: []rune{0x1EA7}, // Case map + 0x1EA8: []rune{0x1EA9}, // Case map + 0x1EAA: []rune{0x1EAB}, // Case map + 0x1EAC: []rune{0x1EAD}, // Case map + 0x1EAE: []rune{0x1EAF}, // Case map + 0x1EB0: []rune{0x1EB1}, // Case map + 0x1EB2: []rune{0x1EB3}, // Case map + 0x1EB4: []rune{0x1EB5}, // Case map + 0x1EB6: []rune{0x1EB7}, // Case map + 0x1EB8: []rune{0x1EB9}, // Case map + 0x1EBA: []rune{0x1EBB}, // Case map + 0x1EBC: []rune{0x1EBD}, // Case map + 0x1EBE: []rune{0x1EBF}, // Case map + 0x1EC0: []rune{0x1EC1}, // Case map + 0x1EC2: []rune{0x1EC3}, // Case map + 0x1EC4: []rune{0x1EC5}, // Case map + 0x1EC6: []rune{0x1EC7}, // Case map + 0x1EC8: []rune{0x1EC9}, // Case map + 0x1ECA: []rune{0x1ECB}, // Case map + 0x1ECC: []rune{0x1ECD}, // Case map + 0x1ECE: []rune{0x1ECF}, // Case map + 0x1ED0: []rune{0x1ED1}, // Case map + 0x1ED2: []rune{0x1ED3}, // Case map + 0x1ED4: []rune{0x1ED5}, // Case map + 0x1ED6: []rune{0x1ED7}, // Case map + 0x1ED8: []rune{0x1ED9}, // Case map + 0x1EDA: []rune{0x1EDB}, // Case map + 0x1EDC: []rune{0x1EDD}, // Case map + 0x1EDE: []rune{0x1EDF}, // Case map + 0x1EE0: []rune{0x1EE1}, // Case map + 0x1EE2: []rune{0x1EE3}, // Case map + 0x1EE4: []rune{0x1EE5}, // Case map + 0x1EE6: []rune{0x1EE7}, // Case map + 0x1EE8: []rune{0x1EE9}, // Case map + 0x1EEA: []rune{0x1EEB}, // Case map + 0x1EEC: []rune{0x1EED}, // Case map + 0x1EEE: []rune{0x1EEF}, // Case map + 0x1EF0: []rune{0x1EF1}, // Case map + 0x1EF2: []rune{0x1EF3}, // Case map + 0x1EF4: []rune{0x1EF5}, // Case map + 0x1EF6: []rune{0x1EF7}, // Case map + 0x1EF8: []rune{0x1EF9}, // Case map + 0x1F08: []rune{0x1F00}, // 
Case map + 0x1F09: []rune{0x1F01}, // Case map + 0x1F0A: []rune{0x1F02}, // Case map + 0x1F0B: []rune{0x1F03}, // Case map + 0x1F0C: []rune{0x1F04}, // Case map + 0x1F0D: []rune{0x1F05}, // Case map + 0x1F0E: []rune{0x1F06}, // Case map + 0x1F0F: []rune{0x1F07}, // Case map + 0x1F18: []rune{0x1F10}, // Case map + 0x1F19: []rune{0x1F11}, // Case map + 0x1F1A: []rune{0x1F12}, // Case map + 0x1F1B: []rune{0x1F13}, // Case map + 0x1F1C: []rune{0x1F14}, // Case map + 0x1F1D: []rune{0x1F15}, // Case map + 0x1F28: []rune{0x1F20}, // Case map + 0x1F29: []rune{0x1F21}, // Case map + 0x1F2A: []rune{0x1F22}, // Case map + 0x1F2B: []rune{0x1F23}, // Case map + 0x1F2C: []rune{0x1F24}, // Case map + 0x1F2D: []rune{0x1F25}, // Case map + 0x1F2E: []rune{0x1F26}, // Case map + 0x1F2F: []rune{0x1F27}, // Case map + 0x1F38: []rune{0x1F30}, // Case map + 0x1F39: []rune{0x1F31}, // Case map + 0x1F3A: []rune{0x1F32}, // Case map + 0x1F3B: []rune{0x1F33}, // Case map + 0x1F3C: []rune{0x1F34}, // Case map + 0x1F3D: []rune{0x1F35}, // Case map + 0x1F3E: []rune{0x1F36}, // Case map + 0x1F3F: []rune{0x1F37}, // Case map + 0x1F48: []rune{0x1F40}, // Case map + 0x1F49: []rune{0x1F41}, // Case map + 0x1F4A: []rune{0x1F42}, // Case map + 0x1F4B: []rune{0x1F43}, // Case map + 0x1F4C: []rune{0x1F44}, // Case map + 0x1F4D: []rune{0x1F45}, // Case map + 0x1F50: []rune{0x03C5, 0x0313}, // Case map + 0x1F52: []rune{0x03C5, 0x0313, 0x0300}, // Case map + 0x1F54: []rune{0x03C5, 0x0313, 0x0301}, // Case map + 0x1F56: []rune{0x03C5, 0x0313, 0x0342}, // Case map + 0x1F59: []rune{0x1F51}, // Case map + 0x1F5B: []rune{0x1F53}, // Case map + 0x1F5D: []rune{0x1F55}, // Case map + 0x1F5F: []rune{0x1F57}, // Case map + 0x1F68: []rune{0x1F60}, // Case map + 0x1F69: []rune{0x1F61}, // Case map + 0x1F6A: []rune{0x1F62}, // Case map + 0x1F6B: []rune{0x1F63}, // Case map + 0x1F6C: []rune{0x1F64}, // Case map + 0x1F6D: []rune{0x1F65}, // Case map + 0x1F6E: []rune{0x1F66}, // Case map + 0x1F6F: []rune{0x1F67}, // Case 
map + 0x1F80: []rune{0x1F00, 0x03B9}, // Case map + 0x1F81: []rune{0x1F01, 0x03B9}, // Case map + 0x1F82: []rune{0x1F02, 0x03B9}, // Case map + 0x1F83: []rune{0x1F03, 0x03B9}, // Case map + 0x1F84: []rune{0x1F04, 0x03B9}, // Case map + 0x1F85: []rune{0x1F05, 0x03B9}, // Case map + 0x1F86: []rune{0x1F06, 0x03B9}, // Case map + 0x1F87: []rune{0x1F07, 0x03B9}, // Case map + 0x1F88: []rune{0x1F00, 0x03B9}, // Case map + 0x1F89: []rune{0x1F01, 0x03B9}, // Case map + 0x1F8A: []rune{0x1F02, 0x03B9}, // Case map + 0x1F8B: []rune{0x1F03, 0x03B9}, // Case map + 0x1F8C: []rune{0x1F04, 0x03B9}, // Case map + 0x1F8D: []rune{0x1F05, 0x03B9}, // Case map + 0x1F8E: []rune{0x1F06, 0x03B9}, // Case map + 0x1F8F: []rune{0x1F07, 0x03B9}, // Case map + 0x1F90: []rune{0x1F20, 0x03B9}, // Case map + 0x1F91: []rune{0x1F21, 0x03B9}, // Case map + 0x1F92: []rune{0x1F22, 0x03B9}, // Case map + 0x1F93: []rune{0x1F23, 0x03B9}, // Case map + 0x1F94: []rune{0x1F24, 0x03B9}, // Case map + 0x1F95: []rune{0x1F25, 0x03B9}, // Case map + 0x1F96: []rune{0x1F26, 0x03B9}, // Case map + 0x1F97: []rune{0x1F27, 0x03B9}, // Case map + 0x1F98: []rune{0x1F20, 0x03B9}, // Case map + 0x1F99: []rune{0x1F21, 0x03B9}, // Case map + 0x1F9A: []rune{0x1F22, 0x03B9}, // Case map + 0x1F9B: []rune{0x1F23, 0x03B9}, // Case map + 0x1F9C: []rune{0x1F24, 0x03B9}, // Case map + 0x1F9D: []rune{0x1F25, 0x03B9}, // Case map + 0x1F9E: []rune{0x1F26, 0x03B9}, // Case map + 0x1F9F: []rune{0x1F27, 0x03B9}, // Case map + 0x1FA0: []rune{0x1F60, 0x03B9}, // Case map + 0x1FA1: []rune{0x1F61, 0x03B9}, // Case map + 0x1FA2: []rune{0x1F62, 0x03B9}, // Case map + 0x1FA3: []rune{0x1F63, 0x03B9}, // Case map + 0x1FA4: []rune{0x1F64, 0x03B9}, // Case map + 0x1FA5: []rune{0x1F65, 0x03B9}, // Case map + 0x1FA6: []rune{0x1F66, 0x03B9}, // Case map + 0x1FA7: []rune{0x1F67, 0x03B9}, // Case map + 0x1FA8: []rune{0x1F60, 0x03B9}, // Case map + 0x1FA9: []rune{0x1F61, 0x03B9}, // Case map + 0x1FAA: []rune{0x1F62, 0x03B9}, // Case map + 0x1FAB: 
[]rune{0x1F63, 0x03B9}, // Case map + 0x1FAC: []rune{0x1F64, 0x03B9}, // Case map + 0x1FAD: []rune{0x1F65, 0x03B9}, // Case map + 0x1FAE: []rune{0x1F66, 0x03B9}, // Case map + 0x1FAF: []rune{0x1F67, 0x03B9}, // Case map + 0x1FB2: []rune{0x1F70, 0x03B9}, // Case map + 0x1FB3: []rune{0x03B1, 0x03B9}, // Case map + 0x1FB4: []rune{0x03AC, 0x03B9}, // Case map + 0x1FB6: []rune{0x03B1, 0x0342}, // Case map + 0x1FB7: []rune{0x03B1, 0x0342, 0x03B9}, // Case map + 0x1FB8: []rune{0x1FB0}, // Case map + 0x1FB9: []rune{0x1FB1}, // Case map + 0x1FBA: []rune{0x1F70}, // Case map + 0x1FBB: []rune{0x1F71}, // Case map + 0x1FBC: []rune{0x03B1, 0x03B9}, // Case map + 0x1FBE: []rune{0x03B9}, // Case map + 0x1FC2: []rune{0x1F74, 0x03B9}, // Case map + 0x1FC3: []rune{0x03B7, 0x03B9}, // Case map + 0x1FC4: []rune{0x03AE, 0x03B9}, // Case map + 0x1FC6: []rune{0x03B7, 0x0342}, // Case map + 0x1FC7: []rune{0x03B7, 0x0342, 0x03B9}, // Case map + 0x1FC8: []rune{0x1F72}, // Case map + 0x1FC9: []rune{0x1F73}, // Case map + 0x1FCA: []rune{0x1F74}, // Case map + 0x1FCB: []rune{0x1F75}, // Case map + 0x1FCC: []rune{0x03B7, 0x03B9}, // Case map + 0x1FD2: []rune{0x03B9, 0x0308, 0x0300}, // Case map + 0x1FD3: []rune{0x03B9, 0x0308, 0x0301}, // Case map + 0x1FD6: []rune{0x03B9, 0x0342}, // Case map + 0x1FD7: []rune{0x03B9, 0x0308, 0x0342}, // Case map + 0x1FD8: []rune{0x1FD0}, // Case map + 0x1FD9: []rune{0x1FD1}, // Case map + 0x1FDA: []rune{0x1F76}, // Case map + 0x1FDB: []rune{0x1F77}, // Case map + 0x1FE2: []rune{0x03C5, 0x0308, 0x0300}, // Case map + 0x1FE3: []rune{0x03C5, 0x0308, 0x0301}, // Case map + 0x1FE4: []rune{0x03C1, 0x0313}, // Case map + 0x1FE6: []rune{0x03C5, 0x0342}, // Case map + 0x1FE7: []rune{0x03C5, 0x0308, 0x0342}, // Case map + 0x1FE8: []rune{0x1FE0}, // Case map + 0x1FE9: []rune{0x1FE1}, // Case map + 0x1FEA: []rune{0x1F7A}, // Case map + 0x1FEB: []rune{0x1F7B}, // Case map + 0x1FEC: []rune{0x1FE5}, // Case map + 0x1FF2: []rune{0x1F7C, 0x03B9}, // Case map + 0x1FF3: 
[]rune{0x03C9, 0x03B9}, // Case map + 0x1FF4: []rune{0x03CE, 0x03B9}, // Case map + 0x1FF6: []rune{0x03C9, 0x0342}, // Case map + 0x1FF7: []rune{0x03C9, 0x0342, 0x03B9}, // Case map + 0x1FF8: []rune{0x1F78}, // Case map + 0x1FF9: []rune{0x1F79}, // Case map + 0x1FFA: []rune{0x1F7C}, // Case map + 0x1FFB: []rune{0x1F7D}, // Case map + 0x1FFC: []rune{0x03C9, 0x03B9}, // Case map + 0x20A8: []rune{0x0072, 0x0073}, // Additional folding + 0x2102: []rune{0x0063}, // Additional folding + 0x2103: []rune{0x00B0, 0x0063}, // Additional folding + 0x2107: []rune{0x025B}, // Additional folding + 0x2109: []rune{0x00B0, 0x0066}, // Additional folding + 0x210B: []rune{0x0068}, // Additional folding + 0x210C: []rune{0x0068}, // Additional folding + 0x210D: []rune{0x0068}, // Additional folding + 0x2110: []rune{0x0069}, // Additional folding + 0x2111: []rune{0x0069}, // Additional folding + 0x2112: []rune{0x006C}, // Additional folding + 0x2115: []rune{0x006E}, // Additional folding + 0x2116: []rune{0x006E, 0x006F}, // Additional folding + 0x2119: []rune{0x0070}, // Additional folding + 0x211A: []rune{0x0071}, // Additional folding + 0x211B: []rune{0x0072}, // Additional folding + 0x211C: []rune{0x0072}, // Additional folding + 0x211D: []rune{0x0072}, // Additional folding + 0x2120: []rune{0x0073, 0x006D}, // Additional folding + 0x2121: []rune{0x0074, 0x0065, 0x006C}, // Additional folding + 0x2122: []rune{0x0074, 0x006D}, // Additional folding + 0x2124: []rune{0x007A}, // Additional folding + 0x2126: []rune{0x03C9}, // Case map + 0x2128: []rune{0x007A}, // Additional folding + 0x212A: []rune{0x006B}, // Case map + 0x212B: []rune{0x00E5}, // Case map + 0x212C: []rune{0x0062}, // Additional folding + 0x212D: []rune{0x0063}, // Additional folding + 0x2130: []rune{0x0065}, // Additional folding + 0x2131: []rune{0x0066}, // Additional folding + 0x2133: []rune{0x006D}, // Additional folding + 0x213E: []rune{0x03B3}, // Additional folding + 0x213F: []rune{0x03C0}, // Additional folding + 
0x2145: []rune{0x0064}, // Additional folding + 0x2160: []rune{0x2170}, // Case map + 0x2161: []rune{0x2171}, // Case map + 0x2162: []rune{0x2172}, // Case map + 0x2163: []rune{0x2173}, // Case map + 0x2164: []rune{0x2174}, // Case map + 0x2165: []rune{0x2175}, // Case map + 0x2166: []rune{0x2176}, // Case map + 0x2167: []rune{0x2177}, // Case map + 0x2168: []rune{0x2178}, // Case map + 0x2169: []rune{0x2179}, // Case map + 0x216A: []rune{0x217A}, // Case map + 0x216B: []rune{0x217B}, // Case map + 0x216C: []rune{0x217C}, // Case map + 0x216D: []rune{0x217D}, // Case map + 0x216E: []rune{0x217E}, // Case map + 0x216F: []rune{0x217F}, // Case map + 0x24B6: []rune{0x24D0}, // Case map + 0x24B7: []rune{0x24D1}, // Case map + 0x24B8: []rune{0x24D2}, // Case map + 0x24B9: []rune{0x24D3}, // Case map + 0x24BA: []rune{0x24D4}, // Case map + 0x24BB: []rune{0x24D5}, // Case map + 0x24BC: []rune{0x24D6}, // Case map + 0x24BD: []rune{0x24D7}, // Case map + 0x24BE: []rune{0x24D8}, // Case map + 0x24BF: []rune{0x24D9}, // Case map + 0x24C0: []rune{0x24DA}, // Case map + 0x24C1: []rune{0x24DB}, // Case map + 0x24C2: []rune{0x24DC}, // Case map + 0x24C3: []rune{0x24DD}, // Case map + 0x24C4: []rune{0x24DE}, // Case map + 0x24C5: []rune{0x24DF}, // Case map + 0x24C6: []rune{0x24E0}, // Case map + 0x24C7: []rune{0x24E1}, // Case map + 0x24C8: []rune{0x24E2}, // Case map + 0x24C9: []rune{0x24E3}, // Case map + 0x24CA: []rune{0x24E4}, // Case map + 0x24CB: []rune{0x24E5}, // Case map + 0x24CC: []rune{0x24E6}, // Case map + 0x24CD: []rune{0x24E7}, // Case map + 0x24CE: []rune{0x24E8}, // Case map + 0x24CF: []rune{0x24E9}, // Case map + 0x3371: []rune{0x0068, 0x0070, 0x0061}, // Additional folding + 0x3373: []rune{0x0061, 0x0075}, // Additional folding + 0x3375: []rune{0x006F, 0x0076}, // Additional folding + 0x3380: []rune{0x0070, 0x0061}, // Additional folding + 0x3381: []rune{0x006E, 0x0061}, // Additional folding + 0x3382: []rune{0x03BC, 0x0061}, // Additional folding + 0x3383: 
[]rune{0x006D, 0x0061}, // Additional folding + 0x3384: []rune{0x006B, 0x0061}, // Additional folding + 0x3385: []rune{0x006B, 0x0062}, // Additional folding + 0x3386: []rune{0x006D, 0x0062}, // Additional folding + 0x3387: []rune{0x0067, 0x0062}, // Additional folding + 0x338A: []rune{0x0070, 0x0066}, // Additional folding + 0x338B: []rune{0x006E, 0x0066}, // Additional folding + 0x338C: []rune{0x03BC, 0x0066}, // Additional folding + 0x3390: []rune{0x0068, 0x007A}, // Additional folding + 0x3391: []rune{0x006B, 0x0068, 0x007A}, // Additional folding + 0x3392: []rune{0x006D, 0x0068, 0x007A}, // Additional folding + 0x3393: []rune{0x0067, 0x0068, 0x007A}, // Additional folding + 0x3394: []rune{0x0074, 0x0068, 0x007A}, // Additional folding + 0x33A9: []rune{0x0070, 0x0061}, // Additional folding + 0x33AA: []rune{0x006B, 0x0070, 0x0061}, // Additional folding + 0x33AB: []rune{0x006D, 0x0070, 0x0061}, // Additional folding + 0x33AC: []rune{0x0067, 0x0070, 0x0061}, // Additional folding + 0x33B4: []rune{0x0070, 0x0076}, // Additional folding + 0x33B5: []rune{0x006E, 0x0076}, // Additional folding + 0x33B6: []rune{0x03BC, 0x0076}, // Additional folding + 0x33B7: []rune{0x006D, 0x0076}, // Additional folding + 0x33B8: []rune{0x006B, 0x0076}, // Additional folding + 0x33B9: []rune{0x006D, 0x0076}, // Additional folding + 0x33BA: []rune{0x0070, 0x0077}, // Additional folding + 0x33BB: []rune{0x006E, 0x0077}, // Additional folding + 0x33BC: []rune{0x03BC, 0x0077}, // Additional folding + 0x33BD: []rune{0x006D, 0x0077}, // Additional folding + 0x33BE: []rune{0x006B, 0x0077}, // Additional folding + 0x33BF: []rune{0x006D, 0x0077}, // Additional folding + 0x33C0: []rune{0x006B, 0x03C9}, // Additional folding + 0x33C1: []rune{0x006D, 0x03C9}, // Additional folding + 0x33C3: []rune{0x0062, 0x0071}, // Additional folding + 0x33C6: []rune{0x0063, 0x2215, 0x006B, 0x0067}, // Additional folding + 0x33C7: []rune{0x0063, 0x006F, 0x002E}, // Additional folding + 0x33C8: []rune{0x0064, 
0x0062}, // Additional folding + 0x33C9: []rune{0x0067, 0x0079}, // Additional folding + 0x33CB: []rune{0x0068, 0x0070}, // Additional folding + 0x33CD: []rune{0x006B, 0x006B}, // Additional folding + 0x33CE: []rune{0x006B, 0x006D}, // Additional folding + 0x33D7: []rune{0x0070, 0x0068}, // Additional folding + 0x33D9: []rune{0x0070, 0x0070, 0x006D}, // Additional folding + 0x33DA: []rune{0x0070, 0x0072}, // Additional folding + 0x33DC: []rune{0x0073, 0x0076}, // Additional folding + 0x33DD: []rune{0x0077, 0x0062}, // Additional folding + 0xFB00: []rune{0x0066, 0x0066}, // Case map + 0xFB01: []rune{0x0066, 0x0069}, // Case map + 0xFB02: []rune{0x0066, 0x006C}, // Case map + 0xFB03: []rune{0x0066, 0x0066, 0x0069}, // Case map + 0xFB04: []rune{0x0066, 0x0066, 0x006C}, // Case map + 0xFB05: []rune{0x0073, 0x0074}, // Case map + 0xFB06: []rune{0x0073, 0x0074}, // Case map + 0xFB13: []rune{0x0574, 0x0576}, // Case map + 0xFB14: []rune{0x0574, 0x0565}, // Case map + 0xFB15: []rune{0x0574, 0x056B}, // Case map + 0xFB16: []rune{0x057E, 0x0576}, // Case map + 0xFB17: []rune{0x0574, 0x056D}, // Case map + 0xFF21: []rune{0xFF41}, // Case map + 0xFF22: []rune{0xFF42}, // Case map + 0xFF23: []rune{0xFF43}, // Case map + 0xFF24: []rune{0xFF44}, // Case map + 0xFF25: []rune{0xFF45}, // Case map + 0xFF26: []rune{0xFF46}, // Case map + 0xFF27: []rune{0xFF47}, // Case map + 0xFF28: []rune{0xFF48}, // Case map + 0xFF29: []rune{0xFF49}, // Case map + 0xFF2A: []rune{0xFF4A}, // Case map + 0xFF2B: []rune{0xFF4B}, // Case map + 0xFF2C: []rune{0xFF4C}, // Case map + 0xFF2D: []rune{0xFF4D}, // Case map + 0xFF2E: []rune{0xFF4E}, // Case map + 0xFF2F: []rune{0xFF4F}, // Case map + 0xFF30: []rune{0xFF50}, // Case map + 0xFF31: []rune{0xFF51}, // Case map + 0xFF32: []rune{0xFF52}, // Case map + 0xFF33: []rune{0xFF53}, // Case map + 0xFF34: []rune{0xFF54}, // Case map + 0xFF35: []rune{0xFF55}, // Case map + 0xFF36: []rune{0xFF56}, // Case map + 0xFF37: []rune{0xFF57}, // Case map + 0xFF38: 
[]rune{0xFF58}, // Case map + 0xFF39: []rune{0xFF59}, // Case map + 0xFF3A: []rune{0xFF5A}, // Case map + 0x10400: []rune{0x10428}, // Case map + 0x10401: []rune{0x10429}, // Case map + 0x10402: []rune{0x1042A}, // Case map + 0x10403: []rune{0x1042B}, // Case map + 0x10404: []rune{0x1042C}, // Case map + 0x10405: []rune{0x1042D}, // Case map + 0x10406: []rune{0x1042E}, // Case map + 0x10407: []rune{0x1042F}, // Case map + 0x10408: []rune{0x10430}, // Case map + 0x10409: []rune{0x10431}, // Case map + 0x1040A: []rune{0x10432}, // Case map + 0x1040B: []rune{0x10433}, // Case map + 0x1040C: []rune{0x10434}, // Case map + 0x1040D: []rune{0x10435}, // Case map + 0x1040E: []rune{0x10436}, // Case map + 0x1040F: []rune{0x10437}, // Case map + 0x10410: []rune{0x10438}, // Case map + 0x10411: []rune{0x10439}, // Case map + 0x10412: []rune{0x1043A}, // Case map + 0x10413: []rune{0x1043B}, // Case map + 0x10414: []rune{0x1043C}, // Case map + 0x10415: []rune{0x1043D}, // Case map + 0x10416: []rune{0x1043E}, // Case map + 0x10417: []rune{0x1043F}, // Case map + 0x10418: []rune{0x10440}, // Case map + 0x10419: []rune{0x10441}, // Case map + 0x1041A: []rune{0x10442}, // Case map + 0x1041B: []rune{0x10443}, // Case map + 0x1041C: []rune{0x10444}, // Case map + 0x1041D: []rune{0x10445}, // Case map + 0x1041E: []rune{0x10446}, // Case map + 0x1041F: []rune{0x10447}, // Case map + 0x10420: []rune{0x10448}, // Case map + 0x10421: []rune{0x10449}, // Case map + 0x10422: []rune{0x1044A}, // Case map + 0x10423: []rune{0x1044B}, // Case map + 0x10424: []rune{0x1044C}, // Case map + 0x10425: []rune{0x1044D}, // Case map + 0x1D400: []rune{0x0061}, // Additional folding + 0x1D401: []rune{0x0062}, // Additional folding + 0x1D402: []rune{0x0063}, // Additional folding + 0x1D403: []rune{0x0064}, // Additional folding + 0x1D404: []rune{0x0065}, // Additional folding + 0x1D405: []rune{0x0066}, // Additional folding + 0x1D406: []rune{0x0067}, // Additional folding + 0x1D407: []rune{0x0068}, // 
Additional folding + 0x1D408: []rune{0x0069}, // Additional folding + 0x1D409: []rune{0x006A}, // Additional folding + 0x1D40A: []rune{0x006B}, // Additional folding + 0x1D40B: []rune{0x006C}, // Additional folding + 0x1D40C: []rune{0x006D}, // Additional folding + 0x1D40D: []rune{0x006E}, // Additional folding + 0x1D40E: []rune{0x006F}, // Additional folding + 0x1D40F: []rune{0x0070}, // Additional folding + 0x1D410: []rune{0x0071}, // Additional folding + 0x1D411: []rune{0x0072}, // Additional folding + 0x1D412: []rune{0x0073}, // Additional folding + 0x1D413: []rune{0x0074}, // Additional folding + 0x1D414: []rune{0x0075}, // Additional folding + 0x1D415: []rune{0x0076}, // Additional folding + 0x1D416: []rune{0x0077}, // Additional folding + 0x1D417: []rune{0x0078}, // Additional folding + 0x1D418: []rune{0x0079}, // Additional folding + 0x1D419: []rune{0x007A}, // Additional folding + 0x1D434: []rune{0x0061}, // Additional folding + 0x1D435: []rune{0x0062}, // Additional folding + 0x1D436: []rune{0x0063}, // Additional folding + 0x1D437: []rune{0x0064}, // Additional folding + 0x1D438: []rune{0x0065}, // Additional folding + 0x1D439: []rune{0x0066}, // Additional folding + 0x1D43A: []rune{0x0067}, // Additional folding + 0x1D43B: []rune{0x0068}, // Additional folding + 0x1D43C: []rune{0x0069}, // Additional folding + 0x1D43D: []rune{0x006A}, // Additional folding + 0x1D43E: []rune{0x006B}, // Additional folding + 0x1D43F: []rune{0x006C}, // Additional folding + 0x1D440: []rune{0x006D}, // Additional folding + 0x1D441: []rune{0x006E}, // Additional folding + 0x1D442: []rune{0x006F}, // Additional folding + 0x1D443: []rune{0x0070}, // Additional folding + 0x1D444: []rune{0x0071}, // Additional folding + 0x1D445: []rune{0x0072}, // Additional folding + 0x1D446: []rune{0x0073}, // Additional folding + 0x1D447: []rune{0x0074}, // Additional folding + 0x1D448: []rune{0x0075}, // Additional folding + 0x1D449: []rune{0x0076}, // Additional folding + 0x1D44A: 
[]rune{0x0077}, // Additional folding + 0x1D44B: []rune{0x0078}, // Additional folding + 0x1D44C: []rune{0x0079}, // Additional folding + 0x1D44D: []rune{0x007A}, // Additional folding + 0x1D468: []rune{0x0061}, // Additional folding + 0x1D469: []rune{0x0062}, // Additional folding + 0x1D46A: []rune{0x0063}, // Additional folding + 0x1D46B: []rune{0x0064}, // Additional folding + 0x1D46C: []rune{0x0065}, // Additional folding + 0x1D46D: []rune{0x0066}, // Additional folding + 0x1D46E: []rune{0x0067}, // Additional folding + 0x1D46F: []rune{0x0068}, // Additional folding + 0x1D470: []rune{0x0069}, // Additional folding + 0x1D471: []rune{0x006A}, // Additional folding + 0x1D472: []rune{0x006B}, // Additional folding + 0x1D473: []rune{0x006C}, // Additional folding + 0x1D474: []rune{0x006D}, // Additional folding + 0x1D475: []rune{0x006E}, // Additional folding + 0x1D476: []rune{0x006F}, // Additional folding + 0x1D477: []rune{0x0070}, // Additional folding + 0x1D478: []rune{0x0071}, // Additional folding + 0x1D479: []rune{0x0072}, // Additional folding + 0x1D47A: []rune{0x0073}, // Additional folding + 0x1D47B: []rune{0x0074}, // Additional folding + 0x1D47C: []rune{0x0075}, // Additional folding + 0x1D47D: []rune{0x0076}, // Additional folding + 0x1D47E: []rune{0x0077}, // Additional folding + 0x1D47F: []rune{0x0078}, // Additional folding + 0x1D480: []rune{0x0079}, // Additional folding + 0x1D481: []rune{0x007A}, // Additional folding + 0x1D49C: []rune{0x0061}, // Additional folding + 0x1D49E: []rune{0x0063}, // Additional folding + 0x1D49F: []rune{0x0064}, // Additional folding + 0x1D4A2: []rune{0x0067}, // Additional folding + 0x1D4A5: []rune{0x006A}, // Additional folding + 0x1D4A6: []rune{0x006B}, // Additional folding + 0x1D4A9: []rune{0x006E}, // Additional folding + 0x1D4AA: []rune{0x006F}, // Additional folding + 0x1D4AB: []rune{0x0070}, // Additional folding + 0x1D4AC: []rune{0x0071}, // Additional folding + 0x1D4AE: []rune{0x0073}, // Additional folding + 
0x1D4AF: []rune{0x0074}, // Additional folding + 0x1D4B0: []rune{0x0075}, // Additional folding + 0x1D4B1: []rune{0x0076}, // Additional folding + 0x1D4B2: []rune{0x0077}, // Additional folding + 0x1D4B3: []rune{0x0078}, // Additional folding + 0x1D4B4: []rune{0x0079}, // Additional folding + 0x1D4B5: []rune{0x007A}, // Additional folding + 0x1D4D0: []rune{0x0061}, // Additional folding + 0x1D4D1: []rune{0x0062}, // Additional folding + 0x1D4D2: []rune{0x0063}, // Additional folding + 0x1D4D3: []rune{0x0064}, // Additional folding + 0x1D4D4: []rune{0x0065}, // Additional folding + 0x1D4D5: []rune{0x0066}, // Additional folding + 0x1D4D6: []rune{0x0067}, // Additional folding + 0x1D4D7: []rune{0x0068}, // Additional folding + 0x1D4D8: []rune{0x0069}, // Additional folding + 0x1D4D9: []rune{0x006A}, // Additional folding + 0x1D4DA: []rune{0x006B}, // Additional folding + 0x1D4DB: []rune{0x006C}, // Additional folding + 0x1D4DC: []rune{0x006D}, // Additional folding + 0x1D4DD: []rune{0x006E}, // Additional folding + 0x1D4DE: []rune{0x006F}, // Additional folding + 0x1D4DF: []rune{0x0070}, // Additional folding + 0x1D4E0: []rune{0x0071}, // Additional folding + 0x1D4E1: []rune{0x0072}, // Additional folding + 0x1D4E2: []rune{0x0073}, // Additional folding + 0x1D4E3: []rune{0x0074}, // Additional folding + 0x1D4E4: []rune{0x0075}, // Additional folding + 0x1D4E5: []rune{0x0076}, // Additional folding + 0x1D4E6: []rune{0x0077}, // Additional folding + 0x1D4E7: []rune{0x0078}, // Additional folding + 0x1D4E8: []rune{0x0079}, // Additional folding + 0x1D4E9: []rune{0x007A}, // Additional folding + 0x1D504: []rune{0x0061}, // Additional folding + 0x1D505: []rune{0x0062}, // Additional folding + 0x1D507: []rune{0x0064}, // Additional folding + 0x1D508: []rune{0x0065}, // Additional folding + 0x1D509: []rune{0x0066}, // Additional folding + 0x1D50A: []rune{0x0067}, // Additional folding + 0x1D50D: []rune{0x006A}, // Additional folding + 0x1D50E: []rune{0x006B}, // Additional 
folding + 0x1D50F: []rune{0x006C}, // Additional folding + 0x1D510: []rune{0x006D}, // Additional folding + 0x1D511: []rune{0x006E}, // Additional folding + 0x1D512: []rune{0x006F}, // Additional folding + 0x1D513: []rune{0x0070}, // Additional folding + 0x1D514: []rune{0x0071}, // Additional folding + 0x1D516: []rune{0x0073}, // Additional folding + 0x1D517: []rune{0x0074}, // Additional folding + 0x1D518: []rune{0x0075}, // Additional folding + 0x1D519: []rune{0x0076}, // Additional folding + 0x1D51A: []rune{0x0077}, // Additional folding + 0x1D51B: []rune{0x0078}, // Additional folding + 0x1D51C: []rune{0x0079}, // Additional folding + 0x1D538: []rune{0x0061}, // Additional folding + 0x1D539: []rune{0x0062}, // Additional folding + 0x1D53B: []rune{0x0064}, // Additional folding + 0x1D53C: []rune{0x0065}, // Additional folding + 0x1D53D: []rune{0x0066}, // Additional folding + 0x1D53E: []rune{0x0067}, // Additional folding + 0x1D540: []rune{0x0069}, // Additional folding + 0x1D541: []rune{0x006A}, // Additional folding + 0x1D542: []rune{0x006B}, // Additional folding + 0x1D543: []rune{0x006C}, // Additional folding + 0x1D544: []rune{0x006D}, // Additional folding + 0x1D546: []rune{0x006F}, // Additional folding + 0x1D54A: []rune{0x0073}, // Additional folding + 0x1D54B: []rune{0x0074}, // Additional folding + 0x1D54C: []rune{0x0075}, // Additional folding + 0x1D54D: []rune{0x0076}, // Additional folding + 0x1D54E: []rune{0x0077}, // Additional folding + 0x1D54F: []rune{0x0078}, // Additional folding + 0x1D550: []rune{0x0079}, // Additional folding + 0x1D56C: []rune{0x0061}, // Additional folding + 0x1D56D: []rune{0x0062}, // Additional folding + 0x1D56E: []rune{0x0063}, // Additional folding + 0x1D56F: []rune{0x0064}, // Additional folding + 0x1D570: []rune{0x0065}, // Additional folding + 0x1D571: []rune{0x0066}, // Additional folding + 0x1D572: []rune{0x0067}, // Additional folding + 0x1D573: []rune{0x0068}, // Additional folding + 0x1D574: []rune{0x0069}, // 
Additional folding + 0x1D575: []rune{0x006A}, // Additional folding + 0x1D576: []rune{0x006B}, // Additional folding + 0x1D577: []rune{0x006C}, // Additional folding + 0x1D578: []rune{0x006D}, // Additional folding + 0x1D579: []rune{0x006E}, // Additional folding + 0x1D57A: []rune{0x006F}, // Additional folding + 0x1D57B: []rune{0x0070}, // Additional folding + 0x1D57C: []rune{0x0071}, // Additional folding + 0x1D57D: []rune{0x0072}, // Additional folding + 0x1D57E: []rune{0x0073}, // Additional folding + 0x1D57F: []rune{0x0074}, // Additional folding + 0x1D580: []rune{0x0075}, // Additional folding + 0x1D581: []rune{0x0076}, // Additional folding + 0x1D582: []rune{0x0077}, // Additional folding + 0x1D583: []rune{0x0078}, // Additional folding + 0x1D584: []rune{0x0079}, // Additional folding + 0x1D585: []rune{0x007A}, // Additional folding + 0x1D5A0: []rune{0x0061}, // Additional folding + 0x1D5A1: []rune{0x0062}, // Additional folding + 0x1D5A2: []rune{0x0063}, // Additional folding + 0x1D5A3: []rune{0x0064}, // Additional folding + 0x1D5A4: []rune{0x0065}, // Additional folding + 0x1D5A5: []rune{0x0066}, // Additional folding + 0x1D5A6: []rune{0x0067}, // Additional folding + 0x1D5A7: []rune{0x0068}, // Additional folding + 0x1D5A8: []rune{0x0069}, // Additional folding + 0x1D5A9: []rune{0x006A}, // Additional folding + 0x1D5AA: []rune{0x006B}, // Additional folding + 0x1D5AB: []rune{0x006C}, // Additional folding + 0x1D5AC: []rune{0x006D}, // Additional folding + 0x1D5AD: []rune{0x006E}, // Additional folding + 0x1D5AE: []rune{0x006F}, // Additional folding + 0x1D5AF: []rune{0x0070}, // Additional folding + 0x1D5B0: []rune{0x0071}, // Additional folding + 0x1D5B1: []rune{0x0072}, // Additional folding + 0x1D5B2: []rune{0x0073}, // Additional folding + 0x1D5B3: []rune{0x0074}, // Additional folding + 0x1D5B4: []rune{0x0075}, // Additional folding + 0x1D5B5: []rune{0x0076}, // Additional folding + 0x1D5B6: []rune{0x0077}, // Additional folding + 0x1D5B7: 
[]rune{0x0078}, // Additional folding + 0x1D5B8: []rune{0x0079}, // Additional folding + 0x1D5B9: []rune{0x007A}, // Additional folding + 0x1D5D4: []rune{0x0061}, // Additional folding + 0x1D5D5: []rune{0x0062}, // Additional folding + 0x1D5D6: []rune{0x0063}, // Additional folding + 0x1D5D7: []rune{0x0064}, // Additional folding + 0x1D5D8: []rune{0x0065}, // Additional folding + 0x1D5D9: []rune{0x0066}, // Additional folding + 0x1D5DA: []rune{0x0067}, // Additional folding + 0x1D5DB: []rune{0x0068}, // Additional folding + 0x1D5DC: []rune{0x0069}, // Additional folding + 0x1D5DD: []rune{0x006A}, // Additional folding + 0x1D5DE: []rune{0x006B}, // Additional folding + 0x1D5DF: []rune{0x006C}, // Additional folding + 0x1D5E0: []rune{0x006D}, // Additional folding + 0x1D5E1: []rune{0x006E}, // Additional folding + 0x1D5E2: []rune{0x006F}, // Additional folding + 0x1D5E3: []rune{0x0070}, // Additional folding + 0x1D5E4: []rune{0x0071}, // Additional folding + 0x1D5E5: []rune{0x0072}, // Additional folding + 0x1D5E6: []rune{0x0073}, // Additional folding + 0x1D5E7: []rune{0x0074}, // Additional folding + 0x1D5E8: []rune{0x0075}, // Additional folding + 0x1D5E9: []rune{0x0076}, // Additional folding + 0x1D5EA: []rune{0x0077}, // Additional folding + 0x1D5EB: []rune{0x0078}, // Additional folding + 0x1D5EC: []rune{0x0079}, // Additional folding + 0x1D5ED: []rune{0x007A}, // Additional folding + 0x1D608: []rune{0x0061}, // Additional folding + 0x1D609: []rune{0x0062}, // Additional folding + 0x1D60A: []rune{0x0063}, // Additional folding + 0x1D60B: []rune{0x0064}, // Additional folding + 0x1D60C: []rune{0x0065}, // Additional folding + 0x1D60D: []rune{0x0066}, // Additional folding + 0x1D60E: []rune{0x0067}, // Additional folding + 0x1D60F: []rune{0x0068}, // Additional folding + 0x1D610: []rune{0x0069}, // Additional folding + 0x1D611: []rune{0x006A}, // Additional folding + 0x1D612: []rune{0x006B}, // Additional folding + 0x1D613: []rune{0x006C}, // Additional folding + 
0x1D614: []rune{0x006D}, // Additional folding + 0x1D615: []rune{0x006E}, // Additional folding + 0x1D616: []rune{0x006F}, // Additional folding + 0x1D617: []rune{0x0070}, // Additional folding + 0x1D618: []rune{0x0071}, // Additional folding + 0x1D619: []rune{0x0072}, // Additional folding + 0x1D61A: []rune{0x0073}, // Additional folding + 0x1D61B: []rune{0x0074}, // Additional folding + 0x1D61C: []rune{0x0075}, // Additional folding + 0x1D61D: []rune{0x0076}, // Additional folding + 0x1D61E: []rune{0x0077}, // Additional folding + 0x1D61F: []rune{0x0078}, // Additional folding + 0x1D620: []rune{0x0079}, // Additional folding + 0x1D621: []rune{0x007A}, // Additional folding + 0x1D63C: []rune{0x0061}, // Additional folding + 0x1D63D: []rune{0x0062}, // Additional folding + 0x1D63E: []rune{0x0063}, // Additional folding + 0x1D63F: []rune{0x0064}, // Additional folding + 0x1D640: []rune{0x0065}, // Additional folding + 0x1D641: []rune{0x0066}, // Additional folding + 0x1D642: []rune{0x0067}, // Additional folding + 0x1D643: []rune{0x0068}, // Additional folding + 0x1D644: []rune{0x0069}, // Additional folding + 0x1D645: []rune{0x006A}, // Additional folding + 0x1D646: []rune{0x006B}, // Additional folding + 0x1D647: []rune{0x006C}, // Additional folding + 0x1D648: []rune{0x006D}, // Additional folding + 0x1D649: []rune{0x006E}, // Additional folding + 0x1D64A: []rune{0x006F}, // Additional folding + 0x1D64B: []rune{0x0070}, // Additional folding + 0x1D64C: []rune{0x0071}, // Additional folding + 0x1D64D: []rune{0x0072}, // Additional folding + 0x1D64E: []rune{0x0073}, // Additional folding + 0x1D64F: []rune{0x0074}, // Additional folding + 0x1D650: []rune{0x0075}, // Additional folding + 0x1D651: []rune{0x0076}, // Additional folding + 0x1D652: []rune{0x0077}, // Additional folding + 0x1D653: []rune{0x0078}, // Additional folding + 0x1D654: []rune{0x0079}, // Additional folding + 0x1D655: []rune{0x007A}, // Additional folding + 0x1D670: []rune{0x0061}, // Additional 
folding + 0x1D671: []rune{0x0062}, // Additional folding + 0x1D672: []rune{0x0063}, // Additional folding + 0x1D673: []rune{0x0064}, // Additional folding + 0x1D674: []rune{0x0065}, // Additional folding + 0x1D675: []rune{0x0066}, // Additional folding + 0x1D676: []rune{0x0067}, // Additional folding + 0x1D677: []rune{0x0068}, // Additional folding + 0x1D678: []rune{0x0069}, // Additional folding + 0x1D679: []rune{0x006A}, // Additional folding + 0x1D67A: []rune{0x006B}, // Additional folding + 0x1D67B: []rune{0x006C}, // Additional folding + 0x1D67C: []rune{0x006D}, // Additional folding + 0x1D67D: []rune{0x006E}, // Additional folding + 0x1D67E: []rune{0x006F}, // Additional folding + 0x1D67F: []rune{0x0070}, // Additional folding + 0x1D680: []rune{0x0071}, // Additional folding + 0x1D681: []rune{0x0072}, // Additional folding + 0x1D682: []rune{0x0073}, // Additional folding + 0x1D683: []rune{0x0074}, // Additional folding + 0x1D684: []rune{0x0075}, // Additional folding + 0x1D685: []rune{0x0076}, // Additional folding + 0x1D686: []rune{0x0077}, // Additional folding + 0x1D687: []rune{0x0078}, // Additional folding + 0x1D688: []rune{0x0079}, // Additional folding + 0x1D689: []rune{0x007A}, // Additional folding + 0x1D6A8: []rune{0x03B1}, // Additional folding + 0x1D6A9: []rune{0x03B2}, // Additional folding + 0x1D6AA: []rune{0x03B3}, // Additional folding + 0x1D6AB: []rune{0x03B4}, // Additional folding + 0x1D6AC: []rune{0x03B5}, // Additional folding + 0x1D6AD: []rune{0x03B6}, // Additional folding + 0x1D6AE: []rune{0x03B7}, // Additional folding + 0x1D6AF: []rune{0x03B8}, // Additional folding + 0x1D6B0: []rune{0x03B9}, // Additional folding + 0x1D6B1: []rune{0x03BA}, // Additional folding + 0x1D6B2: []rune{0x03BB}, // Additional folding + 0x1D6B3: []rune{0x03BC}, // Additional folding + 0x1D6B4: []rune{0x03BD}, // Additional folding + 0x1D6B5: []rune{0x03BE}, // Additional folding + 0x1D6B6: []rune{0x03BF}, // Additional folding + 0x1D6B7: []rune{0x03C0}, // 
Additional folding + 0x1D6B8: []rune{0x03C1}, // Additional folding + 0x1D6B9: []rune{0x03B8}, // Additional folding + 0x1D6BA: []rune{0x03C3}, // Additional folding + 0x1D6BB: []rune{0x03C4}, // Additional folding + 0x1D6BC: []rune{0x03C5}, // Additional folding + 0x1D6BD: []rune{0x03C6}, // Additional folding + 0x1D6BE: []rune{0x03C7}, // Additional folding + 0x1D6BF: []rune{0x03C8}, // Additional folding + 0x1D6C0: []rune{0x03C9}, // Additional folding + 0x1D6D3: []rune{0x03C3}, // Additional folding + 0x1D6E2: []rune{0x03B1}, // Additional folding + 0x1D6E3: []rune{0x03B2}, // Additional folding + 0x1D6E4: []rune{0x03B3}, // Additional folding + 0x1D6E5: []rune{0x03B4}, // Additional folding + 0x1D6E6: []rune{0x03B5}, // Additional folding + 0x1D6E7: []rune{0x03B6}, // Additional folding + 0x1D6E8: []rune{0x03B7}, // Additional folding + 0x1D6E9: []rune{0x03B8}, // Additional folding + 0x1D6EA: []rune{0x03B9}, // Additional folding + 0x1D6EB: []rune{0x03BA}, // Additional folding + 0x1D6EC: []rune{0x03BB}, // Additional folding + 0x1D6ED: []rune{0x03BC}, // Additional folding + 0x1D6EE: []rune{0x03BD}, // Additional folding + 0x1D6EF: []rune{0x03BE}, // Additional folding + 0x1D6F0: []rune{0x03BF}, // Additional folding + 0x1D6F1: []rune{0x03C0}, // Additional folding + 0x1D6F2: []rune{0x03C1}, // Additional folding + 0x1D6F3: []rune{0x03B8}, // Additional folding + 0x1D6F4: []rune{0x03C3}, // Additional folding + 0x1D6F5: []rune{0x03C4}, // Additional folding + 0x1D6F6: []rune{0x03C5}, // Additional folding + 0x1D6F7: []rune{0x03C6}, // Additional folding + 0x1D6F8: []rune{0x03C7}, // Additional folding + 0x1D6F9: []rune{0x03C8}, // Additional folding + 0x1D6FA: []rune{0x03C9}, // Additional folding + 0x1D70D: []rune{0x03C3}, // Additional folding + 0x1D71C: []rune{0x03B1}, // Additional folding + 0x1D71D: []rune{0x03B2}, // Additional folding + 0x1D71E: []rune{0x03B3}, // Additional folding + 0x1D71F: []rune{0x03B4}, // Additional folding + 0x1D720: 
[]rune{0x03B5}, // Additional folding + 0x1D721: []rune{0x03B6}, // Additional folding + 0x1D722: []rune{0x03B7}, // Additional folding + 0x1D723: []rune{0x03B8}, // Additional folding + 0x1D724: []rune{0x03B9}, // Additional folding + 0x1D725: []rune{0x03BA}, // Additional folding + 0x1D726: []rune{0x03BB}, // Additional folding + 0x1D727: []rune{0x03BC}, // Additional folding + 0x1D728: []rune{0x03BD}, // Additional folding + 0x1D729: []rune{0x03BE}, // Additional folding + 0x1D72A: []rune{0x03BF}, // Additional folding + 0x1D72B: []rune{0x03C0}, // Additional folding + 0x1D72C: []rune{0x03C1}, // Additional folding + 0x1D72D: []rune{0x03B8}, // Additional folding + 0x1D72E: []rune{0x03C3}, // Additional folding + 0x1D72F: []rune{0x03C4}, // Additional folding + 0x1D730: []rune{0x03C5}, // Additional folding + 0x1D731: []rune{0x03C6}, // Additional folding + 0x1D732: []rune{0x03C7}, // Additional folding + 0x1D733: []rune{0x03C8}, // Additional folding + 0x1D734: []rune{0x03C9}, // Additional folding + 0x1D747: []rune{0x03C3}, // Additional folding + 0x1D756: []rune{0x03B1}, // Additional folding + 0x1D757: []rune{0x03B2}, // Additional folding + 0x1D758: []rune{0x03B3}, // Additional folding + 0x1D759: []rune{0x03B4}, // Additional folding + 0x1D75A: []rune{0x03B5}, // Additional folding + 0x1D75B: []rune{0x03B6}, // Additional folding + 0x1D75C: []rune{0x03B7}, // Additional folding + 0x1D75D: []rune{0x03B8}, // Additional folding + 0x1D75E: []rune{0x03B9}, // Additional folding + 0x1D75F: []rune{0x03BA}, // Additional folding + 0x1D760: []rune{0x03BB}, // Additional folding + 0x1D761: []rune{0x03BC}, // Additional folding + 0x1D762: []rune{0x03BD}, // Additional folding + 0x1D763: []rune{0x03BE}, // Additional folding + 0x1D764: []rune{0x03BF}, // Additional folding + 0x1D765: []rune{0x03C0}, // Additional folding + 0x1D766: []rune{0x03C1}, // Additional folding + 0x1D767: []rune{0x03B8}, // Additional folding + 0x1D768: []rune{0x03C3}, // Additional folding + 
0x1D769: []rune{0x03C4}, // Additional folding + 0x1D76A: []rune{0x03C5}, // Additional folding + 0x1D76B: []rune{0x03C6}, // Additional folding + 0x1D76C: []rune{0x03C7}, // Additional folding + 0x1D76D: []rune{0x03C8}, // Additional folding + 0x1D76E: []rune{0x03C9}, // Additional folding + 0x1D781: []rune{0x03C3}, // Additional folding + 0x1D790: []rune{0x03B1}, // Additional folding + 0x1D791: []rune{0x03B2}, // Additional folding + 0x1D792: []rune{0x03B3}, // Additional folding + 0x1D793: []rune{0x03B4}, // Additional folding + 0x1D794: []rune{0x03B5}, // Additional folding + 0x1D795: []rune{0x03B6}, // Additional folding + 0x1D796: []rune{0x03B7}, // Additional folding + 0x1D797: []rune{0x03B8}, // Additional folding + 0x1D798: []rune{0x03B9}, // Additional folding + 0x1D799: []rune{0x03BA}, // Additional folding + 0x1D79A: []rune{0x03BB}, // Additional folding + 0x1D79B: []rune{0x03BC}, // Additional folding + 0x1D79C: []rune{0x03BD}, // Additional folding + 0x1D79D: []rune{0x03BE}, // Additional folding + 0x1D79E: []rune{0x03BF}, // Additional folding + 0x1D79F: []rune{0x03C0}, // Additional folding + 0x1D7A0: []rune{0x03C1}, // Additional folding + 0x1D7A1: []rune{0x03B8}, // Additional folding + 0x1D7A2: []rune{0x03C3}, // Additional folding + 0x1D7A3: []rune{0x03C4}, // Additional folding + 0x1D7A4: []rune{0x03C5}, // Additional folding + 0x1D7A5: []rune{0x03C6}, // Additional folding + 0x1D7A6: []rune{0x03C7}, // Additional folding + 0x1D7A7: []rune{0x03C8}, // Additional folding + 0x1D7A8: []rune{0x03C9}, // Additional folding + 0x1D7BB: []rune{0x03C3}, // Additional folding +} + +// TableB2 represents RFC-3454 Table B.2. 
+var TableB2 Mapping = tableB2 + +var tableB3 = Mapping{ + 0x0041: []rune{0x0061}, // Case map + 0x0042: []rune{0x0062}, // Case map + 0x0043: []rune{0x0063}, // Case map + 0x0044: []rune{0x0064}, // Case map + 0x0045: []rune{0x0065}, // Case map + 0x0046: []rune{0x0066}, // Case map + 0x0047: []rune{0x0067}, // Case map + 0x0048: []rune{0x0068}, // Case map + 0x0049: []rune{0x0069}, // Case map + 0x004A: []rune{0x006A}, // Case map + 0x004B: []rune{0x006B}, // Case map + 0x004C: []rune{0x006C}, // Case map + 0x004D: []rune{0x006D}, // Case map + 0x004E: []rune{0x006E}, // Case map + 0x004F: []rune{0x006F}, // Case map + 0x0050: []rune{0x0070}, // Case map + 0x0051: []rune{0x0071}, // Case map + 0x0052: []rune{0x0072}, // Case map + 0x0053: []rune{0x0073}, // Case map + 0x0054: []rune{0x0074}, // Case map + 0x0055: []rune{0x0075}, // Case map + 0x0056: []rune{0x0076}, // Case map + 0x0057: []rune{0x0077}, // Case map + 0x0058: []rune{0x0078}, // Case map + 0x0059: []rune{0x0079}, // Case map + 0x005A: []rune{0x007A}, // Case map + 0x00B5: []rune{0x03BC}, // Case map + 0x00C0: []rune{0x00E0}, // Case map + 0x00C1: []rune{0x00E1}, // Case map + 0x00C2: []rune{0x00E2}, // Case map + 0x00C3: []rune{0x00E3}, // Case map + 0x00C4: []rune{0x00E4}, // Case map + 0x00C5: []rune{0x00E5}, // Case map + 0x00C6: []rune{0x00E6}, // Case map + 0x00C7: []rune{0x00E7}, // Case map + 0x00C8: []rune{0x00E8}, // Case map + 0x00C9: []rune{0x00E9}, // Case map + 0x00CA: []rune{0x00EA}, // Case map + 0x00CB: []rune{0x00EB}, // Case map + 0x00CC: []rune{0x00EC}, // Case map + 0x00CD: []rune{0x00ED}, // Case map + 0x00CE: []rune{0x00EE}, // Case map + 0x00CF: []rune{0x00EF}, // Case map + 0x00D0: []rune{0x00F0}, // Case map + 0x00D1: []rune{0x00F1}, // Case map + 0x00D2: []rune{0x00F2}, // Case map + 0x00D3: []rune{0x00F3}, // Case map + 0x00D4: []rune{0x00F4}, // Case map + 0x00D5: []rune{0x00F5}, // Case map + 0x00D6: []rune{0x00F6}, // Case map + 0x00D8: []rune{0x00F8}, // Case map + 
0x00D9: []rune{0x00F9}, // Case map + 0x00DA: []rune{0x00FA}, // Case map + 0x00DB: []rune{0x00FB}, // Case map + 0x00DC: []rune{0x00FC}, // Case map + 0x00DD: []rune{0x00FD}, // Case map + 0x00DE: []rune{0x00FE}, // Case map + 0x00DF: []rune{0x0073, 0x0073}, // Case map + 0x0100: []rune{0x0101}, // Case map + 0x0102: []rune{0x0103}, // Case map + 0x0104: []rune{0x0105}, // Case map + 0x0106: []rune{0x0107}, // Case map + 0x0108: []rune{0x0109}, // Case map + 0x010A: []rune{0x010B}, // Case map + 0x010C: []rune{0x010D}, // Case map + 0x010E: []rune{0x010F}, // Case map + 0x0110: []rune{0x0111}, // Case map + 0x0112: []rune{0x0113}, // Case map + 0x0114: []rune{0x0115}, // Case map + 0x0116: []rune{0x0117}, // Case map + 0x0118: []rune{0x0119}, // Case map + 0x011A: []rune{0x011B}, // Case map + 0x011C: []rune{0x011D}, // Case map + 0x011E: []rune{0x011F}, // Case map + 0x0120: []rune{0x0121}, // Case map + 0x0122: []rune{0x0123}, // Case map + 0x0124: []rune{0x0125}, // Case map + 0x0126: []rune{0x0127}, // Case map + 0x0128: []rune{0x0129}, // Case map + 0x012A: []rune{0x012B}, // Case map + 0x012C: []rune{0x012D}, // Case map + 0x012E: []rune{0x012F}, // Case map + 0x0130: []rune{0x0069, 0x0307}, // Case map + 0x0132: []rune{0x0133}, // Case map + 0x0134: []rune{0x0135}, // Case map + 0x0136: []rune{0x0137}, // Case map + 0x0139: []rune{0x013A}, // Case map + 0x013B: []rune{0x013C}, // Case map + 0x013D: []rune{0x013E}, // Case map + 0x013F: []rune{0x0140}, // Case map + 0x0141: []rune{0x0142}, // Case map + 0x0143: []rune{0x0144}, // Case map + 0x0145: []rune{0x0146}, // Case map + 0x0147: []rune{0x0148}, // Case map + 0x0149: []rune{0x02BC, 0x006E}, // Case map + 0x014A: []rune{0x014B}, // Case map + 0x014C: []rune{0x014D}, // Case map + 0x014E: []rune{0x014F}, // Case map + 0x0150: []rune{0x0151}, // Case map + 0x0152: []rune{0x0153}, // Case map + 0x0154: []rune{0x0155}, // Case map + 0x0156: []rune{0x0157}, // Case map + 0x0158: []rune{0x0159}, // Case map + 
0x015A: []rune{0x015B}, // Case map + 0x015C: []rune{0x015D}, // Case map + 0x015E: []rune{0x015F}, // Case map + 0x0160: []rune{0x0161}, // Case map + 0x0162: []rune{0x0163}, // Case map + 0x0164: []rune{0x0165}, // Case map + 0x0166: []rune{0x0167}, // Case map + 0x0168: []rune{0x0169}, // Case map + 0x016A: []rune{0x016B}, // Case map + 0x016C: []rune{0x016D}, // Case map + 0x016E: []rune{0x016F}, // Case map + 0x0170: []rune{0x0171}, // Case map + 0x0172: []rune{0x0173}, // Case map + 0x0174: []rune{0x0175}, // Case map + 0x0176: []rune{0x0177}, // Case map + 0x0178: []rune{0x00FF}, // Case map + 0x0179: []rune{0x017A}, // Case map + 0x017B: []rune{0x017C}, // Case map + 0x017D: []rune{0x017E}, // Case map + 0x017F: []rune{0x0073}, // Case map + 0x0181: []rune{0x0253}, // Case map + 0x0182: []rune{0x0183}, // Case map + 0x0184: []rune{0x0185}, // Case map + 0x0186: []rune{0x0254}, // Case map + 0x0187: []rune{0x0188}, // Case map + 0x0189: []rune{0x0256}, // Case map + 0x018A: []rune{0x0257}, // Case map + 0x018B: []rune{0x018C}, // Case map + 0x018E: []rune{0x01DD}, // Case map + 0x018F: []rune{0x0259}, // Case map + 0x0190: []rune{0x025B}, // Case map + 0x0191: []rune{0x0192}, // Case map + 0x0193: []rune{0x0260}, // Case map + 0x0194: []rune{0x0263}, // Case map + 0x0196: []rune{0x0269}, // Case map + 0x0197: []rune{0x0268}, // Case map + 0x0198: []rune{0x0199}, // Case map + 0x019C: []rune{0x026F}, // Case map + 0x019D: []rune{0x0272}, // Case map + 0x019F: []rune{0x0275}, // Case map + 0x01A0: []rune{0x01A1}, // Case map + 0x01A2: []rune{0x01A3}, // Case map + 0x01A4: []rune{0x01A5}, // Case map + 0x01A6: []rune{0x0280}, // Case map + 0x01A7: []rune{0x01A8}, // Case map + 0x01A9: []rune{0x0283}, // Case map + 0x01AC: []rune{0x01AD}, // Case map + 0x01AE: []rune{0x0288}, // Case map + 0x01AF: []rune{0x01B0}, // Case map + 0x01B1: []rune{0x028A}, // Case map + 0x01B2: []rune{0x028B}, // Case map + 0x01B3: []rune{0x01B4}, // Case map + 0x01B5: []rune{0x01B6}, 
// Case map + 0x01B7: []rune{0x0292}, // Case map + 0x01B8: []rune{0x01B9}, // Case map + 0x01BC: []rune{0x01BD}, // Case map + 0x01C4: []rune{0x01C6}, // Case map + 0x01C5: []rune{0x01C6}, // Case map + 0x01C7: []rune{0x01C9}, // Case map + 0x01C8: []rune{0x01C9}, // Case map + 0x01CA: []rune{0x01CC}, // Case map + 0x01CB: []rune{0x01CC}, // Case map + 0x01CD: []rune{0x01CE}, // Case map + 0x01CF: []rune{0x01D0}, // Case map + 0x01D1: []rune{0x01D2}, // Case map + 0x01D3: []rune{0x01D4}, // Case map + 0x01D5: []rune{0x01D6}, // Case map + 0x01D7: []rune{0x01D8}, // Case map + 0x01D9: []rune{0x01DA}, // Case map + 0x01DB: []rune{0x01DC}, // Case map + 0x01DE: []rune{0x01DF}, // Case map + 0x01E0: []rune{0x01E1}, // Case map + 0x01E2: []rune{0x01E3}, // Case map + 0x01E4: []rune{0x01E5}, // Case map + 0x01E6: []rune{0x01E7}, // Case map + 0x01E8: []rune{0x01E9}, // Case map + 0x01EA: []rune{0x01EB}, // Case map + 0x01EC: []rune{0x01ED}, // Case map + 0x01EE: []rune{0x01EF}, // Case map + 0x01F0: []rune{0x006A, 0x030C}, // Case map + 0x01F1: []rune{0x01F3}, // Case map + 0x01F2: []rune{0x01F3}, // Case map + 0x01F4: []rune{0x01F5}, // Case map + 0x01F6: []rune{0x0195}, // Case map + 0x01F7: []rune{0x01BF}, // Case map + 0x01F8: []rune{0x01F9}, // Case map + 0x01FA: []rune{0x01FB}, // Case map + 0x01FC: []rune{0x01FD}, // Case map + 0x01FE: []rune{0x01FF}, // Case map + 0x0200: []rune{0x0201}, // Case map + 0x0202: []rune{0x0203}, // Case map + 0x0204: []rune{0x0205}, // Case map + 0x0206: []rune{0x0207}, // Case map + 0x0208: []rune{0x0209}, // Case map + 0x020A: []rune{0x020B}, // Case map + 0x020C: []rune{0x020D}, // Case map + 0x020E: []rune{0x020F}, // Case map + 0x0210: []rune{0x0211}, // Case map + 0x0212: []rune{0x0213}, // Case map + 0x0214: []rune{0x0215}, // Case map + 0x0216: []rune{0x0217}, // Case map + 0x0218: []rune{0x0219}, // Case map + 0x021A: []rune{0x021B}, // Case map + 0x021C: []rune{0x021D}, // Case map + 0x021E: []rune{0x021F}, // Case map + 
0x0220: []rune{0x019E}, // Case map + 0x0222: []rune{0x0223}, // Case map + 0x0224: []rune{0x0225}, // Case map + 0x0226: []rune{0x0227}, // Case map + 0x0228: []rune{0x0229}, // Case map + 0x022A: []rune{0x022B}, // Case map + 0x022C: []rune{0x022D}, // Case map + 0x022E: []rune{0x022F}, // Case map + 0x0230: []rune{0x0231}, // Case map + 0x0232: []rune{0x0233}, // Case map + 0x0345: []rune{0x03B9}, // Case map + 0x0386: []rune{0x03AC}, // Case map + 0x0388: []rune{0x03AD}, // Case map + 0x0389: []rune{0x03AE}, // Case map + 0x038A: []rune{0x03AF}, // Case map + 0x038C: []rune{0x03CC}, // Case map + 0x038E: []rune{0x03CD}, // Case map + 0x038F: []rune{0x03CE}, // Case map + 0x0390: []rune{0x03B9, 0x0308, 0x0301}, // Case map + 0x0391: []rune{0x03B1}, // Case map + 0x0392: []rune{0x03B2}, // Case map + 0x0393: []rune{0x03B3}, // Case map + 0x0394: []rune{0x03B4}, // Case map + 0x0395: []rune{0x03B5}, // Case map + 0x0396: []rune{0x03B6}, // Case map + 0x0397: []rune{0x03B7}, // Case map + 0x0398: []rune{0x03B8}, // Case map + 0x0399: []rune{0x03B9}, // Case map + 0x039A: []rune{0x03BA}, // Case map + 0x039B: []rune{0x03BB}, // Case map + 0x039C: []rune{0x03BC}, // Case map + 0x039D: []rune{0x03BD}, // Case map + 0x039E: []rune{0x03BE}, // Case map + 0x039F: []rune{0x03BF}, // Case map + 0x03A0: []rune{0x03C0}, // Case map + 0x03A1: []rune{0x03C1}, // Case map + 0x03A3: []rune{0x03C3}, // Case map + 0x03A4: []rune{0x03C4}, // Case map + 0x03A5: []rune{0x03C5}, // Case map + 0x03A6: []rune{0x03C6}, // Case map + 0x03A7: []rune{0x03C7}, // Case map + 0x03A8: []rune{0x03C8}, // Case map + 0x03A9: []rune{0x03C9}, // Case map + 0x03AA: []rune{0x03CA}, // Case map + 0x03AB: []rune{0x03CB}, // Case map + 0x03B0: []rune{0x03C5, 0x0308, 0x0301}, // Case map + 0x03C2: []rune{0x03C3}, // Case map + 0x03D0: []rune{0x03B2}, // Case map + 0x03D1: []rune{0x03B8}, // Case map + 0x03D5: []rune{0x03C6}, // Case map + 0x03D6: []rune{0x03C0}, // Case map + 0x03D8: []rune{0x03D9}, // 
Case map + 0x03DA: []rune{0x03DB}, // Case map + 0x03DC: []rune{0x03DD}, // Case map + 0x03DE: []rune{0x03DF}, // Case map + 0x03E0: []rune{0x03E1}, // Case map + 0x03E2: []rune{0x03E3}, // Case map + 0x03E4: []rune{0x03E5}, // Case map + 0x03E6: []rune{0x03E7}, // Case map + 0x03E8: []rune{0x03E9}, // Case map + 0x03EA: []rune{0x03EB}, // Case map + 0x03EC: []rune{0x03ED}, // Case map + 0x03EE: []rune{0x03EF}, // Case map + 0x03F0: []rune{0x03BA}, // Case map + 0x03F1: []rune{0x03C1}, // Case map + 0x03F2: []rune{0x03C3}, // Case map + 0x03F4: []rune{0x03B8}, // Case map + 0x03F5: []rune{0x03B5}, // Case map + 0x0400: []rune{0x0450}, // Case map + 0x0401: []rune{0x0451}, // Case map + 0x0402: []rune{0x0452}, // Case map + 0x0403: []rune{0x0453}, // Case map + 0x0404: []rune{0x0454}, // Case map + 0x0405: []rune{0x0455}, // Case map + 0x0406: []rune{0x0456}, // Case map + 0x0407: []rune{0x0457}, // Case map + 0x0408: []rune{0x0458}, // Case map + 0x0409: []rune{0x0459}, // Case map + 0x040A: []rune{0x045A}, // Case map + 0x040B: []rune{0x045B}, // Case map + 0x040C: []rune{0x045C}, // Case map + 0x040D: []rune{0x045D}, // Case map + 0x040E: []rune{0x045E}, // Case map + 0x040F: []rune{0x045F}, // Case map + 0x0410: []rune{0x0430}, // Case map + 0x0411: []rune{0x0431}, // Case map + 0x0412: []rune{0x0432}, // Case map + 0x0413: []rune{0x0433}, // Case map + 0x0414: []rune{0x0434}, // Case map + 0x0415: []rune{0x0435}, // Case map + 0x0416: []rune{0x0436}, // Case map + 0x0417: []rune{0x0437}, // Case map + 0x0418: []rune{0x0438}, // Case map + 0x0419: []rune{0x0439}, // Case map + 0x041A: []rune{0x043A}, // Case map + 0x041B: []rune{0x043B}, // Case map + 0x041C: []rune{0x043C}, // Case map + 0x041D: []rune{0x043D}, // Case map + 0x041E: []rune{0x043E}, // Case map + 0x041F: []rune{0x043F}, // Case map + 0x0420: []rune{0x0440}, // Case map + 0x0421: []rune{0x0441}, // Case map + 0x0422: []rune{0x0442}, // Case map + 0x0423: []rune{0x0443}, // Case map + 0x0424: 
[]rune{0x0444}, // Case map + 0x0425: []rune{0x0445}, // Case map + 0x0426: []rune{0x0446}, // Case map + 0x0427: []rune{0x0447}, // Case map + 0x0428: []rune{0x0448}, // Case map + 0x0429: []rune{0x0449}, // Case map + 0x042A: []rune{0x044A}, // Case map + 0x042B: []rune{0x044B}, // Case map + 0x042C: []rune{0x044C}, // Case map + 0x042D: []rune{0x044D}, // Case map + 0x042E: []rune{0x044E}, // Case map + 0x042F: []rune{0x044F}, // Case map + 0x0460: []rune{0x0461}, // Case map + 0x0462: []rune{0x0463}, // Case map + 0x0464: []rune{0x0465}, // Case map + 0x0466: []rune{0x0467}, // Case map + 0x0468: []rune{0x0469}, // Case map + 0x046A: []rune{0x046B}, // Case map + 0x046C: []rune{0x046D}, // Case map + 0x046E: []rune{0x046F}, // Case map + 0x0470: []rune{0x0471}, // Case map + 0x0472: []rune{0x0473}, // Case map + 0x0474: []rune{0x0475}, // Case map + 0x0476: []rune{0x0477}, // Case map + 0x0478: []rune{0x0479}, // Case map + 0x047A: []rune{0x047B}, // Case map + 0x047C: []rune{0x047D}, // Case map + 0x047E: []rune{0x047F}, // Case map + 0x0480: []rune{0x0481}, // Case map + 0x048A: []rune{0x048B}, // Case map + 0x048C: []rune{0x048D}, // Case map + 0x048E: []rune{0x048F}, // Case map + 0x0490: []rune{0x0491}, // Case map + 0x0492: []rune{0x0493}, // Case map + 0x0494: []rune{0x0495}, // Case map + 0x0496: []rune{0x0497}, // Case map + 0x0498: []rune{0x0499}, // Case map + 0x049A: []rune{0x049B}, // Case map + 0x049C: []rune{0x049D}, // Case map + 0x049E: []rune{0x049F}, // Case map + 0x04A0: []rune{0x04A1}, // Case map + 0x04A2: []rune{0x04A3}, // Case map + 0x04A4: []rune{0x04A5}, // Case map + 0x04A6: []rune{0x04A7}, // Case map + 0x04A8: []rune{0x04A9}, // Case map + 0x04AA: []rune{0x04AB}, // Case map + 0x04AC: []rune{0x04AD}, // Case map + 0x04AE: []rune{0x04AF}, // Case map + 0x04B0: []rune{0x04B1}, // Case map + 0x04B2: []rune{0x04B3}, // Case map + 0x04B4: []rune{0x04B5}, // Case map + 0x04B6: []rune{0x04B7}, // Case map + 0x04B8: []rune{0x04B9}, // Case 
map + 0x04BA: []rune{0x04BB}, // Case map + 0x04BC: []rune{0x04BD}, // Case map + 0x04BE: []rune{0x04BF}, // Case map + 0x04C1: []rune{0x04C2}, // Case map + 0x04C3: []rune{0x04C4}, // Case map + 0x04C5: []rune{0x04C6}, // Case map + 0x04C7: []rune{0x04C8}, // Case map + 0x04C9: []rune{0x04CA}, // Case map + 0x04CB: []rune{0x04CC}, // Case map + 0x04CD: []rune{0x04CE}, // Case map + 0x04D0: []rune{0x04D1}, // Case map + 0x04D2: []rune{0x04D3}, // Case map + 0x04D4: []rune{0x04D5}, // Case map + 0x04D6: []rune{0x04D7}, // Case map + 0x04D8: []rune{0x04D9}, // Case map + 0x04DA: []rune{0x04DB}, // Case map + 0x04DC: []rune{0x04DD}, // Case map + 0x04DE: []rune{0x04DF}, // Case map + 0x04E0: []rune{0x04E1}, // Case map + 0x04E2: []rune{0x04E3}, // Case map + 0x04E4: []rune{0x04E5}, // Case map + 0x04E6: []rune{0x04E7}, // Case map + 0x04E8: []rune{0x04E9}, // Case map + 0x04EA: []rune{0x04EB}, // Case map + 0x04EC: []rune{0x04ED}, // Case map + 0x04EE: []rune{0x04EF}, // Case map + 0x04F0: []rune{0x04F1}, // Case map + 0x04F2: []rune{0x04F3}, // Case map + 0x04F4: []rune{0x04F5}, // Case map + 0x04F8: []rune{0x04F9}, // Case map + 0x0500: []rune{0x0501}, // Case map + 0x0502: []rune{0x0503}, // Case map + 0x0504: []rune{0x0505}, // Case map + 0x0506: []rune{0x0507}, // Case map + 0x0508: []rune{0x0509}, // Case map + 0x050A: []rune{0x050B}, // Case map + 0x050C: []rune{0x050D}, // Case map + 0x050E: []rune{0x050F}, // Case map + 0x0531: []rune{0x0561}, // Case map + 0x0532: []rune{0x0562}, // Case map + 0x0533: []rune{0x0563}, // Case map + 0x0534: []rune{0x0564}, // Case map + 0x0535: []rune{0x0565}, // Case map + 0x0536: []rune{0x0566}, // Case map + 0x0537: []rune{0x0567}, // Case map + 0x0538: []rune{0x0568}, // Case map + 0x0539: []rune{0x0569}, // Case map + 0x053A: []rune{0x056A}, // Case map + 0x053B: []rune{0x056B}, // Case map + 0x053C: []rune{0x056C}, // Case map + 0x053D: []rune{0x056D}, // Case map + 0x053E: []rune{0x056E}, // Case map + 0x053F: 
[]rune{0x056F}, // Case map + 0x0540: []rune{0x0570}, // Case map + 0x0541: []rune{0x0571}, // Case map + 0x0542: []rune{0x0572}, // Case map + 0x0543: []rune{0x0573}, // Case map + 0x0544: []rune{0x0574}, // Case map + 0x0545: []rune{0x0575}, // Case map + 0x0546: []rune{0x0576}, // Case map + 0x0547: []rune{0x0577}, // Case map + 0x0548: []rune{0x0578}, // Case map + 0x0549: []rune{0x0579}, // Case map + 0x054A: []rune{0x057A}, // Case map + 0x054B: []rune{0x057B}, // Case map + 0x054C: []rune{0x057C}, // Case map + 0x054D: []rune{0x057D}, // Case map + 0x054E: []rune{0x057E}, // Case map + 0x054F: []rune{0x057F}, // Case map + 0x0550: []rune{0x0580}, // Case map + 0x0551: []rune{0x0581}, // Case map + 0x0552: []rune{0x0582}, // Case map + 0x0553: []rune{0x0583}, // Case map + 0x0554: []rune{0x0584}, // Case map + 0x0555: []rune{0x0585}, // Case map + 0x0556: []rune{0x0586}, // Case map + 0x0587: []rune{0x0565, 0x0582}, // Case map + 0x1E00: []rune{0x1E01}, // Case map + 0x1E02: []rune{0x1E03}, // Case map + 0x1E04: []rune{0x1E05}, // Case map + 0x1E06: []rune{0x1E07}, // Case map + 0x1E08: []rune{0x1E09}, // Case map + 0x1E0A: []rune{0x1E0B}, // Case map + 0x1E0C: []rune{0x1E0D}, // Case map + 0x1E0E: []rune{0x1E0F}, // Case map + 0x1E10: []rune{0x1E11}, // Case map + 0x1E12: []rune{0x1E13}, // Case map + 0x1E14: []rune{0x1E15}, // Case map + 0x1E16: []rune{0x1E17}, // Case map + 0x1E18: []rune{0x1E19}, // Case map + 0x1E1A: []rune{0x1E1B}, // Case map + 0x1E1C: []rune{0x1E1D}, // Case map + 0x1E1E: []rune{0x1E1F}, // Case map + 0x1E20: []rune{0x1E21}, // Case map + 0x1E22: []rune{0x1E23}, // Case map + 0x1E24: []rune{0x1E25}, // Case map + 0x1E26: []rune{0x1E27}, // Case map + 0x1E28: []rune{0x1E29}, // Case map + 0x1E2A: []rune{0x1E2B}, // Case map + 0x1E2C: []rune{0x1E2D}, // Case map + 0x1E2E: []rune{0x1E2F}, // Case map + 0x1E30: []rune{0x1E31}, // Case map + 0x1E32: []rune{0x1E33}, // Case map + 0x1E34: []rune{0x1E35}, // Case map + 0x1E36: []rune{0x1E37}, 
// Case map + 0x1E38: []rune{0x1E39}, // Case map + 0x1E3A: []rune{0x1E3B}, // Case map + 0x1E3C: []rune{0x1E3D}, // Case map + 0x1E3E: []rune{0x1E3F}, // Case map + 0x1E40: []rune{0x1E41}, // Case map + 0x1E42: []rune{0x1E43}, // Case map + 0x1E44: []rune{0x1E45}, // Case map + 0x1E46: []rune{0x1E47}, // Case map + 0x1E48: []rune{0x1E49}, // Case map + 0x1E4A: []rune{0x1E4B}, // Case map + 0x1E4C: []rune{0x1E4D}, // Case map + 0x1E4E: []rune{0x1E4F}, // Case map + 0x1E50: []rune{0x1E51}, // Case map + 0x1E52: []rune{0x1E53}, // Case map + 0x1E54: []rune{0x1E55}, // Case map + 0x1E56: []rune{0x1E57}, // Case map + 0x1E58: []rune{0x1E59}, // Case map + 0x1E5A: []rune{0x1E5B}, // Case map + 0x1E5C: []rune{0x1E5D}, // Case map + 0x1E5E: []rune{0x1E5F}, // Case map + 0x1E60: []rune{0x1E61}, // Case map + 0x1E62: []rune{0x1E63}, // Case map + 0x1E64: []rune{0x1E65}, // Case map + 0x1E66: []rune{0x1E67}, // Case map + 0x1E68: []rune{0x1E69}, // Case map + 0x1E6A: []rune{0x1E6B}, // Case map + 0x1E6C: []rune{0x1E6D}, // Case map + 0x1E6E: []rune{0x1E6F}, // Case map + 0x1E70: []rune{0x1E71}, // Case map + 0x1E72: []rune{0x1E73}, // Case map + 0x1E74: []rune{0x1E75}, // Case map + 0x1E76: []rune{0x1E77}, // Case map + 0x1E78: []rune{0x1E79}, // Case map + 0x1E7A: []rune{0x1E7B}, // Case map + 0x1E7C: []rune{0x1E7D}, // Case map + 0x1E7E: []rune{0x1E7F}, // Case map + 0x1E80: []rune{0x1E81}, // Case map + 0x1E82: []rune{0x1E83}, // Case map + 0x1E84: []rune{0x1E85}, // Case map + 0x1E86: []rune{0x1E87}, // Case map + 0x1E88: []rune{0x1E89}, // Case map + 0x1E8A: []rune{0x1E8B}, // Case map + 0x1E8C: []rune{0x1E8D}, // Case map + 0x1E8E: []rune{0x1E8F}, // Case map + 0x1E90: []rune{0x1E91}, // Case map + 0x1E92: []rune{0x1E93}, // Case map + 0x1E94: []rune{0x1E95}, // Case map + 0x1E96: []rune{0x0068, 0x0331}, // Case map + 0x1E97: []rune{0x0074, 0x0308}, // Case map + 0x1E98: []rune{0x0077, 0x030A}, // Case map + 0x1E99: []rune{0x0079, 0x030A}, // Case map + 0x1E9A: 
[]rune{0x0061, 0x02BE}, // Case map + 0x1E9B: []rune{0x1E61}, // Case map + 0x1EA0: []rune{0x1EA1}, // Case map + 0x1EA2: []rune{0x1EA3}, // Case map + 0x1EA4: []rune{0x1EA5}, // Case map + 0x1EA6: []rune{0x1EA7}, // Case map + 0x1EA8: []rune{0x1EA9}, // Case map + 0x1EAA: []rune{0x1EAB}, // Case map + 0x1EAC: []rune{0x1EAD}, // Case map + 0x1EAE: []rune{0x1EAF}, // Case map + 0x1EB0: []rune{0x1EB1}, // Case map + 0x1EB2: []rune{0x1EB3}, // Case map + 0x1EB4: []rune{0x1EB5}, // Case map + 0x1EB6: []rune{0x1EB7}, // Case map + 0x1EB8: []rune{0x1EB9}, // Case map + 0x1EBA: []rune{0x1EBB}, // Case map + 0x1EBC: []rune{0x1EBD}, // Case map + 0x1EBE: []rune{0x1EBF}, // Case map + 0x1EC0: []rune{0x1EC1}, // Case map + 0x1EC2: []rune{0x1EC3}, // Case map + 0x1EC4: []rune{0x1EC5}, // Case map + 0x1EC6: []rune{0x1EC7}, // Case map + 0x1EC8: []rune{0x1EC9}, // Case map + 0x1ECA: []rune{0x1ECB}, // Case map + 0x1ECC: []rune{0x1ECD}, // Case map + 0x1ECE: []rune{0x1ECF}, // Case map + 0x1ED0: []rune{0x1ED1}, // Case map + 0x1ED2: []rune{0x1ED3}, // Case map + 0x1ED4: []rune{0x1ED5}, // Case map + 0x1ED6: []rune{0x1ED7}, // Case map + 0x1ED8: []rune{0x1ED9}, // Case map + 0x1EDA: []rune{0x1EDB}, // Case map + 0x1EDC: []rune{0x1EDD}, // Case map + 0x1EDE: []rune{0x1EDF}, // Case map + 0x1EE0: []rune{0x1EE1}, // Case map + 0x1EE2: []rune{0x1EE3}, // Case map + 0x1EE4: []rune{0x1EE5}, // Case map + 0x1EE6: []rune{0x1EE7}, // Case map + 0x1EE8: []rune{0x1EE9}, // Case map + 0x1EEA: []rune{0x1EEB}, // Case map + 0x1EEC: []rune{0x1EED}, // Case map + 0x1EEE: []rune{0x1EEF}, // Case map + 0x1EF0: []rune{0x1EF1}, // Case map + 0x1EF2: []rune{0x1EF3}, // Case map + 0x1EF4: []rune{0x1EF5}, // Case map + 0x1EF6: []rune{0x1EF7}, // Case map + 0x1EF8: []rune{0x1EF9}, // Case map + 0x1F08: []rune{0x1F00}, // Case map + 0x1F09: []rune{0x1F01}, // Case map + 0x1F0A: []rune{0x1F02}, // Case map + 0x1F0B: []rune{0x1F03}, // Case map + 0x1F0C: []rune{0x1F04}, // Case map + 0x1F0D: []rune{0x1F05}, 
// Case map + 0x1F0E: []rune{0x1F06}, // Case map + 0x1F0F: []rune{0x1F07}, // Case map + 0x1F18: []rune{0x1F10}, // Case map + 0x1F19: []rune{0x1F11}, // Case map + 0x1F1A: []rune{0x1F12}, // Case map + 0x1F1B: []rune{0x1F13}, // Case map + 0x1F1C: []rune{0x1F14}, // Case map + 0x1F1D: []rune{0x1F15}, // Case map + 0x1F28: []rune{0x1F20}, // Case map + 0x1F29: []rune{0x1F21}, // Case map + 0x1F2A: []rune{0x1F22}, // Case map + 0x1F2B: []rune{0x1F23}, // Case map + 0x1F2C: []rune{0x1F24}, // Case map + 0x1F2D: []rune{0x1F25}, // Case map + 0x1F2E: []rune{0x1F26}, // Case map + 0x1F2F: []rune{0x1F27}, // Case map + 0x1F38: []rune{0x1F30}, // Case map + 0x1F39: []rune{0x1F31}, // Case map + 0x1F3A: []rune{0x1F32}, // Case map + 0x1F3B: []rune{0x1F33}, // Case map + 0x1F3C: []rune{0x1F34}, // Case map + 0x1F3D: []rune{0x1F35}, // Case map + 0x1F3E: []rune{0x1F36}, // Case map + 0x1F3F: []rune{0x1F37}, // Case map + 0x1F48: []rune{0x1F40}, // Case map + 0x1F49: []rune{0x1F41}, // Case map + 0x1F4A: []rune{0x1F42}, // Case map + 0x1F4B: []rune{0x1F43}, // Case map + 0x1F4C: []rune{0x1F44}, // Case map + 0x1F4D: []rune{0x1F45}, // Case map + 0x1F50: []rune{0x03C5, 0x0313}, // Case map + 0x1F52: []rune{0x03C5, 0x0313, 0x0300}, // Case map + 0x1F54: []rune{0x03C5, 0x0313, 0x0301}, // Case map + 0x1F56: []rune{0x03C5, 0x0313, 0x0342}, // Case map + 0x1F59: []rune{0x1F51}, // Case map + 0x1F5B: []rune{0x1F53}, // Case map + 0x1F5D: []rune{0x1F55}, // Case map + 0x1F5F: []rune{0x1F57}, // Case map + 0x1F68: []rune{0x1F60}, // Case map + 0x1F69: []rune{0x1F61}, // Case map + 0x1F6A: []rune{0x1F62}, // Case map + 0x1F6B: []rune{0x1F63}, // Case map + 0x1F6C: []rune{0x1F64}, // Case map + 0x1F6D: []rune{0x1F65}, // Case map + 0x1F6E: []rune{0x1F66}, // Case map + 0x1F6F: []rune{0x1F67}, // Case map + 0x1F80: []rune{0x1F00, 0x03B9}, // Case map + 0x1F81: []rune{0x1F01, 0x03B9}, // Case map + 0x1F82: []rune{0x1F02, 0x03B9}, // Case map + 0x1F83: []rune{0x1F03, 0x03B9}, // Case map 
+ 0x1F84: []rune{0x1F04, 0x03B9}, // Case map + 0x1F85: []rune{0x1F05, 0x03B9}, // Case map + 0x1F86: []rune{0x1F06, 0x03B9}, // Case map + 0x1F87: []rune{0x1F07, 0x03B9}, // Case map + 0x1F88: []rune{0x1F00, 0x03B9}, // Case map + 0x1F89: []rune{0x1F01, 0x03B9}, // Case map + 0x1F8A: []rune{0x1F02, 0x03B9}, // Case map + 0x1F8B: []rune{0x1F03, 0x03B9}, // Case map + 0x1F8C: []rune{0x1F04, 0x03B9}, // Case map + 0x1F8D: []rune{0x1F05, 0x03B9}, // Case map + 0x1F8E: []rune{0x1F06, 0x03B9}, // Case map + 0x1F8F: []rune{0x1F07, 0x03B9}, // Case map + 0x1F90: []rune{0x1F20, 0x03B9}, // Case map + 0x1F91: []rune{0x1F21, 0x03B9}, // Case map + 0x1F92: []rune{0x1F22, 0x03B9}, // Case map + 0x1F93: []rune{0x1F23, 0x03B9}, // Case map + 0x1F94: []rune{0x1F24, 0x03B9}, // Case map + 0x1F95: []rune{0x1F25, 0x03B9}, // Case map + 0x1F96: []rune{0x1F26, 0x03B9}, // Case map + 0x1F97: []rune{0x1F27, 0x03B9}, // Case map + 0x1F98: []rune{0x1F20, 0x03B9}, // Case map + 0x1F99: []rune{0x1F21, 0x03B9}, // Case map + 0x1F9A: []rune{0x1F22, 0x03B9}, // Case map + 0x1F9B: []rune{0x1F23, 0x03B9}, // Case map + 0x1F9C: []rune{0x1F24, 0x03B9}, // Case map + 0x1F9D: []rune{0x1F25, 0x03B9}, // Case map + 0x1F9E: []rune{0x1F26, 0x03B9}, // Case map + 0x1F9F: []rune{0x1F27, 0x03B9}, // Case map + 0x1FA0: []rune{0x1F60, 0x03B9}, // Case map + 0x1FA1: []rune{0x1F61, 0x03B9}, // Case map + 0x1FA2: []rune{0x1F62, 0x03B9}, // Case map + 0x1FA3: []rune{0x1F63, 0x03B9}, // Case map + 0x1FA4: []rune{0x1F64, 0x03B9}, // Case map + 0x1FA5: []rune{0x1F65, 0x03B9}, // Case map + 0x1FA6: []rune{0x1F66, 0x03B9}, // Case map + 0x1FA7: []rune{0x1F67, 0x03B9}, // Case map + 0x1FA8: []rune{0x1F60, 0x03B9}, // Case map + 0x1FA9: []rune{0x1F61, 0x03B9}, // Case map + 0x1FAA: []rune{0x1F62, 0x03B9}, // Case map + 0x1FAB: []rune{0x1F63, 0x03B9}, // Case map + 0x1FAC: []rune{0x1F64, 0x03B9}, // Case map + 0x1FAD: []rune{0x1F65, 0x03B9}, // Case map + 0x1FAE: []rune{0x1F66, 0x03B9}, // Case map + 0x1FAF: 
[]rune{0x1F67, 0x03B9}, // Case map + 0x1FB2: []rune{0x1F70, 0x03B9}, // Case map + 0x1FB3: []rune{0x03B1, 0x03B9}, // Case map + 0x1FB4: []rune{0x03AC, 0x03B9}, // Case map + 0x1FB6: []rune{0x03B1, 0x0342}, // Case map + 0x1FB7: []rune{0x03B1, 0x0342, 0x03B9}, // Case map + 0x1FB8: []rune{0x1FB0}, // Case map + 0x1FB9: []rune{0x1FB1}, // Case map + 0x1FBA: []rune{0x1F70}, // Case map + 0x1FBB: []rune{0x1F71}, // Case map + 0x1FBC: []rune{0x03B1, 0x03B9}, // Case map + 0x1FBE: []rune{0x03B9}, // Case map + 0x1FC2: []rune{0x1F74, 0x03B9}, // Case map + 0x1FC3: []rune{0x03B7, 0x03B9}, // Case map + 0x1FC4: []rune{0x03AE, 0x03B9}, // Case map + 0x1FC6: []rune{0x03B7, 0x0342}, // Case map + 0x1FC7: []rune{0x03B7, 0x0342, 0x03B9}, // Case map + 0x1FC8: []rune{0x1F72}, // Case map + 0x1FC9: []rune{0x1F73}, // Case map + 0x1FCA: []rune{0x1F74}, // Case map + 0x1FCB: []rune{0x1F75}, // Case map + 0x1FCC: []rune{0x03B7, 0x03B9}, // Case map + 0x1FD2: []rune{0x03B9, 0x0308, 0x0300}, // Case map + 0x1FD3: []rune{0x03B9, 0x0308, 0x0301}, // Case map + 0x1FD6: []rune{0x03B9, 0x0342}, // Case map + 0x1FD7: []rune{0x03B9, 0x0308, 0x0342}, // Case map + 0x1FD8: []rune{0x1FD0}, // Case map + 0x1FD9: []rune{0x1FD1}, // Case map + 0x1FDA: []rune{0x1F76}, // Case map + 0x1FDB: []rune{0x1F77}, // Case map + 0x1FE2: []rune{0x03C5, 0x0308, 0x0300}, // Case map + 0x1FE3: []rune{0x03C5, 0x0308, 0x0301}, // Case map + 0x1FE4: []rune{0x03C1, 0x0313}, // Case map + 0x1FE6: []rune{0x03C5, 0x0342}, // Case map + 0x1FE7: []rune{0x03C5, 0x0308, 0x0342}, // Case map + 0x1FE8: []rune{0x1FE0}, // Case map + 0x1FE9: []rune{0x1FE1}, // Case map + 0x1FEA: []rune{0x1F7A}, // Case map + 0x1FEB: []rune{0x1F7B}, // Case map + 0x1FEC: []rune{0x1FE5}, // Case map + 0x1FF2: []rune{0x1F7C, 0x03B9}, // Case map + 0x1FF3: []rune{0x03C9, 0x03B9}, // Case map + 0x1FF4: []rune{0x03CE, 0x03B9}, // Case map + 0x1FF6: []rune{0x03C9, 0x0342}, // Case map + 0x1FF7: []rune{0x03C9, 0x0342, 0x03B9}, // Case map + 0x1FF8: 
[]rune{0x1F78}, // Case map + 0x1FF9: []rune{0x1F79}, // Case map + 0x1FFA: []rune{0x1F7C}, // Case map + 0x1FFB: []rune{0x1F7D}, // Case map + 0x1FFC: []rune{0x03C9, 0x03B9}, // Case map + 0x2126: []rune{0x03C9}, // Case map + 0x212A: []rune{0x006B}, // Case map + 0x212B: []rune{0x00E5}, // Case map + 0x2160: []rune{0x2170}, // Case map + 0x2161: []rune{0x2171}, // Case map + 0x2162: []rune{0x2172}, // Case map + 0x2163: []rune{0x2173}, // Case map + 0x2164: []rune{0x2174}, // Case map + 0x2165: []rune{0x2175}, // Case map + 0x2166: []rune{0x2176}, // Case map + 0x2167: []rune{0x2177}, // Case map + 0x2168: []rune{0x2178}, // Case map + 0x2169: []rune{0x2179}, // Case map + 0x216A: []rune{0x217A}, // Case map + 0x216B: []rune{0x217B}, // Case map + 0x216C: []rune{0x217C}, // Case map + 0x216D: []rune{0x217D}, // Case map + 0x216E: []rune{0x217E}, // Case map + 0x216F: []rune{0x217F}, // Case map + 0x24B6: []rune{0x24D0}, // Case map + 0x24B7: []rune{0x24D1}, // Case map + 0x24B8: []rune{0x24D2}, // Case map + 0x24B9: []rune{0x24D3}, // Case map + 0x24BA: []rune{0x24D4}, // Case map + 0x24BB: []rune{0x24D5}, // Case map + 0x24BC: []rune{0x24D6}, // Case map + 0x24BD: []rune{0x24D7}, // Case map + 0x24BE: []rune{0x24D8}, // Case map + 0x24BF: []rune{0x24D9}, // Case map + 0x24C0: []rune{0x24DA}, // Case map + 0x24C1: []rune{0x24DB}, // Case map + 0x24C2: []rune{0x24DC}, // Case map + 0x24C3: []rune{0x24DD}, // Case map + 0x24C4: []rune{0x24DE}, // Case map + 0x24C5: []rune{0x24DF}, // Case map + 0x24C6: []rune{0x24E0}, // Case map + 0x24C7: []rune{0x24E1}, // Case map + 0x24C8: []rune{0x24E2}, // Case map + 0x24C9: []rune{0x24E3}, // Case map + 0x24CA: []rune{0x24E4}, // Case map + 0x24CB: []rune{0x24E5}, // Case map + 0x24CC: []rune{0x24E6}, // Case map + 0x24CD: []rune{0x24E7}, // Case map + 0x24CE: []rune{0x24E8}, // Case map + 0x24CF: []rune{0x24E9}, // Case map + 0xFB00: []rune{0x0066, 0x0066}, // Case map + 0xFB01: []rune{0x0066, 0x0069}, // Case map + 0xFB02: 
[]rune{0x0066, 0x006C}, // Case map + 0xFB03: []rune{0x0066, 0x0066, 0x0069}, // Case map + 0xFB04: []rune{0x0066, 0x0066, 0x006C}, // Case map + 0xFB05: []rune{0x0073, 0x0074}, // Case map + 0xFB06: []rune{0x0073, 0x0074}, // Case map + 0xFB13: []rune{0x0574, 0x0576}, // Case map + 0xFB14: []rune{0x0574, 0x0565}, // Case map + 0xFB15: []rune{0x0574, 0x056B}, // Case map + 0xFB16: []rune{0x057E, 0x0576}, // Case map + 0xFB17: []rune{0x0574, 0x056D}, // Case map + 0xFF21: []rune{0xFF41}, // Case map + 0xFF22: []rune{0xFF42}, // Case map + 0xFF23: []rune{0xFF43}, // Case map + 0xFF24: []rune{0xFF44}, // Case map + 0xFF25: []rune{0xFF45}, // Case map + 0xFF26: []rune{0xFF46}, // Case map + 0xFF27: []rune{0xFF47}, // Case map + 0xFF28: []rune{0xFF48}, // Case map + 0xFF29: []rune{0xFF49}, // Case map + 0xFF2A: []rune{0xFF4A}, // Case map + 0xFF2B: []rune{0xFF4B}, // Case map + 0xFF2C: []rune{0xFF4C}, // Case map + 0xFF2D: []rune{0xFF4D}, // Case map + 0xFF2E: []rune{0xFF4E}, // Case map + 0xFF2F: []rune{0xFF4F}, // Case map + 0xFF30: []rune{0xFF50}, // Case map + 0xFF31: []rune{0xFF51}, // Case map + 0xFF32: []rune{0xFF52}, // Case map + 0xFF33: []rune{0xFF53}, // Case map + 0xFF34: []rune{0xFF54}, // Case map + 0xFF35: []rune{0xFF55}, // Case map + 0xFF36: []rune{0xFF56}, // Case map + 0xFF37: []rune{0xFF57}, // Case map + 0xFF38: []rune{0xFF58}, // Case map + 0xFF39: []rune{0xFF59}, // Case map + 0xFF3A: []rune{0xFF5A}, // Case map + 0x10400: []rune{0x10428}, // Case map + 0x10401: []rune{0x10429}, // Case map + 0x10402: []rune{0x1042A}, // Case map + 0x10403: []rune{0x1042B}, // Case map + 0x10404: []rune{0x1042C}, // Case map + 0x10405: []rune{0x1042D}, // Case map + 0x10406: []rune{0x1042E}, // Case map + 0x10407: []rune{0x1042F}, // Case map + 0x10408: []rune{0x10430}, // Case map + 0x10409: []rune{0x10431}, // Case map + 0x1040A: []rune{0x10432}, // Case map + 0x1040B: []rune{0x10433}, // Case map + 0x1040C: []rune{0x10434}, // Case map + 0x1040D: 
[]rune{0x10435}, // Case map + 0x1040E: []rune{0x10436}, // Case map + 0x1040F: []rune{0x10437}, // Case map + 0x10410: []rune{0x10438}, // Case map + 0x10411: []rune{0x10439}, // Case map + 0x10412: []rune{0x1043A}, // Case map + 0x10413: []rune{0x1043B}, // Case map + 0x10414: []rune{0x1043C}, // Case map + 0x10415: []rune{0x1043D}, // Case map + 0x10416: []rune{0x1043E}, // Case map + 0x10417: []rune{0x1043F}, // Case map + 0x10418: []rune{0x10440}, // Case map + 0x10419: []rune{0x10441}, // Case map + 0x1041A: []rune{0x10442}, // Case map + 0x1041B: []rune{0x10443}, // Case map + 0x1041C: []rune{0x10444}, // Case map + 0x1041D: []rune{0x10445}, // Case map + 0x1041E: []rune{0x10446}, // Case map + 0x1041F: []rune{0x10447}, // Case map + 0x10420: []rune{0x10448}, // Case map + 0x10421: []rune{0x10449}, // Case map + 0x10422: []rune{0x1044A}, // Case map + 0x10423: []rune{0x1044B}, // Case map + 0x10424: []rune{0x1044C}, // Case map + 0x10425: []rune{0x1044D}, // Case map +} + +// TableB3 represents RFC-3454 Table B.3. +var TableB3 Mapping = tableB3 + +var tableC1_1 = Set{ + RuneRange{0x0020, 0x0020}, // SPACE +} + +// TableC1_1 represents RFC-3454 Table C.1.1. 
+var TableC1_1 Set = tableC1_1 + +var tableC1_2 = Set{ + RuneRange{0x00A0, 0x00A0}, // NO-BREAK SPACE + RuneRange{0x1680, 0x1680}, // OGHAM SPACE MARK + RuneRange{0x2000, 0x2000}, // EN QUAD + RuneRange{0x2001, 0x2001}, // EM QUAD + RuneRange{0x2002, 0x2002}, // EN SPACE + RuneRange{0x2003, 0x2003}, // EM SPACE + RuneRange{0x2004, 0x2004}, // THREE-PER-EM SPACE + RuneRange{0x2005, 0x2005}, // FOUR-PER-EM SPACE + RuneRange{0x2006, 0x2006}, // SIX-PER-EM SPACE + RuneRange{0x2007, 0x2007}, // FIGURE SPACE + RuneRange{0x2008, 0x2008}, // PUNCTUATION SPACE + RuneRange{0x2009, 0x2009}, // THIN SPACE + RuneRange{0x200A, 0x200A}, // HAIR SPACE + RuneRange{0x200B, 0x200B}, // ZERO WIDTH SPACE + RuneRange{0x202F, 0x202F}, // NARROW NO-BREAK SPACE + RuneRange{0x205F, 0x205F}, // MEDIUM MATHEMATICAL SPACE + RuneRange{0x3000, 0x3000}, // IDEOGRAPHIC SPACE +} + +// TableC1_2 represents RFC-3454 Table C.1.2. +var TableC1_2 Set = tableC1_2 + +var tableC2_1 = Set{ + RuneRange{0x0000, 0x001F}, // [CONTROL CHARACTERS] + RuneRange{0x007F, 0x007F}, // DELETE +} + +// TableC2_1 represents RFC-3454 Table C.2.1. 
+var TableC2_1 Set = tableC2_1 + +var tableC2_2 = Set{ + RuneRange{0x0080, 0x009F}, // [CONTROL CHARACTERS] + RuneRange{0x06DD, 0x06DD}, // ARABIC END OF AYAH + RuneRange{0x070F, 0x070F}, // SYRIAC ABBREVIATION MARK + RuneRange{0x180E, 0x180E}, // MONGOLIAN VOWEL SEPARATOR + RuneRange{0x200C, 0x200C}, // ZERO WIDTH NON-JOINER + RuneRange{0x200D, 0x200D}, // ZERO WIDTH JOINER + RuneRange{0x2028, 0x2028}, // LINE SEPARATOR + RuneRange{0x2029, 0x2029}, // PARAGRAPH SEPARATOR + RuneRange{0x2060, 0x2060}, // WORD JOINER + RuneRange{0x2061, 0x2061}, // FUNCTION APPLICATION + RuneRange{0x2062, 0x2062}, // INVISIBLE TIMES + RuneRange{0x2063, 0x2063}, // INVISIBLE SEPARATOR + RuneRange{0x206A, 0x206F}, // [CONTROL CHARACTERS] + RuneRange{0xFEFF, 0xFEFF}, // ZERO WIDTH NO-BREAK SPACE + RuneRange{0xFFF9, 0xFFFC}, // [CONTROL CHARACTERS] + RuneRange{0x1D173, 0x1D17A}, // [MUSICAL CONTROL CHARACTERS] +} + +// TableC2_2 represents RFC-3454 Table C.2.2. +var TableC2_2 Set = tableC2_2 + +var tableC3 = Set{ + RuneRange{0xE000, 0xF8FF}, // [PRIVATE USE, PLANE 0] + RuneRange{0xF0000, 0xFFFFD}, // [PRIVATE USE, PLANE 15] + RuneRange{0x100000, 0x10FFFD}, // [PRIVATE USE, PLANE 16] +} + +// TableC3 represents RFC-3454 Table C.3. 
+var TableC3 Set = tableC3 + +var tableC4 = Set{ + RuneRange{0xFDD0, 0xFDEF}, // [NONCHARACTER CODE POINTS] + RuneRange{0xFFFE, 0xFFFF}, // [NONCHARACTER CODE POINTS] + RuneRange{0x1FFFE, 0x1FFFF}, // [NONCHARACTER CODE POINTS] + RuneRange{0x2FFFE, 0x2FFFF}, // [NONCHARACTER CODE POINTS] + RuneRange{0x3FFFE, 0x3FFFF}, // [NONCHARACTER CODE POINTS] + RuneRange{0x4FFFE, 0x4FFFF}, // [NONCHARACTER CODE POINTS] + RuneRange{0x5FFFE, 0x5FFFF}, // [NONCHARACTER CODE POINTS] + RuneRange{0x6FFFE, 0x6FFFF}, // [NONCHARACTER CODE POINTS] + RuneRange{0x7FFFE, 0x7FFFF}, // [NONCHARACTER CODE POINTS] + RuneRange{0x8FFFE, 0x8FFFF}, // [NONCHARACTER CODE POINTS] + RuneRange{0x9FFFE, 0x9FFFF}, // [NONCHARACTER CODE POINTS] + RuneRange{0xAFFFE, 0xAFFFF}, // [NONCHARACTER CODE POINTS] + RuneRange{0xBFFFE, 0xBFFFF}, // [NONCHARACTER CODE POINTS] + RuneRange{0xCFFFE, 0xCFFFF}, // [NONCHARACTER CODE POINTS] + RuneRange{0xDFFFE, 0xDFFFF}, // [NONCHARACTER CODE POINTS] + RuneRange{0xEFFFE, 0xEFFFF}, // [NONCHARACTER CODE POINTS] + RuneRange{0xFFFFE, 0xFFFFF}, // [NONCHARACTER CODE POINTS] + RuneRange{0x10FFFE, 0x10FFFF}, // [NONCHARACTER CODE POINTS] +} + +// TableC4 represents RFC-3454 Table C.4. +var TableC4 Set = tableC4 + +var tableC5 = Set{ + RuneRange{0xD800, 0xDFFF}, // [SURROGATE CODES] +} + +// TableC5 represents RFC-3454 Table C.5. +var TableC5 Set = tableC5 + +var tableC6 = Set{ + RuneRange{0xFFF9, 0xFFF9}, // INTERLINEAR ANNOTATION ANCHOR + RuneRange{0xFFFA, 0xFFFA}, // INTERLINEAR ANNOTATION SEPARATOR + RuneRange{0xFFFB, 0xFFFB}, // INTERLINEAR ANNOTATION TERMINATOR + RuneRange{0xFFFC, 0xFFFC}, // OBJECT REPLACEMENT CHARACTER + RuneRange{0xFFFD, 0xFFFD}, // REPLACEMENT CHARACTER +} + +// TableC6 represents RFC-3454 Table C.6. +var TableC6 Set = tableC6 + +var tableC7 = Set{ + RuneRange{0x2FF0, 0x2FFB}, // [IDEOGRAPHIC DESCRIPTION CHARACTERS] +} + +// TableC7 represents RFC-3454 Table C.7. 
+var TableC7 Set = tableC7 + +var tableC8 = Set{ + RuneRange{0x0340, 0x0340}, // COMBINING GRAVE TONE MARK + RuneRange{0x0341, 0x0341}, // COMBINING ACUTE TONE MARK + RuneRange{0x200E, 0x200E}, // LEFT-TO-RIGHT MARK + RuneRange{0x200F, 0x200F}, // RIGHT-TO-LEFT MARK + RuneRange{0x202A, 0x202A}, // LEFT-TO-RIGHT EMBEDDING + RuneRange{0x202B, 0x202B}, // RIGHT-TO-LEFT EMBEDDING + RuneRange{0x202C, 0x202C}, // POP DIRECTIONAL FORMATTING + RuneRange{0x202D, 0x202D}, // LEFT-TO-RIGHT OVERRIDE + RuneRange{0x202E, 0x202E}, // RIGHT-TO-LEFT OVERRIDE + RuneRange{0x206A, 0x206A}, // INHIBIT SYMMETRIC SWAPPING + RuneRange{0x206B, 0x206B}, // ACTIVATE SYMMETRIC SWAPPING + RuneRange{0x206C, 0x206C}, // INHIBIT ARABIC FORM SHAPING + RuneRange{0x206D, 0x206D}, // ACTIVATE ARABIC FORM SHAPING + RuneRange{0x206E, 0x206E}, // NATIONAL DIGIT SHAPES + RuneRange{0x206F, 0x206F}, // NOMINAL DIGIT SHAPES +} + +// TableC8 represents RFC-3454 Table C.8. +var TableC8 Set = tableC8 + +var tableC9 = Set{ + RuneRange{0xE0001, 0xE0001}, // LANGUAGE TAG + RuneRange{0xE0020, 0xE007F}, // [TAGGING CHARACTERS] +} + +// TableC9 represents RFC-3454 Table C.9. 
+var TableC9 Set = tableC9 + +var tableD1 = Set{ + RuneRange{0x05BE, 0x05BE}, + RuneRange{0x05C0, 0x05C0}, + RuneRange{0x05C3, 0x05C3}, + RuneRange{0x05D0, 0x05EA}, + RuneRange{0x05F0, 0x05F4}, + RuneRange{0x061B, 0x061B}, + RuneRange{0x061F, 0x061F}, + RuneRange{0x0621, 0x063A}, + RuneRange{0x0640, 0x064A}, + RuneRange{0x066D, 0x066F}, + RuneRange{0x0671, 0x06D5}, + RuneRange{0x06DD, 0x06DD}, + RuneRange{0x06E5, 0x06E6}, + RuneRange{0x06FA, 0x06FE}, + RuneRange{0x0700, 0x070D}, + RuneRange{0x0710, 0x0710}, + RuneRange{0x0712, 0x072C}, + RuneRange{0x0780, 0x07A5}, + RuneRange{0x07B1, 0x07B1}, + RuneRange{0x200F, 0x200F}, + RuneRange{0xFB1D, 0xFB1D}, + RuneRange{0xFB1F, 0xFB28}, + RuneRange{0xFB2A, 0xFB36}, + RuneRange{0xFB38, 0xFB3C}, + RuneRange{0xFB3E, 0xFB3E}, + RuneRange{0xFB40, 0xFB41}, + RuneRange{0xFB43, 0xFB44}, + RuneRange{0xFB46, 0xFBB1}, + RuneRange{0xFBD3, 0xFD3D}, + RuneRange{0xFD50, 0xFD8F}, + RuneRange{0xFD92, 0xFDC7}, + RuneRange{0xFDF0, 0xFDFC}, + RuneRange{0xFE70, 0xFE74}, + RuneRange{0xFE76, 0xFEFC}, +} + +// TableD1 represents RFC-3454 Table D.1. 
+var TableD1 Set = tableD1 + +var tableD2 = Set{ + RuneRange{0x0041, 0x005A}, + RuneRange{0x0061, 0x007A}, + RuneRange{0x00AA, 0x00AA}, + RuneRange{0x00B5, 0x00B5}, + RuneRange{0x00BA, 0x00BA}, + RuneRange{0x00C0, 0x00D6}, + RuneRange{0x00D8, 0x00F6}, + RuneRange{0x00F8, 0x0220}, + RuneRange{0x0222, 0x0233}, + RuneRange{0x0250, 0x02AD}, + RuneRange{0x02B0, 0x02B8}, + RuneRange{0x02BB, 0x02C1}, + RuneRange{0x02D0, 0x02D1}, + RuneRange{0x02E0, 0x02E4}, + RuneRange{0x02EE, 0x02EE}, + RuneRange{0x037A, 0x037A}, + RuneRange{0x0386, 0x0386}, + RuneRange{0x0388, 0x038A}, + RuneRange{0x038C, 0x038C}, + RuneRange{0x038E, 0x03A1}, + RuneRange{0x03A3, 0x03CE}, + RuneRange{0x03D0, 0x03F5}, + RuneRange{0x0400, 0x0482}, + RuneRange{0x048A, 0x04CE}, + RuneRange{0x04D0, 0x04F5}, + RuneRange{0x04F8, 0x04F9}, + RuneRange{0x0500, 0x050F}, + RuneRange{0x0531, 0x0556}, + RuneRange{0x0559, 0x055F}, + RuneRange{0x0561, 0x0587}, + RuneRange{0x0589, 0x0589}, + RuneRange{0x0903, 0x0903}, + RuneRange{0x0905, 0x0939}, + RuneRange{0x093D, 0x0940}, + RuneRange{0x0949, 0x094C}, + RuneRange{0x0950, 0x0950}, + RuneRange{0x0958, 0x0961}, + RuneRange{0x0964, 0x0970}, + RuneRange{0x0982, 0x0983}, + RuneRange{0x0985, 0x098C}, + RuneRange{0x098F, 0x0990}, + RuneRange{0x0993, 0x09A8}, + RuneRange{0x09AA, 0x09B0}, + RuneRange{0x09B2, 0x09B2}, + RuneRange{0x09B6, 0x09B9}, + RuneRange{0x09BE, 0x09C0}, + RuneRange{0x09C7, 0x09C8}, + RuneRange{0x09CB, 0x09CC}, + RuneRange{0x09D7, 0x09D7}, + RuneRange{0x09DC, 0x09DD}, + RuneRange{0x09DF, 0x09E1}, + RuneRange{0x09E6, 0x09F1}, + RuneRange{0x09F4, 0x09FA}, + RuneRange{0x0A05, 0x0A0A}, + RuneRange{0x0A0F, 0x0A10}, + RuneRange{0x0A13, 0x0A28}, + RuneRange{0x0A2A, 0x0A30}, + RuneRange{0x0A32, 0x0A33}, + RuneRange{0x0A35, 0x0A36}, + RuneRange{0x0A38, 0x0A39}, + RuneRange{0x0A3E, 0x0A40}, + RuneRange{0x0A59, 0x0A5C}, + RuneRange{0x0A5E, 0x0A5E}, + RuneRange{0x0A66, 0x0A6F}, + RuneRange{0x0A72, 0x0A74}, + RuneRange{0x0A83, 0x0A83}, + RuneRange{0x0A85, 0x0A8B}, + 
RuneRange{0x0A8D, 0x0A8D}, + RuneRange{0x0A8F, 0x0A91}, + RuneRange{0x0A93, 0x0AA8}, + RuneRange{0x0AAA, 0x0AB0}, + RuneRange{0x0AB2, 0x0AB3}, + RuneRange{0x0AB5, 0x0AB9}, + RuneRange{0x0ABD, 0x0AC0}, + RuneRange{0x0AC9, 0x0AC9}, + RuneRange{0x0ACB, 0x0ACC}, + RuneRange{0x0AD0, 0x0AD0}, + RuneRange{0x0AE0, 0x0AE0}, + RuneRange{0x0AE6, 0x0AEF}, + RuneRange{0x0B02, 0x0B03}, + RuneRange{0x0B05, 0x0B0C}, + RuneRange{0x0B0F, 0x0B10}, + RuneRange{0x0B13, 0x0B28}, + RuneRange{0x0B2A, 0x0B30}, + RuneRange{0x0B32, 0x0B33}, + RuneRange{0x0B36, 0x0B39}, + RuneRange{0x0B3D, 0x0B3E}, + RuneRange{0x0B40, 0x0B40}, + RuneRange{0x0B47, 0x0B48}, + RuneRange{0x0B4B, 0x0B4C}, + RuneRange{0x0B57, 0x0B57}, + RuneRange{0x0B5C, 0x0B5D}, + RuneRange{0x0B5F, 0x0B61}, + RuneRange{0x0B66, 0x0B70}, + RuneRange{0x0B83, 0x0B83}, + RuneRange{0x0B85, 0x0B8A}, + RuneRange{0x0B8E, 0x0B90}, + RuneRange{0x0B92, 0x0B95}, + RuneRange{0x0B99, 0x0B9A}, + RuneRange{0x0B9C, 0x0B9C}, + RuneRange{0x0B9E, 0x0B9F}, + RuneRange{0x0BA3, 0x0BA4}, + RuneRange{0x0BA8, 0x0BAA}, + RuneRange{0x0BAE, 0x0BB5}, + RuneRange{0x0BB7, 0x0BB9}, + RuneRange{0x0BBE, 0x0BBF}, + RuneRange{0x0BC1, 0x0BC2}, + RuneRange{0x0BC6, 0x0BC8}, + RuneRange{0x0BCA, 0x0BCC}, + RuneRange{0x0BD7, 0x0BD7}, + RuneRange{0x0BE7, 0x0BF2}, + RuneRange{0x0C01, 0x0C03}, + RuneRange{0x0C05, 0x0C0C}, + RuneRange{0x0C0E, 0x0C10}, + RuneRange{0x0C12, 0x0C28}, + RuneRange{0x0C2A, 0x0C33}, + RuneRange{0x0C35, 0x0C39}, + RuneRange{0x0C41, 0x0C44}, + RuneRange{0x0C60, 0x0C61}, + RuneRange{0x0C66, 0x0C6F}, + RuneRange{0x0C82, 0x0C83}, + RuneRange{0x0C85, 0x0C8C}, + RuneRange{0x0C8E, 0x0C90}, + RuneRange{0x0C92, 0x0CA8}, + RuneRange{0x0CAA, 0x0CB3}, + RuneRange{0x0CB5, 0x0CB9}, + RuneRange{0x0CBE, 0x0CBE}, + RuneRange{0x0CC0, 0x0CC4}, + RuneRange{0x0CC7, 0x0CC8}, + RuneRange{0x0CCA, 0x0CCB}, + RuneRange{0x0CD5, 0x0CD6}, + RuneRange{0x0CDE, 0x0CDE}, + RuneRange{0x0CE0, 0x0CE1}, + RuneRange{0x0CE6, 0x0CEF}, + RuneRange{0x0D02, 0x0D03}, + RuneRange{0x0D05, 0x0D0C}, 
+ RuneRange{0x0D0E, 0x0D10}, + RuneRange{0x0D12, 0x0D28}, + RuneRange{0x0D2A, 0x0D39}, + RuneRange{0x0D3E, 0x0D40}, + RuneRange{0x0D46, 0x0D48}, + RuneRange{0x0D4A, 0x0D4C}, + RuneRange{0x0D57, 0x0D57}, + RuneRange{0x0D60, 0x0D61}, + RuneRange{0x0D66, 0x0D6F}, + RuneRange{0x0D82, 0x0D83}, + RuneRange{0x0D85, 0x0D96}, + RuneRange{0x0D9A, 0x0DB1}, + RuneRange{0x0DB3, 0x0DBB}, + RuneRange{0x0DBD, 0x0DBD}, + RuneRange{0x0DC0, 0x0DC6}, + RuneRange{0x0DCF, 0x0DD1}, + RuneRange{0x0DD8, 0x0DDF}, + RuneRange{0x0DF2, 0x0DF4}, + RuneRange{0x0E01, 0x0E30}, + RuneRange{0x0E32, 0x0E33}, + RuneRange{0x0E40, 0x0E46}, + RuneRange{0x0E4F, 0x0E5B}, + RuneRange{0x0E81, 0x0E82}, + RuneRange{0x0E84, 0x0E84}, + RuneRange{0x0E87, 0x0E88}, + RuneRange{0x0E8A, 0x0E8A}, + RuneRange{0x0E8D, 0x0E8D}, + RuneRange{0x0E94, 0x0E97}, + RuneRange{0x0E99, 0x0E9F}, + RuneRange{0x0EA1, 0x0EA3}, + RuneRange{0x0EA5, 0x0EA5}, + RuneRange{0x0EA7, 0x0EA7}, + RuneRange{0x0EAA, 0x0EAB}, + RuneRange{0x0EAD, 0x0EB0}, + RuneRange{0x0EB2, 0x0EB3}, + RuneRange{0x0EBD, 0x0EBD}, + RuneRange{0x0EC0, 0x0EC4}, + RuneRange{0x0EC6, 0x0EC6}, + RuneRange{0x0ED0, 0x0ED9}, + RuneRange{0x0EDC, 0x0EDD}, + RuneRange{0x0F00, 0x0F17}, + RuneRange{0x0F1A, 0x0F34}, + RuneRange{0x0F36, 0x0F36}, + RuneRange{0x0F38, 0x0F38}, + RuneRange{0x0F3E, 0x0F47}, + RuneRange{0x0F49, 0x0F6A}, + RuneRange{0x0F7F, 0x0F7F}, + RuneRange{0x0F85, 0x0F85}, + RuneRange{0x0F88, 0x0F8B}, + RuneRange{0x0FBE, 0x0FC5}, + RuneRange{0x0FC7, 0x0FCC}, + RuneRange{0x0FCF, 0x0FCF}, + RuneRange{0x1000, 0x1021}, + RuneRange{0x1023, 0x1027}, + RuneRange{0x1029, 0x102A}, + RuneRange{0x102C, 0x102C}, + RuneRange{0x1031, 0x1031}, + RuneRange{0x1038, 0x1038}, + RuneRange{0x1040, 0x1057}, + RuneRange{0x10A0, 0x10C5}, + RuneRange{0x10D0, 0x10F8}, + RuneRange{0x10FB, 0x10FB}, + RuneRange{0x1100, 0x1159}, + RuneRange{0x115F, 0x11A2}, + RuneRange{0x11A8, 0x11F9}, + RuneRange{0x1200, 0x1206}, + RuneRange{0x1208, 0x1246}, + RuneRange{0x1248, 0x1248}, + RuneRange{0x124A, 
0x124D}, + RuneRange{0x1250, 0x1256}, + RuneRange{0x1258, 0x1258}, + RuneRange{0x125A, 0x125D}, + RuneRange{0x1260, 0x1286}, + RuneRange{0x1288, 0x1288}, + RuneRange{0x128A, 0x128D}, + RuneRange{0x1290, 0x12AE}, + RuneRange{0x12B0, 0x12B0}, + RuneRange{0x12B2, 0x12B5}, + RuneRange{0x12B8, 0x12BE}, + RuneRange{0x12C0, 0x12C0}, + RuneRange{0x12C2, 0x12C5}, + RuneRange{0x12C8, 0x12CE}, + RuneRange{0x12D0, 0x12D6}, + RuneRange{0x12D8, 0x12EE}, + RuneRange{0x12F0, 0x130E}, + RuneRange{0x1310, 0x1310}, + RuneRange{0x1312, 0x1315}, + RuneRange{0x1318, 0x131E}, + RuneRange{0x1320, 0x1346}, + RuneRange{0x1348, 0x135A}, + RuneRange{0x1361, 0x137C}, + RuneRange{0x13A0, 0x13F4}, + RuneRange{0x1401, 0x1676}, + RuneRange{0x1681, 0x169A}, + RuneRange{0x16A0, 0x16F0}, + RuneRange{0x1700, 0x170C}, + RuneRange{0x170E, 0x1711}, + RuneRange{0x1720, 0x1731}, + RuneRange{0x1735, 0x1736}, + RuneRange{0x1740, 0x1751}, + RuneRange{0x1760, 0x176C}, + RuneRange{0x176E, 0x1770}, + RuneRange{0x1780, 0x17B6}, + RuneRange{0x17BE, 0x17C5}, + RuneRange{0x17C7, 0x17C8}, + RuneRange{0x17D4, 0x17DA}, + RuneRange{0x17DC, 0x17DC}, + RuneRange{0x17E0, 0x17E9}, + RuneRange{0x1810, 0x1819}, + RuneRange{0x1820, 0x1877}, + RuneRange{0x1880, 0x18A8}, + RuneRange{0x1E00, 0x1E9B}, + RuneRange{0x1EA0, 0x1EF9}, + RuneRange{0x1F00, 0x1F15}, + RuneRange{0x1F18, 0x1F1D}, + RuneRange{0x1F20, 0x1F45}, + RuneRange{0x1F48, 0x1F4D}, + RuneRange{0x1F50, 0x1F57}, + RuneRange{0x1F59, 0x1F59}, + RuneRange{0x1F5B, 0x1F5B}, + RuneRange{0x1F5D, 0x1F5D}, + RuneRange{0x1F5F, 0x1F7D}, + RuneRange{0x1F80, 0x1FB4}, + RuneRange{0x1FB6, 0x1FBC}, + RuneRange{0x1FBE, 0x1FBE}, + RuneRange{0x1FC2, 0x1FC4}, + RuneRange{0x1FC6, 0x1FCC}, + RuneRange{0x1FD0, 0x1FD3}, + RuneRange{0x1FD6, 0x1FDB}, + RuneRange{0x1FE0, 0x1FEC}, + RuneRange{0x1FF2, 0x1FF4}, + RuneRange{0x1FF6, 0x1FFC}, + RuneRange{0x200E, 0x200E}, + RuneRange{0x2071, 0x2071}, + RuneRange{0x207F, 0x207F}, + RuneRange{0x2102, 0x2102}, + RuneRange{0x2107, 0x2107}, + 
RuneRange{0x210A, 0x2113}, + RuneRange{0x2115, 0x2115}, + RuneRange{0x2119, 0x211D}, + RuneRange{0x2124, 0x2124}, + RuneRange{0x2126, 0x2126}, + RuneRange{0x2128, 0x2128}, + RuneRange{0x212A, 0x212D}, + RuneRange{0x212F, 0x2131}, + RuneRange{0x2133, 0x2139}, + RuneRange{0x213D, 0x213F}, + RuneRange{0x2145, 0x2149}, + RuneRange{0x2160, 0x2183}, + RuneRange{0x2336, 0x237A}, + RuneRange{0x2395, 0x2395}, + RuneRange{0x249C, 0x24E9}, + RuneRange{0x3005, 0x3007}, + RuneRange{0x3021, 0x3029}, + RuneRange{0x3031, 0x3035}, + RuneRange{0x3038, 0x303C}, + RuneRange{0x3041, 0x3096}, + RuneRange{0x309D, 0x309F}, + RuneRange{0x30A1, 0x30FA}, + RuneRange{0x30FC, 0x30FF}, + RuneRange{0x3105, 0x312C}, + RuneRange{0x3131, 0x318E}, + RuneRange{0x3190, 0x31B7}, + RuneRange{0x31F0, 0x321C}, + RuneRange{0x3220, 0x3243}, + RuneRange{0x3260, 0x327B}, + RuneRange{0x327F, 0x32B0}, + RuneRange{0x32C0, 0x32CB}, + RuneRange{0x32D0, 0x32FE}, + RuneRange{0x3300, 0x3376}, + RuneRange{0x337B, 0x33DD}, + RuneRange{0x33E0, 0x33FE}, + RuneRange{0x3400, 0x4DB5}, + RuneRange{0x4E00, 0x9FA5}, + RuneRange{0xA000, 0xA48C}, + RuneRange{0xAC00, 0xD7A3}, + RuneRange{0xD800, 0xFA2D}, + RuneRange{0xFA30, 0xFA6A}, + RuneRange{0xFB00, 0xFB06}, + RuneRange{0xFB13, 0xFB17}, + RuneRange{0xFF21, 0xFF3A}, + RuneRange{0xFF41, 0xFF5A}, + RuneRange{0xFF66, 0xFFBE}, + RuneRange{0xFFC2, 0xFFC7}, + RuneRange{0xFFCA, 0xFFCF}, + RuneRange{0xFFD2, 0xFFD7}, + RuneRange{0xFFDA, 0xFFDC}, + RuneRange{0x10300, 0x1031E}, + RuneRange{0x10320, 0x10323}, + RuneRange{0x10330, 0x1034A}, + RuneRange{0x10400, 0x10425}, + RuneRange{0x10428, 0x1044D}, + RuneRange{0x1D000, 0x1D0F5}, + RuneRange{0x1D100, 0x1D126}, + RuneRange{0x1D12A, 0x1D166}, + RuneRange{0x1D16A, 0x1D172}, + RuneRange{0x1D183, 0x1D184}, + RuneRange{0x1D18C, 0x1D1A9}, + RuneRange{0x1D1AE, 0x1D1DD}, + RuneRange{0x1D400, 0x1D454}, + RuneRange{0x1D456, 0x1D49C}, + RuneRange{0x1D49E, 0x1D49F}, + RuneRange{0x1D4A2, 0x1D4A2}, + RuneRange{0x1D4A5, 0x1D4A6}, + RuneRange{0x1D4A9, 
0x1D4AC}, + RuneRange{0x1D4AE, 0x1D4B9}, + RuneRange{0x1D4BB, 0x1D4BB}, + RuneRange{0x1D4BD, 0x1D4C0}, + RuneRange{0x1D4C2, 0x1D4C3}, + RuneRange{0x1D4C5, 0x1D505}, + RuneRange{0x1D507, 0x1D50A}, + RuneRange{0x1D50D, 0x1D514}, + RuneRange{0x1D516, 0x1D51C}, + RuneRange{0x1D51E, 0x1D539}, + RuneRange{0x1D53B, 0x1D53E}, + RuneRange{0x1D540, 0x1D544}, + RuneRange{0x1D546, 0x1D546}, + RuneRange{0x1D54A, 0x1D550}, + RuneRange{0x1D552, 0x1D6A3}, + RuneRange{0x1D6A8, 0x1D7C9}, + RuneRange{0x20000, 0x2A6D6}, + RuneRange{0x2F800, 0x2FA1D}, + RuneRange{0xF0000, 0xFFFFD}, + RuneRange{0x100000, 0x10FFFD}, +} + +// TableD2 represents RFC-3454 Table D.2. +var TableD2 Set = tableD2 diff --git a/vendor/go.mongodb.org/mongo-driver/.errcheck-excludes b/vendor/go.mongodb.org/mongo-driver/.errcheck-excludes new file mode 100644 index 0000000..65dc85e --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/.errcheck-excludes @@ -0,0 +1,12 @@ +(*go.mongodb.org/mongo-driver/x/network/connection.connection).Close +(go.mongodb.org/mongo-driver/x/network/connection.Connection).Close +(*go.mongodb.org/mongo-driver/x/mongo/driver/topology.Subscription).Unsubscribe +(*go.mongodb.org/mongo-driver/x/mongo/driver/topology.Server).Close +(*go.mongodb.org/mongo-driver/x/network/connection.pool).closeConnection +(go.mongodb.org/mongo-driver/x/network/wiremessage.ReadWriteCloser).Close +(*go.mongodb.org/mongo-driver/mongo.Cursor).Close +(*go.mongodb.org/mongo-driver/mongo.ChangeStream).Close +(net.Conn).Close +encoding/pem.Encode +fmt.Fprintf +fmt.Fprint diff --git a/vendor/go.mongodb.org/mongo-driver/.evergreen/config.yml b/vendor/go.mongodb.org/mongo-driver/.evergreen/config.yml new file mode 100644 index 0000000..4bd401c --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/.evergreen/config.yml @@ -0,0 +1,774 @@ +######################################## +# Evergreen Template for MongoDB Drivers +######################################## + +# When a task that used to pass starts to fail +# Go 
through all versions that may have been skipped to detect +# when the task started failing +stepback: true + +# Mark a failure as a system/bootstrap failure (purple box) rather then a task +# failure by default. +# Actual testing tasks are marked with `type: test` +command_type: setup + +# Protect ourself against rogue test case, or curl gone wild, that runs forever +# 12 minutes is the longest we'll ever run +exec_timeout_secs: 3600 # 12 minutes is the longest we'll ever run + +# What to do when evergreen hits the timeout (`post:` tasks are run automatically) +timeout: + - command: shell.exec + params: + script: | + ls -la +functions: + + fetch-source: + # Executes git clone and applies the submitted patch, if any + - command: git.get_project + type: system + params: + directory: src/go.mongodb.org/mongo-driver + # Make an evergreen expansion file with dynamic values + - command: shell.exec + params: + working_dir: src/go.mongodb.org/mongo-driver + script: | + if [ "Windows_NT" = "$OS" ]; then + export GOPATH=$(cygpath -w $(dirname $(dirname $(dirname `pwd`)))) + else + export GOPATH=$(dirname $(dirname $(dirname `pwd`))) + fi; + + # Get the current unique version of this checkout + if [ "${is_patch}" = "true" ]; then + CURRENT_VERSION=$(git describe)-patch-${version_id} + else + CURRENT_VERSION=latest + fi + + export DRIVERS_TOOLS="$(pwd)/../drivers-tools" + export PROJECT_DIRECTORY="$(pwd)" + + # Python has cygwin path problems on Windows. 
Detect prospective mongo-orchestration home directory + if [ "Windows_NT" = "$OS" ]; then # Magic variable in cygwin + export DRIVERS_TOOLS=$(cygpath -m $DRIVERS_TOOLS) + export PROJECT_DIRECTORY=$(cygpath -m $PROJECT_DIRECTORY) + fi + + export MONGO_ORCHESTRATION_HOME="$DRIVERS_TOOLS/.evergreen/orchestration" + export MONGODB_BINARIES="$DRIVERS_TOOLS/mongodb/bin" + export UPLOAD_BUCKET="${project}" + export PATH="${GO_DIST}/bin:${GCC_PATH}:$GOPATH/bin:$MONGODB_BINARIES:$PATH" + export PROJECT="${project}" + + if [ "Windows_NT" = "$OS" ]; then + export USERPROFILE=$(cygpath -w $(dirname $(dirname $(dirname `pwd`)))) + export HOME=$(cygpath -w $(dirname $(dirname $(dirname `pwd`)))) + fi + + go version + go env + + cat < expansion.yml + CURRENT_VERSION: "$CURRENT_VERSION" + DRIVERS_TOOLS: "$DRIVERS_TOOLS" + MONGO_ORCHESTRATION_HOME: "$MONGO_ORCHESTRATION_HOME" + MONGODB_BINARIES: "$MONGODB_BINARIES" + UPLOAD_BUCKET: "$UPLOAD_BUCKET" + PROJECT_DIRECTORY: "$PROJECT_DIRECTORY" + PREPARE_SHELL: | + set -o errexit + set -o xtrace + export GOPATH="$GOPATH" + export GOROOT="${GO_DIST}" + export DRIVERS_TOOLS="$DRIVERS_TOOLS" + export MONGO_ORCHESTRATION_HOME="$MONGO_ORCHESTRATION_HOME" + export MONGODB_BINARIES="$MONGODB_BINARIES" + export UPLOAD_BUCKET="$UPLOAD_BUCKET" + export PROJECT_DIRECTORY="$PROJECT_DIRECTORY" + export TMPDIR="$MONGO_ORCHESTRATION_HOME/db" + export PATH="$PATH" + export PROJECT="$PROJECT" + EOT + # See what we've done + cat expansion.yml + # Load the expansion file to make an evergreen variable with the current unique version + - command: expansions.update + params: + file: src/go.mongodb.org/mongo-driver/expansion.yml + + + prepare-resources: + - command: shell.exec + params: + script: | + ${PREPARE_SHELL} + rm -rf $DRIVERS_TOOLS + if [ "${project}" = "drivers-tools" ]; then + # If this was a patch build, doing a fresh clone would not actually test the patch + cp -R ${PROJECT_DIRECTORY}/ $DRIVERS_TOOLS + else + git clone 
git://github.com/mongodb-labs/drivers-evergreen-tools.git $DRIVERS_TOOLS + fi + echo "{ \"releases\": { \"default\": \"$MONGODB_BINARIES\" }}" > $MONGO_ORCHESTRATION_HOME/orchestration.config + - command: shell.exec + params: + working_dir: src/go.mongodb.org/mongo-driver + script: | + ${PREPARE_SHELL} + # any go tools that we need + go get -u golang.org/x/lint/golint + go get -u github.com/kisielk/errcheck + + # initialize submodules + git submodule init + git submodule update + + # generate any source code + make generate + + + upload-mo-artifacts: + - command: shell.exec + params: + script: | + ${PREPARE_SHELL} + find $MONGO_ORCHESTRATION_HOME -name \*.log | xargs tar czf mongodb-logs.tar.gz + - command: s3.put + params: + aws_key: ${aws_key} + aws_secret: ${aws_secret} + local_file: mongodb-logs.tar.gz + remote_file: ${UPLOAD_BUCKET}/${build_variant}/${revision}/${version_id}/${build_id}/logs/${task_id}-${execution}-mongodb-logs.tar.gz + bucket: mciuploads + permissions: public-read + content_type: ${content_type|application/x-gzip} + display_name: "mongodb-logs.tar.gz" + + bootstrap-mongo-orchestration: + - command: shell.exec + params: + script: | + ${PREPARE_SHELL} + + cp ${PROJECT_DIRECTORY}/data/certificates/server.pem ${DRIVERS_TOOLS}/.evergreen/x509gen/server.pem + cp ${PROJECT_DIRECTORY}/data/certificates/ca.pem ${DRIVERS_TOOLS}/.evergreen/x509gen/ca.pem + cp ${PROJECT_DIRECTORY}/data/certificates/client.pem ${DRIVERS_TOOLS}/.evergreen/x509gen/client.pem + cp ${PROJECT_DIRECTORY}/data/certificates/client.pem ${MONGO_ORCHESTRATION_HOME}/lib/client.pem + + MONGODB_VERSION=${VERSION} TOPOLOGY=${TOPOLOGY} AUTH=${AUTH} SSL=${SSL} sh ${DRIVERS_TOOLS}/.evergreen/run-orchestration.sh + - command: expansions.update + params: + file: mo-expansion.yml + + + cleanup: + - command: shell.exec + params: + script: | + ${PREPARE_SHELL} + cd "$MONGO_ORCHESTRATION_HOME" + # source the mongo-orchestration virtualenv if it exists + if [ -f venv/bin/activate ]; then + . 
venv/bin/activate + elif [ -f venv/Scripts/activate ]; then + . venv/Scripts/activate + fi + mongo-orchestration stop + cd - + rm -rf $DRIVERS_TOOLS || true + + + fix-absolute-paths: + - command: shell.exec + params: + script: | + ${PREPARE_SHELL} + for filename in $(find ${DRIVERS_TOOLS} -name \*.json); do + perl -p -i -e "s|ABSOLUTE_PATH_REPLACEMENT_TOKEN|${DRIVERS_TOOLS}|g" $filename + done + + + windows-fix: + - command: shell.exec + params: + script: | + ${PREPARE_SHELL} + for i in $(find ${DRIVERS_TOOLS}/.evergreen ${PROJECT_DIRECTORY} -name \*.sh); do + cat $i | tr -d '\r' > $i.new + mv $i.new $i + done + + + make-files-executable: + - command: shell.exec + params: + script: | + ${PREPARE_SHELL} + for i in $(find ${DRIVERS_TOOLS}/.evergreen ${PROJECT_DIRECTORY} -name \*.sh); do + chmod +x $i + done + + + run-make: + - command: shell.exec + type: test + params: + working_dir: src/go.mongodb.org/mongo-driver + script: | + ${PREPARE_SHELL} + ${BUILD_ENV|} make ${targets} BUILD_TAGS="-tags gssapi" + + + run-tests: + - command: shell.exec + type: test + params: + working_dir: src/go.mongodb.org/mongo-driver + script: | + ${PREPARE_SHELL} + + if [ ${SSL} = "ssl" ]; then + export MONGO_GO_DRIVER_CA_FILE="$PROJECT_DIRECTORY/data/certificates/ca.pem" + + if [ "Windows_NT" = "$OS" ]; then # Magic variable in cygwin + export MONGO_GO_DRIVER_CA_FILE=$(cygpath -m $MONGO_GO_DRIVER_CA_FILE) + fi + fi + + AUTH=${AUTH} SSL=${SSL} MONGODB_URI="${MONGODB_URI}" TOPOLOGY=${TOPOLOGY} MONGO_GO_DRIVER_COMPRESSOR=${MONGO_GO_DRIVER_COMPRESSOR} make evg-test + + + send-perf-data: + - command: json.send + params: + name: perf + file: src/go.mongodb.org/mongo-driver/perf.json + + + run-enterprise-auth-tests: + - command: shell.exec + type: test + params: + silent: true + working_dir: src/go.mongodb.org/mongo-driver + script: | + # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) + if [ "Windows_NT" = "$OS" ]; then + export GOPATH=$(cygpath -w $(dirname $(dirname $(dirname `pwd`)))) + 
else + export GOPATH=$(dirname $(dirname $(dirname `pwd`))) + fi; + export GOPATH="$GOPATH" + export GOROOT="${GO_DIST}" + export PATH="${GCC_PATH}:${GO_DIST}/bin:$PATH" + MONGODB_URI="${MONGODB_URI}" MONGO_GO_DRIVER_COMPRESSOR="${MONGO_GO_DRIVER_COMPRESSOR}" make -s evg-test-auth + + run-enterprise-gssapi-auth-tests: + - command: shell.exec + type: test + params: + working_dir: src/go.mongodb.org/mongo-driver + script: | + # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) + if [ "Windows_NT" = "$OS" ]; then + export GOPATH=$(cygpath -w $(dirname $(dirname $(dirname `pwd`)))) + export MONGODB_URI=${gssapi_auth_windows_mongodb_uri} + else + export GOPATH=$(dirname $(dirname $(dirname `pwd`))) + echo "${gssapi_auth_linux_keytab_base64}" > /tmp/drivers.keytab.base64 + base64 --decode /tmp/drivers.keytab.base64 > ${PROJECT_DIRECTORY}/.evergreen/drivers.keytab + mkdir -p ~/.krb5 + cat .evergreen/krb5.config | tee -a ~/.krb5/config + kinit -k -t ${PROJECT_DIRECTORY}/.evergreen/drivers.keytab -p "${gssapi_auth_username}" + export MONGODB_URI="${gssapi_auth_linux_mongodb_uri}" + fi; + export GOPATH="$GOPATH" + export GOROOT="${GO_DIST}" + export PATH="${GCC_PATH}:${GO_DIST}/bin:$PATH" + MONGO_GO_DRIVER_COMPRESSOR="${MONGO_GO_DRIVER_COMPRESSOR}" make -s evg-test-auth + + run-enterprise-gssapi-service-host-auth-tests: + - command: shell.exec + type: test + params: + working_dir: src/go.mongodb.org/mongo-driver + script: | + # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) + if [ "Windows_NT" = "$OS" ]; then + export GOPATH=$(cygpath -w $(dirname $(dirname $(dirname `pwd`)))) + export MONGODB_URI="${gssapi_service_host_auth_windows_mongodb_uri}" + else + export GOPATH=$(dirname $(dirname $(dirname `pwd`))) + echo "${gssapi_auth_linux_keytab_base64}" > /tmp/drivers.keytab.base64 + base64 --decode /tmp/drivers.keytab.base64 > ${PROJECT_DIRECTORY}/.evergreen/drivers.keytab + mkdir -p ~/.krb5 + cat .evergreen/krb5.config | tee -a ~/.krb5/config + kinit -k -t 
${PROJECT_DIRECTORY}/.evergreen/drivers.keytab -p "${gssapi_auth_username}" + export MONGODB_URI="${gssapi_service_host_auth_linux_mongodb_uri}" + fi; + export GOPATH="$GOPATH" + export GOROOT="${GO_DIST}" + export PATH="${GCC_PATH}:${GO_DIST}/bin:$PATH" + MONGO_GO_DRIVER_COMPRESSOR="${MONGO_GO_DRIVER_COMPRESSOR}" make -s evg-test-auth + +pre: + - func: fetch-source + - func: prepare-resources + - func: windows-fix + - func: fix-absolute-paths + - func: make-files-executable + +post: + - command: gotest.parse_files + params: + files: + - "src/go.mongodb.org/mongo-driver/*.suite" + - func: upload-mo-artifacts + - func: cleanup + +tasks: + - name: sa-fmt + tags: ["static-analysis"] + commands: + - func: run-make + vars: + targets: check-fmt + + - name: sa-errcheck + tags: ["static-analysis"] + commands: + - func: run-make + vars: + targets: errcheck + + + - name: sa-lint + tags: ["static-analysis"] + commands: + - func: run-make + vars: + targets: lint + + - name: sa-vet + tags: ["static-analysis"] + commands: + - func: run-make + vars: + targets: vet + + - name: perf + tags: ["performance"] + exec_timeout_secs: 7200 + commands: + - func: bootstrap-mongo-orchestration + vars: + TOPOLOGY: "server" + AUTH: "noauth" + SSL: "nossl" + - func: run-make + vars: + targets: driver-benchmark + - func: send-perf-data + + - name: sa-build-examples + tags: ["static-analysis"] + commands: + - func: run-make + vars: + targets: build-examples + + - name: test-standalone-noauth-nossl + tags: ["test", "standalone"] + commands: + - func: bootstrap-mongo-orchestration + vars: + TOPOLOGY: "server" + AUTH: "noauth" + SSL: "nossl" + - func: run-tests + vars: + TOPOLOGY: "server" + AUTH: "noauth" + SSL: "nossl" + + - name: test-standalone-noauth-nossl-snappy-compression + tags: ["test", "standalone", "compression", "snappy"] + commands: + - func: bootstrap-mongo-orchestration + vars: + TOPOLOGY: "server" + AUTH: "noauth" + SSL: "nossl" + - func: run-tests + vars: + TOPOLOGY: "server" + 
AUTH: "noauth" + SSL: "nossl" + MONGO_GO_DRIVER_COMPRESSOR: "snappy" + + - name: test-standalone-noauth-nossl-zlib-compression + tags: ["test", "standalone", "compression", "zlib"] + commands: + - func: bootstrap-mongo-orchestration + vars: + TOPOLOGY: "server" + AUTH: "noauth" + SSL: "nossl" + - func: run-tests + vars: + TOPOLOGY: "server" + AUTH: "noauth" + SSL: "nossl" + MONGO_GO_DRIVER_COMPRESSOR: "zlib" + + - name: test-standalone-auth-ssl + tags: ["test", "standalone", "authssl"] + commands: + - func: bootstrap-mongo-orchestration + vars: + TOPOLOGY: "server" + AUTH: "auth" + SSL: "ssl" + - func: run-tests + vars: + TOPOLOGY: "server" + AUTH: "auth" + SSL: "ssl" + + - name: test-standalone-auth-ssl-snappy-compression + tags: ["test", "standalone", "authssl", "compression", "snappy"] + commands: + - func: bootstrap-mongo-orchestration + vars: + TOPOLOGY: "server" + AUTH: "auth" + SSL: "ssl" + - func: run-tests + vars: + TOPOLOGY: "server" + AUTH: "auth" + SSL: "ssl" + MONGO_GO_DRIVER_COMPRESSOR: "snappy" + + - name: test-standalone-auth-ssl-zlib-compression + tags: ["test", "standalone", "authssl", "compression", "zlib"] + commands: + - func: bootstrap-mongo-orchestration + vars: + TOPOLOGY: "server" + AUTH: "auth" + SSL: "ssl" + - func: run-tests + vars: + TOPOLOGY: "server" + AUTH: "auth" + SSL: "ssl" + MONGO_GO_DRIVER_COMPRESSOR: "zlib" + + - name: test-replicaset-noauth-nossl + tags: ["test", "replicaset"] + commands: + - func: bootstrap-mongo-orchestration + vars: + TOPOLOGY: "replica_set" + AUTH: "noauth" + SSL: "nossl" + - func: run-tests + vars: + TOPOLOGY: "replica_set" + AUTH: "noauth" + SSL: "nossl" + + - name: test-replicaset-auth-ssl + tags: ["test", "replicaset", "authssl"] + commands: + - func: bootstrap-mongo-orchestration + vars: + TOPOLOGY: "replica_set" + AUTH: "auth" + SSL: "ssl" + - func: run-tests + vars: + TOPOLOGY: "replica_set" + AUTH: "auth" + SSL: "ssl" + + - name: test-sharded-noauth-nossl + tags: ["test", "sharded"] + commands: + - 
func: bootstrap-mongo-orchestration + vars: + TOPOLOGY: "sharded_cluster" + AUTH: "noauth" + SSL: "nossl" + - func: run-tests + vars: + TOPOLOGY: "sharded_cluster" + AUTH: "noauth" + SSL: "nossl" + + - name: test-sharded-noauth-nossl-snappy-compression + tags: ["test", "sharded", "compression", "snappy"] + commands: + - func: bootstrap-mongo-orchestration + vars: + TOPOLOGY: "sharded_cluster" + AUTH: "noauth" + SSL: "nossl" + - func: run-tests + vars: + TOPOLOGY: "sharded_cluster" + AUTH: "noauth" + SSL: "nossl" + MONGO_GO_DRIVER_COMPRESSOR: "snappy" + + - name: test-sharded-noauth-nossl-zlib-compression + tags: ["test", "sharded", "compression", "zlib"] + commands: + - func: bootstrap-mongo-orchestration + vars: + TOPOLOGY: "sharded_cluster" + AUTH: "noauth" + SSL: "nossl" + - func: run-tests + vars: + TOPOLOGY: "sharded_cluster" + AUTH: "noauth" + SSL: "nossl" + MONGO_GO_DRIVER_COMPRESSOR: "zlib" + + - name: test-sharded-auth-ssl + tags: ["test", "sharded", "authssl"] + commands: + - func: bootstrap-mongo-orchestration + vars: + TOPOLOGY: "sharded_cluster" + AUTH: "auth" + SSL: "ssl" + - func: run-tests + vars: + TOPOLOGY: "sharded_cluster" + AUTH: "auth" + SSL: "ssl" + + - name: test-sharded-auth-ssl-snappy-compression + tags: ["test", "sharded", "authssl", "compression", "snappy"] + commands: + - func: bootstrap-mongo-orchestration + vars: + TOPOLOGY: "sharded_cluster" + AUTH: "auth" + SSL: "ssl" + - func: run-tests + vars: + TOPOLOGY: "sharded_cluster" + AUTH: "auth" + SSL: "ssl" + MONGO_GO_DRIVER_COMPRESSOR: "snappy" + + - name: test-sharded-auth-ssl-zlib-compression + tags: ["test", "sharded", "authssl", "compression", "zlib"] + commands: + - func: bootstrap-mongo-orchestration + vars: + TOPOLOGY: "sharded_cluster" + AUTH: "auth" + SSL: "ssl" + - func: run-tests + vars: + TOPOLOGY: "sharded_cluster" + AUTH: "auth" + SSL: "ssl" + MONGO_GO_DRIVER_COMPRESSOR: "zlib" + + - name: test-enterprise-auth-plain + tags: ["test", "enterprise-auth"] + commands: + - func: 
run-enterprise-auth-tests + vars: + MONGODB_URI: "${plain_auth_mongodb_uri}" + + - name: test-enterprise-auth-gssapi + tags: ["test", "enterprise-auth"] + commands: + - func: run-enterprise-gssapi-auth-tests + + - name: test-enterprise-auth-gssapi-service-host + tags: ["test", "enterprise-auth"] + commands: + - func: run-enterprise-gssapi-service-host-auth-tests + vars: + MONGO_GO_DRIVER_COMPRESSOR: "snappy" + + - name: go1.8-build + tags: ["compile-check"] + commands: + - func: run-make + vars: + targets: "build" + BUILD_ENV: "PATH=/opt/golang/go1.8/bin:$PATH GOROOT=/opt/golang/go1.8" + + - name: go1.10-build + tags: ["compile-check"] + commands: + - func: run-make + vars: + targets: "build" + BUILD_ENV: "PATH=/opt/golang/go1.10/bin:$PATH GOROOT=/opt/golang/go1.10" + + - name: linux-32-bit + tags: ["compile-check"] + commands: + - func: run-make + vars: + targets: "build" + BUILD_ENV: "GOARCH=386" + + - name: linux-arm64 + tags: ["compile-check"] + commands: + - func: run-make + vars: + targets: "build" + BUILD_ENV: "GOARCH=arm64" + + - name: linux-s390x + tags: ["compile-check"] + commands: + - func: run-make + vars: + targets: "build" + BUILD_ENV: "GOARCH=ppc64le" + +axes: + - id: version + display_name: MongoDB Version + values: + - id: "4.0" + display_name: "4.0" + variables: + VERSION: "4.0" + - id: "3.6" + display_name: "3.6" + variables: + VERSION: "3.6" + - id: "3.4" + display_name: "3.4" + variables: + VERSION: "3.4" + - id: "3.2" + display_name: "3.2" + variables: + VERSION: "3.2" + - id: "3.0" + display_name: "3.0" + variables: + VERSION: "3.0" + - id: "2.6" + display_name: "2.6" + variables: + VERSION: "2.6" + - id: "latest" + display_name: "latest" + variables: + VERSION: "latest" + + # OSes that support versions of MongoDB >= 2.6 with SSL. 
+ - id: os-ssl-legacy + display_name: OS + values: + - id: "ubuntu1404-go-1-11" + display_name: "Ubuntu 14.04" + run_on: ubuntu1404-test + variables: + GO_DIST: "/opt/golang/go1.11" + + # OSes that require >= 3.2 for SSL + - id: os-ssl-32 + display_name: OS + values: + - id: "windows-64-go-1-11" + display_name: "Windows 64-bit" + run_on: + - windows-64-vs2015-test + variables: + GCC_PATH: "/cygdrive/c/mingw-w64/x86_64-4.9.1-posix-seh-rt_v3-rev1/mingw64/bin" + GO_DIST: "C:\\golang\\go1.11" + - id: "ubuntu1604-64-go-1-11" + display_name: "Ubuntu 16.04" + run_on: ubuntu1604-build + variables: + GO_DIST: "/opt/golang/go1.11" + - id: "osx-go-1-11" + display_name: "MacOS 10.12" + run_on: macos-1012 + variables: + GO_DIST: "/opt/golang/go1.11" + + +buildvariants: +- name: static-analysis + display_name: "Static Analysis" + run_on: + - ubuntu1604-build + expansions: + GO_DIST: "/opt/golang/go1.11" + tasks: + - name: ".static-analysis" + +- name: perf + display_name: "Performance" + run_on: + - ubuntu1604-build + expansions: + GO_DIST: "/opt/golang/go1.9" + tasks: + - name: ".performance" + +- name: build-check + display_name: "Compile Only Checks" + run_on: + - ubuntu1604-test + expansions: + GO_DIST: "/opt/golang/go1.10" + tasks: + - name: ".compile-check" + +- matrix_name: "tests-legacy-auth-ssl" + matrix_spec: { version: ["2.6", "3.0"], os-ssl-legacy: "*" } + display_name: "${version} ${os-ssl-legacy}" + tasks: + - name: ".test !.enterprise-auth !.compression" + +- matrix_name: "tests-legacy-noauth-nossl" + matrix_spec: { version: ["2.6", "3.0"], os-ssl-32: "*" } + display_name: "${version} ${os-ssl-32}" + tasks: + - name: ".test !.authssl !.enterprise-auth !.compression" + +- matrix_name: "tests-nonlegacy-servers" + matrix_spec: { version: "3.2", os-ssl-32: "*" } + display_name: "${version} ${os-ssl-32}" + tasks: + - name: ".test !.enterprise-auth !.compression" + +- matrix_name: "tests-nonlegacy-servers-with-snappy-support" + matrix_spec: { version: "3.4", os-ssl-32: 
"*" } + display_name: "${version} ${os-ssl-32}" + tasks: + - name: ".test !.enterprise-auth !.zlib" + +- matrix_name: "tests-nonlegacy-servers-with-zlib-support" + matrix_spec: { version: ["3.6", "4.0", "latest"], os-ssl-32: "*" } + display_name: "${version} ${os-ssl-32}" + tasks: + - name: ".test !.enterprise-auth" + +- matrix_name: "enterprise-auth-tests" + matrix_spec: { os-ssl-32: "*" } + display_name: "Enterprise Auth - ${os-ssl-32}" + tasks: + - name: ".test .enterprise-auth" diff --git a/vendor/go.mongodb.org/mongo-driver/.evergreen/krb5.config b/vendor/go.mongodb.org/mongo-driver/.evergreen/krb5.config new file mode 100644 index 0000000..aa518fa --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/.evergreen/krb5.config @@ -0,0 +1,8 @@ +[realms] + LDAPTEST.10GEN.CC = { + kdc = ldaptest.10gen.cc + admin_server = ldaptest.10gen.cc + } + +[libdefaults] + rdns = false diff --git a/vendor/go.mongodb.org/mongo-driver/.gitignore b/vendor/go.mongodb.org/mongo-driver/.gitignore new file mode 100644 index 0000000..018b3b0 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/.gitignore @@ -0,0 +1,11 @@ +.vscode +debug +.idea +*.iml +*.ipr +*.iws +.idea +*.sublime-project +*.sublime-workspace +driver-test-data.tar.gz +perf diff --git a/vendor/go.mongodb.org/mongo-driver/.gitmodules b/vendor/go.mongodb.org/mongo-driver/.gitmodules new file mode 100644 index 0000000..e5d2a68 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/.gitmodules @@ -0,0 +1,3 @@ +[submodule "specifications"] + path = specifications + url = git@github.com:mongodb/specifications.git diff --git a/vendor/go.mongodb.org/mongo-driver/.lint-whitelist b/vendor/go.mongodb.org/mongo-driver/.lint-whitelist new file mode 100644 index 0000000..25ed592 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/.lint-whitelist @@ -0,0 +1,64 @@ +bson/bson.go:103:5: error var SetZero should have name of the form ErrFoo +bson/bson.go:187:6: type ObjectId should be ObjectID +bson/bson.go:192:6: func ObjectIdHex 
should be ObjectIDHex +bson/bson.go:202:6: func IsObjectIdHex should be IsObjectIDHex +bson/bson.go:249:6: func NewObjectId should be NewObjectID +bson/bson.go:273:6: func NewObjectIdWithTime should be NewObjectIDWithTime +bson/bson.go:470:2: struct field Id should be ID +bson/bson.go:587:21: error strings should not be capitalized or end with punctuation or a newline +bson/bson.go:589:21: error strings should not be capitalized or end with punctuation or a newline +bson/bson.go:613:21: error strings should not be capitalized or end with punctuation or a newline +bson/bson.go:615:21: error strings should not be capitalized or end with punctuation or a newline +bson/encode.go:46:2: var typeObjectId should be typeObjectID +bson/internal/json/stream_test.go:196:3: struct field Id should be ID +bson/internal/json/stream_test.go:221:3: struct field Id should be ID +bson/internal/json/stream_test.go:285:22: should omit type []tokenStreamCase from declaration of var tokenStreamCases; it will be inferred from the right-hand side +bson/internal/testutil/close_helper.go:14:1: exported function CloseReadOnlyFile should have comment or be unexported +bson/internal/testutil/close_helper.go:8:1: exported function CloseOrError should have comment or be unexported +bson/json.go:246:6: func jdecObjectId should be jdecObjectID +bson/json.go:263:6: func jencObjectId should be jencObjectID +mongo/internal/testutil/helpers/helpers.go:10:1: exported function RequireNoErrorOnClose should have comment or be unexported +mongo/internal/testutil/helpers/helpers.go:14:1: exported function FindJSONFilesInDir should have comment or be unexported +mongo/internal/testutil/helpers/helpers.go:45:1: exported function VerifyConnStringOptions should have comment or be unexported +mongo/options/find_and_modify.go:19:1: exported function CopyFindOneAndReplaceOptions should have comment or be unexported +mongo/options/find_and_modify.go:29:1: exported function CopyFindOneAndUpdateOptions should have 
comment or be unexported +mongo/options/find_and_modify.go:9:1: exported function CopyFindOneAndDeleteOptions should have comment or be unexported +mongo/options.go:157:1: exported function OplogReplay should have comment or be unexported +mongo/options.go:177:56: exported func ReadPreference returns unexported type *mongo.optReadPreference, which can be annoying to use +bson/internal/jsonparser/bytes.go:9:10: should omit type bool from declaration of var neg; it will be inferred from the right-hand side +bson/internal/jsonparser/bytes.go:25:9: if block ends with a return statement, so drop this else and outdent its block +bson/internal/jsonparser/escape.go:113:10: if block ends with a return statement, so drop this else and outdent its block (move short variable declaration to its own line if necessary) +bson/internal/jsonparser/escape.go:123:1: comment on exported function Unescape should be of the form "Unescape ..." +bson/internal/jsonparser/parser.go:14:2: error var KeyPathNotFoundError should have name of the form ErrFoo +bson/internal/jsonparser/parser.go:15:2: error var UnknownValueTypeError should have name of the form ErrFoo +bson/internal/jsonparser/parser.go:16:2: error var MalformedJsonError should have name of the form ErrFoo +bson/internal/jsonparser/parser.go:16:2: var MalformedJsonError should be MalformedJSONError +bson/internal/jsonparser/parser.go:17:2: error var MalformedStringError should have name of the form ErrFoo +bson/internal/jsonparser/parser.go:18:2: error var MalformedArrayError should have name of the form ErrFoo +bson/internal/jsonparser/parser.go:19:2: error var MalformedObjectError should have name of the form ErrFoo +bson/internal/jsonparser/parser.go:20:2: error var MalformedValueError should have name of the form ErrFoo +bson/internal/jsonparser/parser.go:21:2: error var MalformedStringEscapeError should have name of the form ErrFoo +bson/internal/jsonparser/parser.go:147:11: if block ends with a return statement, so drop this 
else and outdent its block +bson/internal/jsonparser/parser.go:285:6: should replace curIdx += 1 with curIdx++ +bson/internal/jsonparser/parser.go:292:12: if block ends with a return statement, so drop this else and outdent its block +bson/internal/jsonparser/parser.go:303:12: if block ends with a return statement, so drop this else and outdent its block (move short variable declaration to its own line if necessary) +bson/internal/jsonparser/parser.go:329:6: don't use underscores in Go names; range var pi_1 should be pi1 +bson/internal/jsonparser/parser.go:329:12: don't use underscores in Go names; range var p_1 should be p1 +bson/internal/jsonparser/parser.go:338:1: exported function EachKey should have comment or be unexported +bson/internal/jsonparser/parser.go:489:6: should replace curIdx += 1 with curIdx++ +bson/internal/jsonparser/parser.go:503:12: if block ends with a return statement, so drop this else and outdent its block (move short variable declaration to its own line if necessary) +bson/internal/jsonparser/parser.go:517:1: comment on exported type ValueType should be of the form "ValueType ..." (with optional leading article) +bson/internal/jsonparser/parser.go:521:2: exported const NotExist should have comment (or a comment on this block) or be unexported +bson/internal/jsonparser/parser.go:582:1: comment on exported function Delete should be of the form "Delete ..." 
+bson/internal/jsonparser/parser.go:931:10: if block ends with a return statement, so drop this else and outdent its block (move short variable declaration to its own line if necessary) +bson/internal/jsonparser/parser.go:971:10: if block ends with a return statement, so drop this else and outdent its block (move short variable declaration to its own line if necessary) +bson/internal/jsonparser/parser.go:980:11: if block ends with a return statement, so drop this else and outdent its block (move short variable declaration to its own line if necessary) +bson/internal/jsonparser/parser.go:1006:10: if block ends with a return statement, so drop this else and outdent its block (move short variable declaration to its own line if necessary) +bson/internal/jsonparser/parser.go:1021:10: if block ends with a return statement, so drop this else and outdent its block (move short variable declaration to its own line if necessary) +bson/internal/jsonparser/parser.go:1128:9: if block ends with a return statement, so drop this else and outdent its block (move short variable declaration to its own line if necessary) +bson/internal/jsonparser/parser.go:1133:1: comment on exported function ParseFloat should be of the form "ParseFloat ..." +bson/internal/jsonparser/parser.go:1137:9: if block ends with a return statement, so drop this else and outdent its block (move short variable declaration to its own line if necessary) +bson/internal/jsonparser/parser.go:1146:9: if block ends with a return statement, so drop this else and outdent its block (move short variable declaration to its own line if necessary) +bson/internal/jsonparser/parser_test.go:1361:5: var testJson should be testJSON +bson/internal/jsonpretty/pretty.go:7:1: comment on exported type Options should be of the form "Options ..." 
(with optional leading article) +examples/documentation_examples/examples.go:10:1: don't use an underscore in package name diff --git a/vendor/go.mongodb.org/mongo-driver/CONTRIBUTING.md b/vendor/go.mongodb.org/mongo-driver/CONTRIBUTING.md new file mode 100644 index 0000000..0c8a802 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/CONTRIBUTING.md @@ -0,0 +1,37 @@ +# Contributing to the MongoDB Go Driver + +Thank you for your interest in contributing to the MongoDB Go driver. + +We are building this software together and strongly encourage contributions from the community that are within the guidelines set forth +below. + +## Bug Fixes and New Features + +Before starting to write code, look for existing [tickets](https://jira.mongodb.org/browse/GODRIVER) or +[create one](https://jira.mongodb.org/secure/CreateIssue!default.jspa) for your bug, issue, or feature request. This helps the community +avoid working on something that might not be of interest or which has already been addressed. + +## Pull Requests & Patches + +The Go Driver team is experimenting with GerritHub for contributions. GerritHub uses GitHub for authentication and uses a patch based +workflow. Since GerritHub supports importing of Pull Requests we will also accept Pull Requests, but Code Review will be done in +GerritHub. + +Patches should generally be made against the master (default) branch and include relevant tests, if applicable. + +Code should compile and tests should pass under all go versions which the driver currently supports. Currently the driver +supports a minimum version of go 1.7. Please ensure the following tools have been run on the code: gofmt, golint, errcheck, +go test (with coverage and with the race detector), and go vet. For convenience, you can run 'make' to run all these tools. 
+**By default, running the tests requires that you have a mongod server running on localhost, listening on the default port.** +At minimum, please test against the latest release version of the MongoDB server. + +If any tests do not pass, or relevant tests are not included, the patch will not be considered. + +If you are working on a bug or feature listed in Jira, please include the ticket number prefixed with GODRIVER in the commit, +e.g. GODRIVER-123. For the patch commit message itself, please follow the [How to Write a Git Commit Message](https://chris.beams.io/posts/git-commit/) guide. + +## Talk To Us + +If you want to work on the driver, write documentation, or have questions/complaints, please reach out to us either via +the [mongo-go-driver Google Group](https://groups.google.com/forum/#!forum/mongodb-go-driver) or by creating a Question +issue at (https://jira.mongodb.org/secure/CreateIssue!default.jspa). diff --git a/vendor/go.mongodb.org/mongo-driver/Gopkg.lock b/vendor/go.mongodb.org/mongo-driver/Gopkg.lock new file mode 100644 index 0000000..8b0f3d2 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/Gopkg.lock @@ -0,0 +1,156 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + digest = "1:a2c1d0e43bd3baaa071d1b9ed72c27d78169b2b269f71c105ac4ba34b1be4a39" + name = "github.com/davecgh/go-spew" + packages = ["spew"] + pruneopts = "UT" + revision = "346938d642f2ec3594ed81d874461961cd0faa76" + version = "v1.1.0" + +[[projects]] + digest = "1:c4a2528ccbcabf90f9f3c464a5fc9e302d592861bbfd0b7135a7de8a943d0406" + name = "github.com/go-stack/stack" + packages = ["."] + pruneopts = "UT" + revision = "259ab82a6cad3992b4e21ff5cac294ccb06474bc" + version = "v1.7.0" + +[[projects]] + branch = "master" + digest = "1:4a0c6bb4805508a6287675fac876be2ac1182539ca8a32468d8128882e9d5009" + name = "github.com/golang/snappy" + packages = ["."] + pruneopts = "UT" + revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a" + +[[projects]] + digest = "1:2e3c336fc7fde5c984d2841455a658a6d626450b1754a854b3b32e7a8f49a07a" + name = "github.com/google/go-cmp" + packages = [ + "cmp", + "cmp/internal/diff", + "cmp/internal/function", + "cmp/internal/value", + ] + pruneopts = "UT" + revision = "3af367b6b30c263d47e8895973edcca9a49cf029" + version = "v0.2.0" + +[[projects]] + digest = "1:06d94e582555f421565b117d324d2098f9e666a0bccf545bfb4eb0cc6bdc1b6e" + name = "github.com/kr/pretty" + packages = ["."] + pruneopts = "UT" + revision = "cfb55aafdaf3ec08f0db22699ab822c50091b1c4" + +[[projects]] + digest = "1:d249ec295344198f1d3cb3534dc03204a83f7321b99fa604284e4eec1792ddf5" + name = "github.com/kr/text" + packages = ["."] + pruneopts = "UT" + revision = "7cafcd837844e784b526369c9bce262804aebc60" + +[[projects]] + digest = "1:246ab598a22ea9d50f46e65f655f78161a19822f6597268b02f04af998684807" + name = "github.com/montanaflynn/stats" + packages = ["."] + pruneopts = "UT" + revision = "1bf9dbcd8cbe1fdb75add3785b1d4a9a646269ab" + version = "0.3.0" + +[[projects]] + digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + pruneopts = "UT" + revision = 
"792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + digest = "1:38bd78a1b1c018fd71f17abcfe4b5fde0c0240b96c5b107ede13073081997de5" + name = "github.com/stretchr/testify" + packages = [ + "assert", + "require", + ] + pruneopts = "UT" + revision = "4d4bfba8f1d1027c4fdbe371823030df51419987" + +[[projects]] + branch = "master" + digest = "1:88611438dc87bda5862e2d12d8e901bac8ebfd4a9034a896d2f69835360439a7" + name = "github.com/tidwall/pretty" + packages = ["."] + pruneopts = "UT" + revision = "65a9db5fad5105a89e17f38adcc9878685be6d78" + +[[projects]] + digest = "1:8e214e348491ae6a1a79c5bb728ed4eca7f1a2892cd66ad11f46558d684333f4" + name = "github.com/xdg/scram" + packages = ["."] + pruneopts = "UT" + revision = "b32d4bd2c91c5a4f0ea2a230da4350051b5fb5b0" + +[[projects]] + branch = "master" + digest = "1:f5c1d04bc09c644c592b45b9f0bad4030521b1a7d11c7dadbb272d9439fa6e8e" + name = "github.com/xdg/stringprep" + packages = ["."] + pruneopts = "UT" + revision = "73f8eece6fdcd902c185bf651de50f3828bed5ed" + +[[projects]] + digest = "1:6cd78d698d1964891f60e982752a323f1499374c9940ef00d84724ab4a335129" + name = "golang.org/x/crypto" + packages = ["pbkdf2"] + pruneopts = "UT" + revision = "5f31782cfb2b6373211f8f9fbf31283fa234b570" + +[[projects]] + digest = "1:76ee51c3f468493aff39dbacc401e8831fbb765104cbf613b89bef01cf4bad70" + name = "golang.org/x/net" + packages = ["context"] + pruneopts = "UT" + revision = "22ae77b79946ea320088417e4d50825671d82d57" + +[[projects]] + digest = "1:e0140c0c868c6e0f01c0380865194592c011fe521d6e12d78bfd33e756fe018a" + name = "golang.org/x/sync" + packages = ["semaphore"] + pruneopts = "UT" + revision = "fd80eb99c8f653c847d294a001bdf2a3a6f768f5" + +[[projects]] + digest = "1:52391a559cfbc9a48ecc35e15b2b4d742eb8cea1e8a759ee2f80b71a91db232c" + name = "golang.org/x/text" + packages = [ + "internal/gen", + "internal/triegen", + "internal/ucd", + "transform", + "unicode/cldr", + "unicode/norm", + ] + pruneopts = "UT" + revision = 
"0605a8320aceb4207a5fb3521281e17ec2075476" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "github.com/go-stack/stack", + "github.com/golang/snappy", + "github.com/google/go-cmp/cmp", + "github.com/kr/pretty", + "github.com/montanaflynn/stats", + "github.com/stretchr/testify/assert", + "github.com/stretchr/testify/require", + "github.com/tidwall/pretty", + "github.com/xdg/scram", + "github.com/xdg/stringprep", + "golang.org/x/sync/semaphore", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/go.mongodb.org/mongo-driver/Gopkg.toml b/vendor/go.mongodb.org/mongo-driver/Gopkg.toml new file mode 100644 index 0000000..9edd17c --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/Gopkg.toml @@ -0,0 +1,54 @@ +# Gopkg.toml example +# +# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" +# +# [prune] +# non-go = false +# go-tests = true +# unused-packages = true + + +[[constraint]] + name = "github.com/go-stack/stack" + version = "1.7.0" + +[[constraint]] + branch = "master" + name = "github.com/golang/snappy" + +[[constraint]] + name = "github.com/google/go-cmp" + version = "0.2.0" + +[[constraint]] + name = "github.com/montanaflynn/stats" + version = "0.3.0" + +[[constraint]] + branch = "master" + name = "github.com/tidwall/pretty" + +[[constraint]] + branch = "master" + name = "github.com/xdg/stringprep" + +[prune] + go-tests = true + unused-packages = true diff --git a/vendor/go.mongodb.org/mongo-driver/LICENSE b/vendor/go.mongodb.org/mongo-driver/LICENSE new file mode 
100644 index 0000000..261eeb9 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/go.mongodb.org/mongo-driver/Makefile b/vendor/go.mongodb.org/mongo-driver/Makefile new file mode 100644 index 0000000..0ac47ee --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/Makefile @@ -0,0 +1,128 @@ +BSON_PKGS = $(shell etc/list_pkgs.sh ./bson) +BSON_TEST_PKGS = $(shell etc/list_test_pkgs.sh ./bson) +MONGO_PKGS = $(shell etc/list_pkgs.sh ./mongo) +MONGO_TEST_PKGS = $(shell etc/list_test_pkgs.sh ./mongo) +UNSTABLE_PKGS = $(shell etc/list_pkgs.sh ./x) +UNSTABLE_TEST_PKGS = $(shell etc/list_test_pkgs.sh ./x) +TAG_PKG = $(shell etc/list_pkgs.sh ./tag) +TAG_TEST_PKG = $(shell etc/list_test_pkgs.sh ./tag) +EXAMPLES_PKGS = $(shell etc/list_pkgs.sh ./examples) +EXAMPLES_TEST_PKGS = $(shell etc/list_test_pkgs.sh ./examples) +PKGS = $(BSON_PKGS) $(MONGO_PKGS) $(UNSTABLE_PKGS) $(TAG_PKG) $(EXAMPLES_PKGS) +TEST_PKGS = $(BSON_TEST_PKGS) $(MONGO_TEST_PKGS) $(UNSTABLE_TEST_PKGS) $(TAG_PKG) $(EXAMPLES_TEST_PKGS) + +TEST_TIMEOUT = 600 + +.PHONY: default +default: check-env check-fmt vet build-examples lint errcheck test-cover test-race + +.PHONY: check-env +check-env: + etc/check_env.sh + +.PHONY: doc +doc: + godoc -http=:6060 -index + +.PHONY: build-examples +build-examples: + go build $(BUILD_TAGS) ./examples/... ./x/network/examples/... + +.PHONY: build +build: + go build $(filter-out ./core/auth/internal/gssapi,$(PKGS)) + +.PHONY: check-fmt +check-fmt: + @gofmt -l -s $(PKGS) | read; if [ $$? == 0 ]; then echo "gofmt check failed for:"; gofmt -l -s $(PKGS) | sed -e 's/^/ - /'; exit 1; fi + +.PHONY: fmt +fmt: + gofmt -l -s -w $(PKGS) + +.PHONY: lint +lint: + golint $(PKGS) | ./etc/lintscreen.pl .lint-whitelist + +.PHONY: lint-add-whitelist +lint-add-whitelist: + golint $(PKGS) | ./etc/lintscreen.pl -u .lint-whitelist + sort .lint-whitelist -o .lint-whitelist + +.PHONY: errcheck +errcheck: + errcheck -exclude .errcheck-excludes ./bson/... ./mongo/... ./x/... 
+ +.PHONY: test +test: + go test $(BUILD_TAGS) -timeout $(TEST_TIMEOUT)s $(TEST_PKGS) + +.PHONY: test-cover +test-cover: + go test $(BUILD_TAGS) -timeout $(TEST_TIMEOUT)s -cover $(COVER_ARGS) $(TEST_PKGS) + +.PHONY: test-race +test-race: + go test $(BUILD_TAGS) -timeout $(TEST_TIMEOUT)s -race $(TEST_PKGS) + +.PHONY: test-short +test-short: + go test $(BUILD_TAGS) -timeout $(TEST_TIMEOUT)s -short $(TEST_PKGS) + +.PHONY: update-bson-corpus-tests +update-bson-corpus-tests: + etc/update-spec-tests.sh bson-corpus + +.PHONY: update-connection-string-tests +update-connection-string-tests: + etc/update-spec-tests.sh connection-string + +.PHONY: update-crud-tests +update-crud-tests: + etc/update-spec-tests.sh crud + +.PHONY: update-initial-dns-seedlist-discovery-tests +update-initial-dns-seedlist-discovery-tests: + etc/update-spec-tests.sh initial-dns-seedlist-discovery + +.PHONY: update-max-staleness-tests +update-max-staleness-tests: + etc/update-spec-tests.sh max-staleness + +.PHONY: update-server-discovery-and-monitoring-tests +update-server-discovery-and-monitoring-tests: + etc/update-spec-tests.sh server-discovery-and-monitoring + +.PHONY: update-server-selection-tests +update-server-selection-tests: + etc/update-spec-tests.sh server-selection + +.PHONY: update-notices +update-notices: + etc/generate-notices.pl > THIRD-PARTY-NOTICES + +.PHONY: vet +vet: + go vet -cgocall=false -composites=false -unusedstringmethods="Error" $(PKGS) + + +# Evergreen specific targets +.PHONY: evg-test +evg-test: + go test $(BUILD_TAGS) -v -timeout $(TEST_TIMEOUT)s $(TEST_PKGS) > test.suite + +.PHONY: evg-test-auth +evg-test-auth: + go run -tags gssapi ./x/network/examples/count/main.go -uri $(MONGODB_URI) + +# benchmark specific targets and support +perf:driver-test-data.tar.gz + tar -zxf $< $(if $(eq $(UNAME_S),Darwin),-s , --transform=s)/data/perf/ + @touch $@ +driver-test-data.tar.gz: + curl --retry 5 "https://s3.amazonaws.com/boxes.10gen.com/build/driver-test-data.tar.gz" -o 
driver-test-data.tar.gz --silent --max-time 120 +benchmark:perf + go test $(BUILD_TAGS) -benchmem -bench=. ./benchmark +driver-benchmark:perf + @go run cmd/godriver-benchmark/main.go | tee perf.suite +.PHONY:benchmark driver-benchmark diff --git a/vendor/go.mongodb.org/mongo-driver/README.md b/vendor/go.mongodb.org/mongo-driver/README.md new file mode 100644 index 0000000..7740853 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/README.md @@ -0,0 +1,201 @@ +

+

+ + GoDoc + GoDoc + +

+ +# MongoDB Go Driver + +The MongoDB supported driver for Go. + +------------------------- +- [Requirements](#requirements) +- [Installation](#installation) +- [Usage](#usage) +- [Bugs/Feature Reporting](#bugs-feature-reporting) +- [Testing / Development](#testing--development) +- [Continuous Integration](#continuous-integration) +- [License](#license) + +------------------------- +## Requirements + +- Go 1.10 or higher. We aim to support the latest supported versions of go. +- MongoDB 2.6 and higher. + +------------------------- +## Installation + +The recommended way to get started using the MongoDB Go driver is by using `dep` to install the dependency in your project. + +```bash +dep ensure -add "go.mongodb.org/mongo-driver/mongo@~1.0.3" +``` + +------------------------- +## Usage + +To get started with the driver, import the `mongo` package, create a `mongo.Client`: + +```go +import "go.mongodb.org/mongo-driver/mongo" + +client, err := mongo.NewClient(options.Client().ApplyURI("mongodb://localhost:27017")) +``` + +And connect it to your running MongoDB server: + +```go +ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) +err = client.Connect(ctx) +``` + +To do this in a single step, you can use the `Connect` function: + +```go +ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) +client, err := mongo.Connect(ctx, options.Client().ApplyURI("mongodb://localhost:27017")) +``` + +Calling `Connect` does not block for server discovery. 
If you wish to know if a MongoDB server has been found and connected to, +use the `Ping` method: + +```go +ctx, _ = context.WithTimeout(context.Background(), 2*time.Second) +err = client.Ping(ctx, readpref.Primary()) +``` + +To insert a document into a collection, first retrieve a `Database` and then `Collection` instance from the `Client`: + +```go +collection := client.Database("testing").Collection("numbers") +``` + +The `Collection` instance can then be used to insert documents: + +```go +ctx, _ = context.WithTimeout(context.Background(), 5*time.Second) +res, err := collection.InsertOne(ctx, bson.M{"name": "pi", "value": 3.14159}) +id := res.InsertedID +``` + +Several query methods return a cursor, which can be used like this: + +```go +ctx, _ = context.WithTimeout(context.Background(), 30*time.Second) +cur, err := collection.Find(ctx, bson.D{}) +if err != nil { log.Fatal(err) } +defer cur.Close(ctx) +for cur.Next(ctx) { + var result bson.M + err := cur.Decode(&result) + if err != nil { log.Fatal(err) } + // do something with result.... +} +if err := cur.Err(); err != nil { + log.Fatal(err) +} +``` + +For methods that return a single item, a `SingleResult` instance is returned: + +```go +var result struct { + Value float64 +} +filter := bson.M{"name": "pi"} +ctx, _ = context.WithTimeout(context.Background(), 5*time.Second) +err = collection.FindOne(ctx, filter).Decode(&result) +if err != nil { + log.Fatal(err) +} +// Do something with result... +``` + +Additional examples and documentation can be found under the examples directory and [on the MongoDB Documentation website](https://docs.mongodb.com/ecosystem/drivers/go/). + +------------------------- +## Bugs / Feature Reporting + +New Features and bugs can be reported on jira: https://jira.mongodb.org/browse/GODRIVER + +------------------------- +## Testing / Development + +The driver tests can be run against several database configurations. 
The simplest configuration is a standalone mongod with no auth, no ssl, and no compression. To run these basic driver tests, make sure a standalone MongoDB server instance is running at localhost:27017. To run the tests, you can run `make` (on Windows, run `nmake`) with the following: + +``` +TOPOLOGY=server make +``` + +The `TOPOLOGY` variable must be set to run tests. This will run coverage, run go-lint, run go-vet, and build the examples. + +### Testing Different Topologies + +To test a **replica set**, set `MONGODB_URI=""` and `TOPOLOGY=replica_set` for the `make` command. For example, for a local replica set named `rs1` comprised of three nodes on ports 27017, 27018, and 27019: + +``` +MONGODB_URI="mongodb://localhost:27017,localhost:27018,localhost:27019/?replicaSet=rs1" TOPOLOGY=replica_set make +``` + +To test a **sharded cluster**, set `MONGODB_URI=""` and `TOPOLOGY=sharded_cluster` variables for the `make` command. For example, for a sharded cluster with a single mongos on port 27017: + +``` +MONGODB_URI="mongodb://localhost:27017/" TOPOLOGY=sharded_cluster make +``` + +### Testing Auth and SSL + +To test authentication and SSL, first set up a MongoDB cluster with auth and SSL configured. Testing authentication requires a user with the `root` role on the `admin` database. The Go Driver repository comes with example certificates in the `data/certificates` directory. These certs can be used for testing. Here is an example command that would run a mongod with SSL correctly configured for tests: + +``` +mongod \ +--auth \ +--sslMode requireSSL \ +--sslPEMKeyFile $(pwd)/data/certificates/server.pem \ +--sslCAFile $(pwd)/data/certificates/ca.pem \ +--sslWeakCertificateValidation +``` + +To run the tests with `make`, set `MONGO_GO_DRIVER_CA_FILE` to the location of the CA file used by the database, set `MONGODB_URI` to the connection string of the server, set `AUTH=auth`, and set `SSL=ssl`. 
For example: + +``` +AUTH=auth SSL=ssl MONGO_GO_DRIVER_CA_FILE=$(pwd)/data/certificates/ca.pem MONGODB_URI="mongodb://user:password@localhost:27017/?authSource=admin" make +``` + +Notes: +- The `--sslWeakCertificateValidation` flag is required on the server for the test suite to work correctly. +- The test suite requires the auth database to be set with `?authSource=admin`, not `/admin`. + +### Testing Compression + +The MongoDB Go Driver supports wire protocol compression using Snappy or zLib. To run tests with wire protocol compression, set `MONGO_GO_DRIVER_COMPRESSOR` to `snappy` or `zlib`. For example: + +``` +MONGO_GO_DRIVER_COMPRESSOR=snappy make +``` + +Ensure the [`--networkMessageCompressors` flag](https://docs.mongodb.com/manual/reference/program/mongod/#cmdoption-mongod-networkmessagecompressors) on mongod or mongos includes `zlib` if testing zLib compression. + +------------------------- +## Feedback + +The MongoDB Go Driver is not feature complete, so any help is appreciated. Check out the [project page](https://jira.mongodb.org/browse/GODRIVER) +for tickets that need completing. See our [contribution guidelines](CONTRIBUTING.md) for details. + +------------------------- +## Continuous Integration + +Commits to master are run automatically on [evergreen](https://evergreen.mongodb.com/waterfall/mongo-go-driver). + +------------------------- +## Thanks and Acknowledgement + +@ashleymcnamara - Mongo Gopher Artwork + +------------------------- +## License + +The MongoDB Go Driver is licensed under the [Apache License](LICENSE). 
diff --git a/vendor/go.mongodb.org/mongo-driver/THIRD-PARTY-NOTICES b/vendor/go.mongodb.org/mongo-driver/THIRD-PARTY-NOTICES new file mode 100644 index 0000000..6e6cd4b --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/THIRD-PARTY-NOTICES @@ -0,0 +1,445 @@ +--------------------------------------------------------------------- +License notice for gopkg.in/mgo.v2/bson +--------------------------------------------------------------------- + +BSON library for Go + +Copyright (c) 2010-2013 - Gustavo Niemeyer + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +--------------------------------------------------------------------- +License notice for JSON and CSV code from github.com/golang/go +--------------------------------------------------------------------- + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +---------------------------------------------------------------------- +License notice for github.com/buger/jsonparser +---------------------------------------------------------------------- + +MIT License + +Copyright (c) 2016 Leonid Bugaev + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +---------------------------------------------------------------------- +License notice for github.com/davecgh/go-spew +---------------------------------------------------------------------- + +ISC License + +Copyright (c) 2012-2016 Dave Collins + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +---------------------------------------------------------------------- +License notice for github.com/golang/snappy +---------------------------------------------------------------------- + +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +License notice for github.com/google/go-cmp +---------------------------------------------------------------------- + +Copyright (c) 2017 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +License notice for github.com/kr/pretty +---------------------------------------------------------------------- + +The MIT License (MIT) + +Copyright 2012 Keith Rarick + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ +---------------------------------------------------------------------- +License notice for github.com/kr/text +---------------------------------------------------------------------- + +Copyright 2012 Keith Rarick + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ +---------------------------------------------------------------------- +License notice for github.com/montanaflynn/stats +---------------------------------------------------------------------- + +The MIT License (MIT) + +Copyright (c) 2014-2015 Montana Flynn (https://anonfunction.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +---------------------------------------------------------------------- +License notice for github.com/pmezard/go-difflib +---------------------------------------------------------------------- + +Copyright (c) 2013, Patrick Mezard +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + The names of its contributors may not be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +License notice for github.com/stretchr/testify +---------------------------------------------------------------------- + +Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell + +Please consider promoting this project if you find it useful. 
+ +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT +OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE +OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +---------------------------------------------------------------------- +License notice for github.com/tidwall/pretty +---------------------------------------------------------------------- + +The MIT License (MIT) + +Copyright (c) 2017 Josh Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +---------------------------------------------------------------------- +License notice for golang.org/x/crypto +---------------------------------------------------------------------- + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +License notice for golang.org/x/net +---------------------------------------------------------------------- + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +License notice for golang.org/x/sync +---------------------------------------------------------------------- + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +License notice for gopkg.in/yaml.v2 +---------------------------------------------------------------------- + +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/go.mongodb.org/mongo-driver/benchmark/bson.go b/vendor/go.mongodb.org/mongo-driver/benchmark/bson.go new file mode 100644 index 0000000..e5de5fb --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/benchmark/bson.go @@ -0,0 +1,75 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package benchmark + +import ( + "errors" + "io/ioutil" + "path/filepath" + + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/x/bsonx" +) + +const ( + perfDataDir = "perf" + bsonDataDir = "extended_bson" + flatBSONData = "flat_bson.json" + deepBSONData = "deep_bson.json" + fullBSONData = "full_bson.json" +) + +// utility functions for the bson benchmarks + +func loadSourceDocument(pathParts ...string) (bsonx.Doc, error) { + data, err := ioutil.ReadFile(filepath.Join(pathParts...)) + if err != nil { + return nil, err + } + doc := bsonx.Doc{} + err = bson.UnmarshalExtJSON(data, true, &doc) + if err != nil { + return nil, err + } + + if len(doc) == 0 { + return nil, errors.New("empty bson document") + } + + return doc, nil +} + +func loadSourceRaw(pathParts ...string) (bson.Raw, error) { + doc, err := loadSourceDocument(pathParts...) + if err != nil { + return nil, err + } + raw, err := doc.MarshalBSON() + if err != nil { + return nil, err + } + + return bson.Raw(raw), nil +} + +func loadSourceD(pathParts ...string) (bson.D, error) { + data, err := ioutil.ReadFile(filepath.Join(pathParts...)) + if err != nil { + return nil, err + } + doc := bson.D{} + err = bson.UnmarshalExtJSON(data, true, &doc) + if err != nil { + return nil, err + } + + if len(doc) == 0 { + return nil, errors.New("empty bson document") + } + + return doc, nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/benchmark/bson_document.go b/vendor/go.mongodb.org/mongo-driver/benchmark/bson_document.go new file mode 100644 index 0000000..02c1842 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/benchmark/bson_document.go @@ -0,0 +1,123 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package benchmark + +import ( + "context" + "errors" + + "go.mongodb.org/mongo-driver/x/bsonx" +) + +func bsonDocumentEncoding(ctx context.Context, tm TimerManager, iters int, source string) error { + doc, err := loadSourceDocument(getProjectRoot(), perfDataDir, bsonDataDir, source) + if err != nil { + return err + } + + tm.ResetTimer() + + for i := 0; i < iters; i++ { + out, err := doc.MarshalBSON() + if err != nil { + return err + } + if len(out) == 0 { + return errors.New("marshaling error") + } + } + + return nil +} + +func bsonDocumentDecodingLazy(ctx context.Context, tm TimerManager, iters int, source string) error { + doc, err := loadSourceDocument(getProjectRoot(), perfDataDir, bsonDataDir, source) + if err != nil { + return err + } + + raw, err := doc.MarshalBSON() + if err != nil { + return err + } + + tm.ResetTimer() + + for i := 0; i < iters; i++ { + out, err := bsonx.ReadDoc(raw) + if err != nil { + return err + } + if len(out) == 0 { + return errors.New("marshaling error") + } + } + return nil +} + +func bsonDocumentDecoding(ctx context.Context, tm TimerManager, iters, numKeys int, source string) error { + doc, err := loadSourceDocument(getProjectRoot(), perfDataDir, bsonDataDir, source) + if err != nil { + return err + } + + raw, err := doc.MarshalBSON() + if err != nil { + return err + } + + tm.ResetTimer() + + for i := 0; i < iters; i++ { + out, err := bsonx.ReadDoc(raw) + if err != nil { + return err + } + + if len(out) != numKeys { + return errors.New("document parsing error") + } + } + return nil + +} + +func BSONFlatDocumentEncoding(ctx context.Context, tm TimerManager, iters int) error { + return bsonDocumentEncoding(ctx, tm, iters, flatBSONData) +} + +func BSONFlatDocumentDecodingLazy(ctx context.Context, tm TimerManager, iters int) error { + return bsonDocumentDecodingLazy(ctx, tm, iters, flatBSONData) +} + +func BSONFlatDocumentDecoding(ctx 
context.Context, tm TimerManager, iters int) error { + return bsonDocumentDecoding(ctx, tm, iters, 145, flatBSONData) +} + +func BSONDeepDocumentEncoding(ctx context.Context, tm TimerManager, iters int) error { + return bsonDocumentEncoding(ctx, tm, iters, deepBSONData) +} + +func BSONDeepDocumentDecodingLazy(ctx context.Context, tm TimerManager, iters int) error { + return bsonDocumentDecodingLazy(ctx, tm, iters, deepBSONData) +} + +func BSONDeepDocumentDecoding(ctx context.Context, tm TimerManager, iters int) error { + return bsonDocumentDecoding(ctx, tm, iters, 126, deepBSONData) +} + +func BSONFullDocumentEncoding(ctx context.Context, tm TimerManager, iters int) error { + return bsonDocumentEncoding(ctx, tm, iters, fullBSONData) +} + +func BSONFullDocumentDecodingLazy(ctx context.Context, tm TimerManager, iters int) error { + return bsonDocumentDecodingLazy(ctx, tm, iters, fullBSONData) +} + +func BSONFullDocumentDecoding(ctx context.Context, tm TimerManager, iters int) error { + return bsonDocumentDecoding(ctx, tm, iters, 145, fullBSONData) +} diff --git a/vendor/go.mongodb.org/mongo-driver/benchmark/bson_map.go b/vendor/go.mongodb.org/mongo-driver/benchmark/bson_map.go new file mode 100644 index 0000000..164827e --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/benchmark/bson_map.go @@ -0,0 +1,88 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package benchmark + +import ( + "context" + "errors" + "fmt" + + "go.mongodb.org/mongo-driver/bson" +) + +// bsonMapDecoding benchmarks decoding the named BSON data set into a +// map[string]interface{}, running iters iterations after resetting the timer. +func bsonMapDecoding(ctx context.Context, tm TimerManager, iters int, dataSet string) error { + r, err := loadSourceRaw(getProjectRoot(), perfDataDir, bsonDataDir, dataSet) + if err != nil { + return err + } + + tm.ResetTimer() + + for i := 0; i < iters; i++ { + out := make(map[string]interface{}) + err := bson.Unmarshal(r, &out) + if err != nil { + // Fix: propagate the failure; previously this returned nil, + // silently masking decode errors and invalidating the benchmark. + return err + } + if len(out) == 0 { + return fmt.Errorf("decoding failed") + } + } + return nil +} + +// bsonMapEncoding benchmarks re-encoding a decoded map with MarshalAppend, +// reusing buf's backing storage across iterations. +func bsonMapEncoding(ctx context.Context, tm TimerManager, iters int, dataSet string) error { + r, err := loadSourceRaw(getProjectRoot(), perfDataDir, bsonDataDir, dataSet) + if err != nil { + return err + } + + doc := make(map[string]interface{}) + err = bson.Unmarshal(r, &doc) + if err != nil { + return err + } + + var buf []byte + tm.ResetTimer() + for i := 0; i < iters; i++ { + buf, err = bson.MarshalAppend(buf[:0], doc) + if err != nil { + // Fix: propagate the failure; previously this returned nil, + // silently masking encode errors and invalidating the benchmark. + return err + } + + if len(buf) == 0 { + return errors.New("encoding failed") + } + } + + return nil +} + +func BSONFlatMapDecoding(ctx context.Context, tm TimerManager, iters int) error { + return bsonMapDecoding(ctx, tm, iters, flatBSONData) +} + +func BSONFlatMapEncoding(ctx context.Context, tm TimerManager, iters int) error { + return bsonMapEncoding(ctx, tm, iters, flatBSONData) +} + +func BSONDeepMapDecoding(ctx context.Context, tm TimerManager, iters int) error { + return bsonMapDecoding(ctx, tm, iters, deepBSONData) +} + +func BSONDeepMapEncoding(ctx context.Context, tm TimerManager, iters int) error { + return bsonMapEncoding(ctx, tm, iters, deepBSONData) +} + +func BSONFullMapDecoding(ctx context.Context, tm TimerManager, iters int) error { + return bsonMapDecoding(ctx, tm, iters, fullBSONData) +} + +func BSONFullMapEncoding(ctx context.Context, tm TimerManager, iters int) error 
{ + return bsonMapEncoding(ctx, tm, iters, fullBSONData) +} diff --git a/vendor/go.mongodb.org/mongo-driver/benchmark/bson_struct.go b/vendor/go.mongodb.org/mongo-driver/benchmark/bson_struct.go new file mode 100644 index 0000000..12ec493 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/benchmark/bson_struct.go @@ -0,0 +1,103 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package benchmark + +import ( + "context" + "errors" + + "go.mongodb.org/mongo-driver/bson" +) + +func BSONFlatStructDecoding(ctx context.Context, tm TimerManager, iters int) error { + r, err := loadSourceRaw(getProjectRoot(), perfDataDir, bsonDataDir, flatBSONData) + if err != nil { + return err + } + + tm.ResetTimer() + + for i := 0; i < iters; i++ { + out := flatBSON{} + err := bson.Unmarshal(r, &out) + if err != nil { + return err + } + } + return nil +} + +func BSONFlatStructEncoding(ctx context.Context, tm TimerManager, iters int) error { + r, err := loadSourceRaw(getProjectRoot(), perfDataDir, bsonDataDir, flatBSONData) + if err != nil { + return err + } + + doc := flatBSON{} + err = bson.Unmarshal(r, &doc) + if err != nil { + return err + } + + var buf []byte + + tm.ResetTimer() + for i := 0; i < iters; i++ { + buf, err = bson.Marshal(doc) + if err != nil { + return err + } + if len(buf) == 0 { + return errors.New("encoding failed") + } + } + return nil +} + +func BSONFlatStructTagsEncoding(ctx context.Context, tm TimerManager, iters int) error { + r, err := loadSourceRaw(getProjectRoot(), perfDataDir, bsonDataDir, flatBSONData) + if err != nil { + return err + } + + doc := flatBSONTags{} + err = bson.Unmarshal(r, &doc) + if err != nil { + return err + } + + var buf []byte + + tm.ResetTimer() + for i := 0; i < iters; i++ { + buf, err = 
bson.MarshalAppend(buf[:0], doc) + if err != nil { + return err + } + if len(buf) == 0 { + return errors.New("encoding failed") + } + } + return nil +} + +func BSONFlatStructTagsDecoding(ctx context.Context, tm TimerManager, iters int) error { + r, err := loadSourceRaw(getProjectRoot(), perfDataDir, bsonDataDir, flatBSONData) + if err != nil { + return err + } + + tm.ResetTimer() + for i := 0; i < iters; i++ { + out := flatBSONTags{} + err := bson.Unmarshal(r, &out) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/benchmark/bson_test.go b/vendor/go.mongodb.org/mongo-driver/benchmark/bson_test.go new file mode 100644 index 0000000..db05dd8 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/benchmark/bson_test.go @@ -0,0 +1,35 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package benchmark + +import "testing" + +// func BenchmarkBSONFullReaderDecoding(b *testing.B) { WrapCase(BSONFullReaderDecoding)(b) } + +func BenchmarkBSONFlatDocumentEncoding(b *testing.B) { WrapCase(BSONFlatDocumentEncoding)(b) } +func BenchmarkBSONFlatDocumentDecodingLazy(b *testing.B) { WrapCase(BSONFlatDocumentDecodingLazy)(b) } +func BenchmarkBSONFlatDocumentDecoding(b *testing.B) { WrapCase(BSONFlatDocumentDecoding)(b) } +func BenchmarkBSONDeepDocumentEncoding(b *testing.B) { WrapCase(BSONDeepDocumentEncoding)(b) } +func BenchmarkBSONDeepDocumentDecodingLazy(b *testing.B) { WrapCase(BSONDeepDocumentDecodingLazy)(b) } +func BenchmarkBSONDeepDocumentDecoding(b *testing.B) { WrapCase(BSONDeepDocumentDecoding)(b) } + +// func BenchmarkBSONFullDocumentEncoding(b *testing.B) { WrapCase(BSONFullDocumentEncoding)(b) } +// func BenchmarkBSONFullDocumentDecodingLazy(b *testing.B) { 
WrapCase(BSONFullDocumentDecodingLazy)(b) } +// func BenchmarkBSONFullDocumentDecoding(b *testing.B) { WrapCase(BSONFullDocumentDecoding)(b) } + +func BenchmarkBSONFlatMapDecoding(b *testing.B) { WrapCase(BSONFlatMapDecoding)(b) } +func BenchmarkBSONFlatMapEncoding(b *testing.B) { WrapCase(BSONFlatMapEncoding)(b) } +func BenchmarkBSONDeepMapDecoding(b *testing.B) { WrapCase(BSONDeepMapDecoding)(b) } +func BenchmarkBSONDeepMapEncoding(b *testing.B) { WrapCase(BSONDeepMapEncoding)(b) } + +// func BenchmarkBSONFullMapDecoding(b *testing.B) { WrapCase(BSONFullMapDecoding)(b) } +// func BenchmarkBSONFullMapEncoding(b *testing.B) { WrapCase(BSONFullMapEncoding)(b) } + +func BenchmarkBSONFlatStructDecoding(b *testing.B) { WrapCase(BSONFlatStructDecoding)(b) } +func BenchmarkBSONFlatStructTagsDecoding(b *testing.B) { WrapCase(BSONFlatStructTagsDecoding)(b) } +func BenchmarkBSONFlatStructEncoding(b *testing.B) { WrapCase(BSONFlatStructEncoding)(b) } +func BenchmarkBSONFlatStructTagsEncoding(b *testing.B) { WrapCase(BSONFlatStructTagsEncoding)(b) } diff --git a/vendor/go.mongodb.org/mongo-driver/benchmark/bson_types.go b/vendor/go.mongodb.org/mongo-driver/benchmark/bson_types.go new file mode 100644 index 0000000..9d46cbc --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/benchmark/bson_types.go @@ -0,0 +1,306 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package benchmark + +import "go.mongodb.org/mongo-driver/bson/primitive" + +type flatBSONTags struct { + ID primitive.ObjectID `bson:"_id"` + + AA int64 `bson:"AAgSNVyBb"` + AI bool `bson:"aicoMxZq"` + AM int64 `bson:"AMQrGQmu"` + Ag int `bson:"AgYYbYPr"` + Ah int64 `bson:"ahFCBmqT"` + At int64 `bson:"AtWNZJXa"` + BB string `bson:"BBqZInWV"` + BK int64 `bson:"bkuaZWRT"` + Bw int `bson:"BwTXiovJ"` + CD int `bson:"CDIGOuIZ"` + CEA string `bson:"CEtYKsdd"` + CEB string `bson:"cepcgozk"` + CF int `bson:"CFujXoob"` + CV int64 `bson:"cVjWCrlu"` + CX string `bson:"cxOHMeDJ"` + CY string `bson:"CYhSCkWB"` + Cq string `bson:"CqCssWxW"` + DC int `bson:"dCLfYqqM"` + DDA int `bson:"ddPdLgGg"` + DDB int `bson:"ddVenEkK"` + DH string `bson:"dHsYhRbV"` + DJ int `bson:"DJsnHZIC"` + DN string `bson:"dNSuxlSU"` + DO int64 `bson:"doshbrpF"` + DP string `bson:"dpbwfSRb"` + DQ int64 `bson:"DQBQcQFj"` + DT string `bson:"dtywOLeD"` + DV int `bson:"dVkWIafN"` + EG bool `bson:"egxZaSsw"` + ER string `bson:"eRTIdIJR"` + FD int64 `bson:"FDYGeSiR"` + FE string `bson:"fEheUtop"` + Fp bool `bson:"FpduyhQP"` + GE string `bson:"gErhgZTh"` + GY int `bson:"gySFZeAE"` + Gi uint `bson:"GiAHzFII"` + HN string `bson:"hnVgYIQi"` + HQA int `bson:"HQeCoswW"` + HQB int `bson:"HQiykral"` + HV int64 `bson:"HVHyetUM"` + HW int `bson:"hwHOTmmW"` + Hi bool `bson:"HicJbMpj"` + Hr int `bson:"HrUPbFHD"` + IF string `bson:"iFFGfTXc"` + IJ int `bson:"ijwXMKqI"` + IW int `bson:"iwfbMdcv"` + Ib string `bson:"Ibrdrtgg"` + Is bool `bson:"IsorvnMR"` + JB string `bson:"jbUymqiB"` + JM string `bson:"jmglLvAS"` + JW int `bson:"jWaFvVAz"` + JX int `bson:"JXMyYkfb"` + Jh bool `bson:"JhImQOkw"` + Jr string `bson:"JrJzKiIx"` + Jz int `bson:"JzgaUWVG"` + KF bool `bson:"kfvcFmKw"` + KM int64 `bson:"KMKBtlov"` + Kn string `bson:"KnhgtAOJ"` + Ky int `bson:"KyxOoCqS"` + LU string `bson:"LUPqMOHS"` + LV bool `bson:"LVNIFCYm"` + Ln int 
`bson:"LngvlnTV"` + ML int `bson:"mlfZVfVT"` + MN bool `bson:"MNuWZMLP"` + MX int `bson:"MXMxLVBk"` + Mc string `bson:"McpOBmaR"` + Me string `bson:"MeUYSkPS"` + Mq int `bson:"MqfkBZJF"` + NB int `bson:"nBKWWUWk"` + NK int `bson:"nKhiSITP"` + OB int `bson:"obcwwqWZ"` + OC string `bson:"OCsIhHxq"` + OM int `bson:"omnwvBbA"` + OR string `bson:"oRWMNJTE"` + Of string `bson:"OfTmCvDx"` + PA int `bson:"pacTBmxE"` + PF int `bson:"PFZSRHNN"` + PK bool `bson:"pKjOghFa"` + PO int `bson:"pOMEwSod"` + PP string `bson:"pPtPsgRl"` + PQ int `bson:"pQyCJaEd"` + Pj int `bson:"PjKiuWnQ"` + Pv int `bson:"PvfnpsMV"` + QH int `bson:"qHzOMXeT"` + QR bool `bson:"qrJASGzU"` + Qo string `bson:"QobifTeZ"` + RE int64 `bson:"reiKnuza"` + RM string `bson:"rmzUAgmk"` + RP string `bson:"RPsQhgRD"` + Rb uint `bson:"Rbxpznea"` + ReA bool `bson:"RemSsnnR"` + ReB int `bson:"ReOZakjB"` + Rw string `bson:"RwAVVKHM"` + SG bool `bson:"sGWJTAcT"` + SU uint8 `bson:"SUWXijHT"` + SYA int64 `bson:"sYtnozSc"` + SYB string `bson:"SYtZkQbC"` + Sq int64 `bson:"SqNvlUZF"` + TA int `bson:"taoNnQYY"` + TD string `bson:"TDUzNJiH"` + TI string `bson:"tIJEYSYM"` + TR bool `bson:"TRpgnInA"` + Tg int `bson:"TgSwBbgp"` + Tk int64 `bson:"TkXMwZlU"` + Tm int64 `bson:"TmUnYUrv"` + UK int `bson:"UKwbAKGw"` + UM string `bson:"uMDWqLMf"` + Up bool `bson:"UpdMADoN"` + Ut int64 `bson:"UtbwOKLt"` + VC int64 `bson:"VCSKFCoE"` + VK string `bson:"vkEDWgmN"` + VL string `bson:"vlSZaxCV"` + VS string `bson:"vSLTtfDF"` + VVA bool `bson:"vvUeXASH"` + VVB int `bson:"VVvwKVRG"` + Vc bool `bson:"VcCSqSmp"` + Vp int16 `bson:"VplFgewF"` + Vt string `bson:"VtzeOlCT"` + WH bool `bson:"WHSQVLKG"` + WJA bool `bson:"wjfyueDC"` + WJB string `bson:"wjAWaOog"` + WM int64 `bson:"wmDLUkXt"` + WY string `bson:"WYJdGJLu"` + Wm bool `bson:"WmMOvgFc"` + Wo string `bson:"WoFGfdvb"` + XE int `bson:"XEBqaXkB"` + XG bool `bson:"XGxlHrXf"` + XR string `bson:"xrzGnsEK"` + XWA int64 `bson:"xWpeGNjl"` + XWB string `bson:"xWUlYggc"` + XX int64 `bson:"XXKbyIXG"` + 
XZ int64 `bson:"xZOksssj"` + Xe uint `bson:"XeRkAyCq"` + Xx int `bson:"XxvXmHiQ"` + YD string `bson:"YDHWnEXV"` + YE bool `bson:"yeTUgNrU"` + YK int `bson:"yKfZnGKG"` + YX string `bson:"yXSBbPeT"` + ZD bool `bson:"zDzSGNnW"` + ZE bool `bson:"zEgGhhZf"` + ZM string `bson:"zMCFzcWY"` + ZSA int64 `bson:"zSYvADVf"` + ZSB int64 `bson:"zswQbWEI"` + Zm string `bson:"ZmtEJFSO"` +} + +type flatBSON struct { + AMQrGQmu int64 + AAgSNVyBb int64 + AgYYbYPr int + AtWNZJXa int64 + BBqZInWV string + BwTXiovJ int + CDIGOuIZ int + CEtYKsdd string + CFujXoob int + CYhSCkWB string + CqCssWxW string + DJsnHZIC int + DQBQcQFj int64 + FDYGeSiR int64 + FpduyhQP bool + GiAHzFII uint + HQeCoswW int + HQiykral int + HVHyetUM int64 + HicJbMpj bool + HrUPbFHD int + Ibrdrtgg string + IsorvnMR bool + JXMyYkfb int + JhImQOkw bool + JrJzKiIx string + JzgaUWVG int + KMKBtlov int64 + KnhgtAOJ string + KyxOoCqS int + LUPqMOHS string + LVNIFCYm bool + LngvlnTV int + MNuWZMLP bool + MXMxLVBk int + McpOBmaR string + MeUYSkPS string + MqfkBZJF int + OCsIhHxq string + OfTmCvDx string + PjKiuWnQ int + PvfnpsMV int + QobifTeZ string + RPsQhgRD string + Rbxpznea uint + ReOZakjB int + RemSsnnR bool + RwAVVKHM string + SUWXijHT uint8 + SYtZkQbC string + SqNvlUZF int64 + TDUzNJiH string + TRpgnInA bool + TgSwBbgp int + TkXMwZlU int64 + TmUnYUrv int64 + UKwbAKGw int + UpdMADoN bool + UtbwOKLt int64 + VCSKFCoE int64 + VVvwKVRG int + VcCSqSmp bool + VplFgewF int16 + VtzeOlCT string + WHSQVLKG bool + WYJdGJLu string + WmMOvgFc bool + WoFGfdvb string + XEBqaXkB int + XGxlHrXf bool + XXKbyIXG int64 + XeRkAyCq uint + XxvXmHiQ int + YDHWnEXV string + ZmtEJFSO string + ID primitive.ObjectID `bson:"_id"` + AhFCBmqT int64 + AicoMxZq bool + BkuaZWRT int64 + CVjWCrlu int64 + Cepcgozk string + CxOHMeDJ string + DCLfYqqM int + DHsYhRbV string + DNSuxlSU string + DVkWIafN int + DdPdLgGg int + DdVenEkK int + DoshbrpF int64 + DpbwfSRb string + DtywOLeD string + ERTIdIJR string + EgxZaSsw bool + FEheUtop string + GErhgZTh string 
+ GySFZeAE int + HnVgYIQi string + HwHOTmmW int + IFFGfTXc string + IjwXMKqI int + IwfbMdcv int + JWaFvVAz int + JbUymqiB string + JmglLvAS string + KfvcFmKw bool + MlfZVfVT int + NBKWWUWk int + NKhiSITP int + ORWMNJTE string + ObcwwqWZ int + OmnwvBbA int + PKjOghFa bool + POMEwSod int + PPtPsgRl string + PQyCJaEd int + PacTBmxE int + QHzOMXeT int + QrJASGzU bool + ReiKnuza int64 + RmzUAgmk string + SGWJTAcT bool + SYtnozSc int64 + TIJEYSYM string + TaoNnQYY int + UMDWqLMf string + VSLTtfDF string + VkEDWgmN string + VlSZaxCV string + VvUeXASH bool + WjAWaOog string + WjfyueDC bool + WmDLUkXt int64 + XWUlYggc string + XWpeGNjl int64 + XZOksssj int64 + XrzGnsEK string + YKfZnGKG int + YXSBbPeT string + YeTUgNrU bool + ZDzSGNnW bool + ZEgGhhZf bool + ZMCFzcWY string + ZSYvADVf int64 + ZswQbWEI int64 + PfZSRHnn int +} diff --git a/vendor/go.mongodb.org/mongo-driver/benchmark/canary.go b/vendor/go.mongodb.org/mongo-driver/benchmark/canary.go new file mode 100644 index 0000000..e696123 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/benchmark/canary.go @@ -0,0 +1,29 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package benchmark + +import ( + "context" +) + +func CanaryIncCase(ctx context.Context, tm TimerManager, iters int) error { + var canaryCount int + for i := 0; i < iters; i++ { + canaryCount++ + } + return nil +} + +var globalCanaryCount int + +func GlobalCanaryIncCase(ctx context.Context, tm TimerManager, iters int) error { + for i := 0; i < iters; i++ { + globalCanaryCount++ + } + + return nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/benchmark/canary_test.go b/vendor/go.mongodb.org/mongo-driver/benchmark/canary_test.go new file mode 100644 index 0000000..13de58a --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/benchmark/canary_test.go @@ -0,0 +1,12 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package benchmark + +import "testing" + +func BenchmarkCanaryInc(b *testing.B) { WrapCase(CanaryIncCase)(b) } +func BenchmarkGlobalCanaryInc(b *testing.B) { WrapCase(GlobalCanaryIncCase)(b) } diff --git a/vendor/go.mongodb.org/mongo-driver/benchmark/harness.go b/vendor/go.mongodb.org/mongo-driver/benchmark/harness.go new file mode 100644 index 0000000..5eb8afd --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/benchmark/harness.go @@ -0,0 +1,226 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package benchmark // import "go.mongodb.org/mongo-driver/benchmark" + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +const ( + five = 5 + ten = 2 * five + hundred = ten * ten + thousand = ten * hundred + tenThousand = ten * thousand + hundredThousand = hundred * thousand + million = hundred * hundredThousand + halfMillion = five * hundredThousand + + ExecutionTimeout = five * time.Minute + StandardRuntime = time.Minute + MinimumRuntime = five * time.Second + MinIterations = hundred +) + +type BenchCase func(context.Context, TimerManager, int) error +type BenchFunction func(*testing.B) + +func WrapCase(bench BenchCase) BenchFunction { + name := getName(bench) + return func(b *testing.B) { + ctx := context.Background() + b.ResetTimer() + err := bench(ctx, b, b.N) + require.NoError(b, err, "case='%s'", name) + } +} + +func getAllCases() []*CaseDefinition { + return []*CaseDefinition{ + { + Bench: CanaryIncCase, + Count: million, + Size: -1, + Runtime: MinimumRuntime, + RequiredIterations: ten, + }, + { + Bench: GlobalCanaryIncCase, + Count: million, + Size: -1, + Runtime: MinimumRuntime, + RequiredIterations: ten, + }, + { + Bench: BSONFlatDocumentEncoding, + Count: tenThousand, + Size: 75310000, + Runtime: StandardRuntime, + }, + { + Bench: BSONFlatDocumentDecodingLazy, + Count: tenThousand, + Size: 75310000, + Runtime: StandardRuntime, + }, + { + Bench: BSONFlatDocumentDecoding, + Count: tenThousand, + Size: 75310000, + Runtime: StandardRuntime, + }, + { + Bench: BSONDeepDocumentEncoding, + Count: tenThousand, + Size: 19640000, + Runtime: StandardRuntime, + }, + { + Bench: BSONDeepDocumentDecodingLazy, + Count: tenThousand, + Size: 19640000, + Runtime: StandardRuntime, + }, + { + Bench: BSONDeepDocumentDecoding, + Count: tenThousand, + Size: 19640000, + Runtime: StandardRuntime, + }, + // { + // Bench: BSONFullDocumentEncoding, + // 
Count: tenThousand, + // Size: 57340000, + // Runtime: StandardRuntime, + // }, + // { + // Bench: BSONFullDocumentDecodingLazy, + // Count: tenThousand, + // Size: 57340000, + // Runtime: StandardRuntime, + // }, + // { + // Bench: BSONFullDocumentDecoding, + // Count: tenThousand, + // Size: 57340000, + // Runtime: StandardRuntime, + // }, + // { + // Bench: BSONFullReaderDecoding, + // Count: tenThousand, + // Size: 57340000, + // Runtime: StandardRuntime, + // }, + { + Bench: BSONFlatMapDecoding, + Count: tenThousand, + Size: 75310000, + Runtime: StandardRuntime, + }, + { + Bench: BSONFlatMapEncoding, + Count: tenThousand, + Size: 75310000, + Runtime: StandardRuntime, + }, + { + Bench: BSONDeepMapDecoding, + Count: tenThousand, + Size: 19640000, + Runtime: StandardRuntime, + }, + { + Bench: BSONDeepMapEncoding, + Count: tenThousand, + Size: 19640000, + Runtime: StandardRuntime, + }, + // { + // Bench: BSONFullMapDecoding, + // Count: tenThousand, + // Size: 57340000, + // Runtime: StandardRuntime, + // }, + // { + // Bench: BSONFullMapEncoding, + // Count: tenThousand, + // Size: 57340000, + // Runtime: StandardRuntime, + // }, + { + Bench: BSONFlatStructDecoding, + Count: tenThousand, + Size: 75310000, + Runtime: StandardRuntime, + }, + { + Bench: BSONFlatStructTagsDecoding, + Count: tenThousand, + Size: 75310000, + Runtime: StandardRuntime, + }, + { + Bench: BSONFlatStructEncoding, + Count: tenThousand, + Size: 75310000, + Runtime: StandardRuntime, + }, + { + Bench: BSONFlatStructTagsEncoding, + Count: tenThousand, + Size: 75310000, + Runtime: StandardRuntime, + }, + { + Bench: SingleRunCommand, + Count: tenThousand, + Size: 160000, + Runtime: StandardRuntime, + }, + { + Bench: SingleFindOneByID, + Count: tenThousand, + Size: 16220000, + Runtime: StandardRuntime, + }, + { + Bench: SingleInsertSmallDocument, + Count: tenThousand, + Size: 2750000, + Runtime: StandardRuntime, + }, + { + Bench: SingleInsertLargeDocument, + Count: ten, + Size: 27310890, + Runtime: 
StandardRuntime, + }, + { + Bench: MultiFindMany, + Count: tenThousand, + Size: 16220000, + Runtime: StandardRuntime, + }, + { + Bench: MultiInsertSmallDocument, + Count: tenThousand, + Size: 2750000, + Runtime: StandardRuntime, + }, + { + Bench: MultiInsertLargeDocument, + Count: ten, + Size: 27310890, + Runtime: StandardRuntime, + RequiredIterations: tenThousand, + }, + } +} diff --git a/vendor/go.mongodb.org/mongo-driver/benchmark/harness_case.go b/vendor/go.mongodb.org/mongo-driver/benchmark/harness_case.go new file mode 100644 index 0000000..d9cfd5f --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/benchmark/harness_case.go @@ -0,0 +1,154 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package benchmark + +import ( + "context" + "fmt" + "path/filepath" + "reflect" + "runtime" + "strings" + "time" +) + +type CaseDefinition struct { + Bench BenchCase + Count int + Size int + RequiredIterations int + Runtime time.Duration + + cumulativeRuntime time.Duration + elapsed time.Duration + startAt time.Time + isRunning bool +} + +// TimerManager is a subset of the testing.B tool, used to manage +// setup code. 
+type TimerManager interface { + ResetTimer() + StartTimer() + StopTimer() +} + +func (c *CaseDefinition) ResetTimer() { + c.startAt = time.Now() + c.elapsed = 0 + c.isRunning = true +} + +func (c *CaseDefinition) StartTimer() { + c.startAt = time.Now() + c.isRunning = true +} + +func (c *CaseDefinition) StopTimer() { + if !c.isRunning { + return + } + c.elapsed += time.Since(c.startAt) + c.isRunning = false +} + +func (c *CaseDefinition) roundedRuntime() time.Duration { + return roundDurationMS(c.Runtime) +} + +func (c *CaseDefinition) Run(ctx context.Context) *BenchResult { + out := &BenchResult{ + Trials: 1, + DataSize: c.Size, + Name: c.Name(), + Operations: c.Count, + } + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, 2*ExecutionTimeout) + defer cancel() + + fmt.Println("=== RUN", out.Name) + if c.RequiredIterations == 0 { + c.RequiredIterations = MinIterations + } + +benchRepeat: + for { + if ctx.Err() != nil { + break + } + if c.cumulativeRuntime >= c.Runtime { + if out.Trials >= c.RequiredIterations { + break + } else if c.cumulativeRuntime >= ExecutionTimeout { + break + } + } + + res := Result{ + Iterations: c.Count, + } + + c.StartTimer() + res.Error = c.Bench(ctx, c, c.Count) + c.StopTimer() + res.Duration = c.elapsed + c.cumulativeRuntime += res.Duration + + switch res.Error { + case context.DeadlineExceeded: + break benchRepeat + case context.Canceled: + break benchRepeat + case nil: + out.Trials++ + c.elapsed = 0 + out.Raw = append(out.Raw, res) + default: + continue + } + + } + + out.Duration = out.totalDuration() + fmt.Printf(" --- REPORT: count=%d trials=%d requiredTrials=%d runtime=%s\n", + c.Count, out.Trials, c.RequiredIterations, c.Runtime) + if out.HasErrors() { + fmt.Printf(" --- ERRORS: %s\n", strings.Join(out.errReport(), "\n ")) + fmt.Printf("--- FAIL: %s (%s)\n", out.Name, out.roundedRuntime()) + } else { + fmt.Printf("--- PASS: %s (%s)\n", out.Name, out.roundedRuntime()) + } + + return out + +} + +func (c 
*CaseDefinition) String() string { + return fmt.Sprintf("name=%s, count=%d, runtime=%s timeout=%s", + c.Name(), c.Count, c.Runtime, ExecutionTimeout) +} + +func (c *CaseDefinition) Name() string { return getName(c.Bench) } +func getName(i interface{}) string { + n := runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name() + parts := strings.Split(n, ".") + if len(parts) > 1 { + return parts[len(parts)-1] + } + + return n + +} + +func getProjectRoot() string { return filepath.Dir(getDirectoryOfFile()) } + +func getDirectoryOfFile() string { + _, file, _, _ := runtime.Caller(1) + + return filepath.Dir(file) +} diff --git a/vendor/go.mongodb.org/mongo-driver/benchmark/harness_main.go b/vendor/go.mongodb.org/mongo-driver/benchmark/harness_main.go new file mode 100644 index 0000000..b69bc35 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/benchmark/harness_main.go @@ -0,0 +1,69 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package benchmark + +import ( + "context" + "encoding/json" + "flag" + "fmt" + "io/ioutil" + "os" +) + +func DriverBenchmarkMain() int { + var hasErrors bool + var outputFileName string + flag.StringVar(&outputFileName, "output", "perf.json", "path to write the 'perf.json' file") + flag.Parse() + + ctx := context.Background() + output := []interface{}{} + for _, res := range runDriverCases(ctx) { + if res.HasErrors() { + hasErrors = true + } + + evg, err := res.EvergreenPerfFormat() + if err != nil { + hasErrors = true + continue + } + + output = append(output, evg...) + } + + evgOutput, err := json.MarshalIndent(map[string]interface{}{"results": output}, "", " ") + if err != nil { + return 1 + } + evgOutput = append(evgOutput, []byte("\n")...) 
+ + if outputFileName == "" { + fmt.Println(string(evgOutput)) + } else if err := ioutil.WriteFile(outputFileName, evgOutput, 0644); err != nil { + fmt.Fprintf(os.Stderr, "problem writing file '%s': %s", outputFileName, err.Error()) + return 1 + } + + if hasErrors { + return 1 + } + + return 0 +} + +func runDriverCases(ctx context.Context) []*BenchResult { + cases := getAllCases() + + results := []*BenchResult{} + for _, bc := range cases { + results = append(results, bc.Run(ctx)) + } + + return results +} diff --git a/vendor/go.mongodb.org/mongo-driver/benchmark/harness_results.go b/vendor/go.mongodb.org/mongo-driver/benchmark/harness_results.go new file mode 100644 index 0000000..ad5e2e6 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/benchmark/harness_results.go @@ -0,0 +1,140 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package benchmark + +import ( + "fmt" + "time" + + "github.com/montanaflynn/stats" +) + +type BenchResult struct { + Name string + Trials int + Duration time.Duration + Raw []Result + DataSize int + Operations int + hasErrors *bool +} + +func (r *BenchResult) EvergreenPerfFormat() ([]interface{}, error) { + timings := r.timings() + + median, err := stats.Median(timings) + if err != nil { + return nil, err + } + + min, err := stats.Min(timings) + if err != nil { + return nil, err + } + + max, err := stats.Max(timings) + if err != nil { + return nil, err + } + + out := []interface{}{ + map[string]interface{}{ + "name": r.Name + "-throughput", + "results": map[string]interface{}{ + "1": map[string]interface{}{ + "seconds": r.Duration.Round(time.Millisecond).Seconds(), + "ops_per_second": r.getThroughput(median), + "ops_per_second_values": []float64{ + r.getThroughput(min), + r.getThroughput(max), + }, + }, + }, 
+ }, + } + + if r.DataSize > 0 { + out = append(out, interface{}(map[string]interface{}{ + "name": r.Name + "-MB-adjusted", + "results": map[string]interface{}{ + "1": map[string]interface{}{ + "seconds": r.Duration.Round(time.Millisecond).Seconds(), + "ops_per_second": r.adjustResults(median), + "ops_per_second_values": []float64{ + r.adjustResults(min), + r.adjustResults(max), + }, + }, + }, + })) + } + + return out, nil +} + +func (r *BenchResult) timings() []float64 { + out := []float64{} + for _, r := range r.Raw { + out = append(out, r.Duration.Seconds()) + } + return out +} + +func (r *BenchResult) totalDuration() time.Duration { + var out time.Duration + for _, trial := range r.Raw { + out += trial.Duration + } + return out +} + +func (r *BenchResult) adjustResults(data float64) float64 { return float64(r.DataSize) / data } +func (r *BenchResult) getThroughput(data float64) float64 { return float64(r.Operations) / data } +func (r *BenchResult) roundedRuntime() time.Duration { return roundDurationMS(r.Duration) } + +func (r *BenchResult) String() string { + return fmt.Sprintf("name=%s, trials=%d, secs=%s", r.Name, r.Trials, r.Duration) +} + +func (r *BenchResult) HasErrors() bool { + if r.hasErrors == nil { + var val bool + for _, res := range r.Raw { + if res.Error != nil { + val = true + break + } + } + r.hasErrors = &val + } + + return *r.hasErrors +} + +func (r *BenchResult) errReport() []string { + errs := []string{} + for _, res := range r.Raw { + if res.Error != nil { + errs = append(errs, res.Error.Error()) + } + } + return errs +} + +type Result struct { + Duration time.Duration + Iterations int + Error error +} + +func roundDurationMS(d time.Duration) time.Duration { + rounded := d.Round(time.Millisecond) + if rounded == 1<<63-1 { + return 0 + } + return rounded +} diff --git a/vendor/go.mongodb.org/mongo-driver/benchmark/multi.go b/vendor/go.mongodb.org/mongo-driver/benchmark/multi.go new file mode 100644 index 0000000..1a4faaa --- /dev/null +++ 
b/vendor/go.mongodb.org/mongo-driver/benchmark/multi.go @@ -0,0 +1,142 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package benchmark + +import ( + "context" + "errors" + + "go.mongodb.org/mongo-driver/x/bsonx" +) + +func MultiFindMany(ctx context.Context, tm TimerManager, iters int) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + db, err := getClientDB(ctx) + if err != nil { + return err + } + defer db.Client().Disconnect(ctx) + + db = db.Client().Database("perftest") + if err = db.Drop(ctx); err != nil { + return err + } + + doc, err := loadSourceDocument(getProjectRoot(), perfDataDir, singleAndMultiDataDir, tweetData) + if err != nil { + return err + } + + coll := db.Collection("corpus") + + payload := make([]interface{}, iters) + for idx := range payload { + payload[idx] = doc + } + + if _, err = coll.InsertMany(ctx, payload); err != nil { + return err + } + + tm.ResetTimer() + + cursor, err := coll.Find(ctx, bsonx.Doc{}) + if err != nil { + return err + } + defer cursor.Close(ctx) + + counter := 0 + for cursor.Next(ctx) { + err = cursor.Err() + if err != nil { + return err + } + if len(cursor.Current) == 0 { + return errors.New("error retrieving document") + } + + counter++ + } + + if counter != iters { + return errors.New("problem iterating cursors") + + } + + tm.StopTimer() + + if err = cursor.Close(ctx); err != nil { + return err + } + + if err = db.Drop(ctx); err != nil { + return err + } + + return nil +} + +func multiInsertCase(ctx context.Context, tm TimerManager, iters int, data string) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + db, err := getClientDB(ctx) + if err != nil { + return err + } + defer db.Client().Disconnect(ctx) + + db = db.Client().Database("perftest") + if 
err = db.Drop(ctx); err != nil { + return err + } + + doc, err := loadSourceDocument(getProjectRoot(), perfDataDir, singleAndMultiDataDir, data) + if err != nil { + return err + } + + err = db.RunCommand(ctx, bsonx.Doc{{"create", bsonx.String("corpus")}}).Err() + if err != nil { + return err + } + + payload := make([]interface{}, iters) + for idx := range payload { + payload[idx] = doc + } + + coll := db.Collection("corpus") + + tm.ResetTimer() + res, err := coll.InsertMany(ctx, payload) + if err != nil { + return err + } + tm.StopTimer() + + if len(res.InsertedIDs) != iters { + return errors.New("bulk operation did not complete") + } + + if err = db.Drop(ctx); err != nil { + return err + } + + return nil +} + +func MultiInsertSmallDocument(ctx context.Context, tm TimerManager, iters int) error { + return multiInsertCase(ctx, tm, iters, smallData) +} + +func MultiInsertLargeDocument(ctx context.Context, tm TimerManager, iters int) error { + return multiInsertCase(ctx, tm, iters, largeData) +} diff --git a/vendor/go.mongodb.org/mongo-driver/benchmark/multi_test.go b/vendor/go.mongodb.org/mongo-driver/benchmark/multi_test.go new file mode 100644 index 0000000..62131ff --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/benchmark/multi_test.go @@ -0,0 +1,13 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package benchmark + +import "testing" + +func BenchmarkMultiFindMany(b *testing.B) { WrapCase(MultiFindMany)(b) } +func BenchmarkMultiInsertSmallDocument(b *testing.B) { WrapCase(MultiInsertSmallDocument)(b) } +func BenchmarkMultiInsertLargeDocument(b *testing.B) { WrapCase(MultiInsertLargeDocument)(b) } diff --git a/vendor/go.mongodb.org/mongo-driver/benchmark/single.go b/vendor/go.mongodb.org/mongo-driver/benchmark/single.go new file mode 100644 index 0000000..86e8e79 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/benchmark/single.go @@ -0,0 +1,174 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package benchmark + +import ( + "context" + "errors" + + "go.mongodb.org/mongo-driver/internal/testutil" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + "go.mongodb.org/mongo-driver/x/bsonx" +) + +const ( + singleAndMultiDataDir = "single_and_multi_document" + tweetData = "tweet.json" + smallData = "small_doc.json" + largeData = "large_doc.json" +) + +func getClientDB(ctx context.Context) (*mongo.Database, error) { + cs, err := testutil.GetConnString() + if err != nil { + return nil, err + } + client, err := mongo.NewClient(options.Client().ApplyURI(cs.String())) + if err != nil { + return nil, err + } + if err = client.Connect(ctx); err != nil { + return nil, err + } + + db := client.Database(testutil.GetDBName(cs)) + return db, nil +} + +func SingleRunCommand(ctx context.Context, tm TimerManager, iters int) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + db, err := getClientDB(ctx) + if err != nil { + return err + } + defer db.Client().Disconnect(ctx) + + cmd := bsonx.Doc{{"ismaster", 
bsonx.Boolean(true)}} + + tm.ResetTimer() + for i := 0; i < iters; i++ { + var doc bsonx.Doc + err := db.RunCommand(ctx, cmd).Decode(&doc) + if err != nil { + return err + } + // read the document and then throw it away to prevent + out, err := doc.MarshalBSON() + if len(out) == 0 { + return errors.New("output of ismaster is empty") + } + } + tm.StopTimer() + + return nil +} + +func SingleFindOneByID(ctx context.Context, tm TimerManager, iters int) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + db, err := getClientDB(ctx) + if err != nil { + return err + } + + db = db.Client().Database("perftest") + if err = db.Drop(ctx); err != nil { + return err + } + + doc, err := loadSourceDocument(getProjectRoot(), perfDataDir, singleAndMultiDataDir, tweetData) + if err != nil { + return err + } + coll := db.Collection("corpus") + for i := 0; i < iters; i++ { + id := int32(i) + res, err := coll.InsertOne(ctx, doc.Set("_id", bsonx.Int32(id))) + if err != nil { + return err + } + if res.InsertedID == nil { + return errors.New("insert failed") + } + } + + tm.ResetTimer() + + for i := 0; i < iters; i++ { + res := coll.FindOne(ctx, bsonx.Doc{{"_id", bsonx.Int32(int32(i))}}) + if res == nil { + return errors.New("find one query produced nil result") + } + } + + tm.StopTimer() + + if err = db.Drop(ctx); err != nil { + return err + } + + return nil +} + +func singleInsertCase(ctx context.Context, tm TimerManager, iters int, data string) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + db, err := getClientDB(ctx) + if err != nil { + return err + } + defer db.Client().Disconnect(ctx) + + db = db.Client().Database("perftest") + if err = db.Drop(ctx); err != nil { + return err + } + + doc, err := loadSourceDocument(getProjectRoot(), perfDataDir, singleAndMultiDataDir, data) + if err != nil { + return err + } + + err = db.RunCommand(ctx, bsonx.Doc{{"create", bsonx.String("corpus")}}).Err() + if err != nil { + return err + } + + coll := 
db.Collection("corpus") + + tm.ResetTimer() + + for i := 0; i < iters; i++ { + if _, err = coll.InsertOne(ctx, doc); err != nil { + return err + } + + // TODO: should be remove after resolving GODRIVER-468 + _ = doc.Delete("_id") + } + + tm.StopTimer() + + if err = db.Drop(ctx); err != nil { + return err + } + + return nil +} + +func SingleInsertSmallDocument(ctx context.Context, tm TimerManager, iters int) error { + return singleInsertCase(ctx, tm, iters, smallData) +} + +func SingleInsertLargeDocument(ctx context.Context, tm TimerManager, iters int) error { + return singleInsertCase(ctx, tm, iters, largeData) +} diff --git a/vendor/go.mongodb.org/mongo-driver/benchmark/single_test.go b/vendor/go.mongodb.org/mongo-driver/benchmark/single_test.go new file mode 100644 index 0000000..c4cc14f --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/benchmark/single_test.go @@ -0,0 +1,14 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package benchmark + +import "testing" + +func BenchmarkSingleRunCommand(b *testing.B) { WrapCase(SingleRunCommand)(b) } +func BenchmarkSingleFindOneByID(b *testing.B) { WrapCase(SingleFindOneByID)(b) } +func BenchmarkSingleInsertSmallDocument(b *testing.B) { WrapCase(SingleInsertSmallDocument)(b) } +func BenchmarkSingleInsertLargeDocument(b *testing.B) { WrapCase(SingleInsertLargeDocument)(b) } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/benchmark_test.go b/vendor/go.mongodb.org/mongo-driver/bson/benchmark_test.go new file mode 100644 index 0000000..8528223 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/benchmark_test.go @@ -0,0 +1,134 @@ +// Copyright (C) MongoDB, Inc. 2017-present. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bson + +import ( + "testing" +) + +type encodetest struct { + Field1String string + Field1Int64 int64 + Field1Float64 float64 + Field2String string + Field2Int64 int64 + Field2Float64 float64 + Field3String string + Field3Int64 int64 + Field3Float64 float64 + Field4String string + Field4Int64 int64 + Field4Float64 float64 +} + +type nestedtest1 struct { + Nested nestedtest2 +} + +type nestedtest2 struct { + Nested nestedtest3 +} + +type nestedtest3 struct { + Nested nestedtest4 +} + +type nestedtest4 struct { + Nested nestedtest5 +} + +type nestedtest5 struct { + Nested nestedtest6 +} + +type nestedtest6 struct { + Nested nestedtest7 +} + +type nestedtest7 struct { + Nested nestedtest8 +} + +type nestedtest8 struct { + Nested nestedtest9 +} + +type nestedtest9 struct { + Nested nestedtest10 +} + +type nestedtest10 struct { + Nested nestedtest11 +} + +type nestedtest11 struct { + Nested encodetest +} + +var encodetestInstance = encodetest{ + Field1String: "foo", + Field1Int64: 1, + Field1Float64: 3.0, + Field2String: "bar", + Field2Int64: 2, + Field2Float64: 3.1, + Field3String: "baz", + Field3Int64: 3, + Field3Float64: 3.14, + Field4String: "qux", + Field4Int64: 4, + Field4Float64: 3.141, +} + +var nestedInstance = nestedtest1{ + nestedtest2{ + nestedtest3{ + nestedtest4{ + nestedtest5{ + nestedtest6{ + nestedtest7{ + nestedtest8{ + nestedtest9{ + nestedtest10{ + nestedtest11{ + encodetest{ + Field1String: "foo", + Field1Int64: 1, + Field1Float64: 3.0, + Field2String: "bar", + Field2Int64: 2, + Field2Float64: 3.1, + Field3String: "baz", + Field3Int64: 3, + Field3Float64: 3.14, + Field4String: "qux", + Field4Int64: 4, + Field4Float64: 3.141, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, +} + +func BenchmarkEncoding(b 
*testing.B) { + for i := 0; i < b.N; i++ { + _, _ = Marshal(encodetestInstance) + } +} + +func BenchmarkEncodingNested(b *testing.B) { + for i := 0; i < b.N; i++ { + _, _ = Marshal(nestedInstance) + } +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bson.go b/vendor/go.mongodb.org/mongo-driver/bson/bson.go new file mode 100644 index 0000000..37bf981 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bson.go @@ -0,0 +1,60 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// +// Based on gopkg.in/mgo.v2/bson by Gustavo Niemeyer +// See THIRD-PARTY-NOTICES for original license terms. + +// +build go1.9 + +package bson // import "go.mongodb.org/mongo-driver/bson" + +import ( + "go.mongodb.org/mongo-driver/bson/primitive" +) + +// Zeroer allows custom struct types to implement a report of zero +// state. All struct types that don't implement Zeroer or where IsZero +// returns false are considered to be not zero. +type Zeroer interface { + IsZero() bool +} + +// D represents a BSON Document. This type can be used to represent BSON in a concise and readable +// manner. It should generally be used when serializing to BSON. For deserializing, the Raw or +// Document types should be used. +// +// Example usage: +// +// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} +// +// This type should be used in situations where order matters, such as MongoDB commands. If the +// order is not important, a map is more comfortable and concise. +type D = primitive.D + +// E represents a BSON element for a D. It is usually used inside a D. +type E = primitive.E + +// M is an unordered, concise representation of a BSON Document. 
It should generally be used to +// serialize BSON when the order of the elements of a BSON document do not matter. If the element +// order matters, use a D instead. +// +// Example usage: +// +// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} +// +// This type is handled in the encoders as a regular map[string]interface{}. The elements will be +// serialized in an undefined, random order, and the order will be different each time. +type M = primitive.M + +// An A represents a BSON array. This type can be used to represent a BSON array in a concise and +// readable manner. It should generally be used when serializing to BSON. For deserializing, the +// RawArray or Array types should be used. +// +// Example usage: +// +// bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}} +// +type A = primitive.A diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bson_1_8.go b/vendor/go.mongodb.org/mongo-driver/bson/bson_1_8.go new file mode 100644 index 0000000..caf5f50 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bson_1_8.go @@ -0,0 +1,91 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// +build !go1.9 + +package bson // import "go.mongodb.org/mongo-driver/bson" + +import ( + "math" + "strconv" + "strings" +) + +// Zeroer allows custom struct types to implement a report of zero +// state. All struct types that don't implement Zeroer or where IsZero +// returns false are considered to be not zero. +type Zeroer interface { + IsZero() bool +} + +// D represents a BSON Document. This type can be used to represent BSON in a concise and readable +// manner. It should generally be used when serializing to BSON. For deserializing, the Raw or +// Document types should be used. 
+// +// Example usage: +// +// primitive.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} +// +// This type should be used in situations where order matters, such as MongoDB commands. If the +// order is not important, a map is more comfortable and concise. +type D []E + +// Map creates a map from the elements of the D. +func (d D) Map() M { + m := make(M, len(d)) + for _, e := range d { + m[e.Key] = e.Value + } + return m +} + +// E represents a BSON element for a D. It is usually used inside a D. +type E struct { + Key string + Value interface{} +} + +// M is an unordered, concise representation of a BSON Document. It should generally be used to +// serialize BSON when the order of the elements of a BSON document do not matter. If the element +// order matters, use a D instead. +// +// Example usage: +// +// primitive.M{"foo": "bar", "hello": "world", "pi": 3.14159} +// +// This type is handled in the encoders as a regular map[string]interface{}. The elements will be +// serialized in an undefined, random order, and the order will be different each time. +type M map[string]interface{} + +// An A represents a BSON array. This type can be used to represent a BSON array in a concise and +// readable manner. It should generally be used when serializing to BSON. For deserializing, the +// RawArray or Array types should be used. +// +// Example usage: +// +// primitive.A{"bar", "world", 3.14159, primitive.D{{"qux", 12345}}} +// +type A []interface{} + +func formatDouble(f float64) string { + var s string + if math.IsInf(f, 1) { + s = "Infinity" + } else if math.IsInf(f, -1) { + s = "-Infinity" + } else if math.IsNaN(f) { + s = "NaN" + } else { + // Print exactly one decimalType place for integers; otherwise, print as many are necessary to + // perfectly represent it. 
+ s = strconv.FormatFloat(f, 'G', -1, 64) + if !strings.ContainsRune(s, '.') { + s += ".0" + } + } + + return s +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bson_corpus_spec_test.go b/vendor/go.mongodb.org/mongo-driver/bson/bson_corpus_spec_test.go new file mode 100644 index 0000000..1bc3f16 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bson_corpus_spec_test.go @@ -0,0 +1,371 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bson + +import ( + "encoding/hex" + "encoding/json" + "fmt" + "io/ioutil" + "math" + "path" + "strconv" + "strings" + "testing" + "unicode" + "unicode/utf8" + + "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/require" + "github.com/tidwall/pretty" + "go.mongodb.org/mongo-driver/bson/bsoncodec" + "go.mongodb.org/mongo-driver/bson/bsonrw" +) + +type testCase struct { + Description string `json:"description"` + BsonType string `json:"bson_type"` + TestKey *string `json:"test_key"` + Valid []validityTestCase `json:"valid"` + DecodeErrors []decodeErrorTestCase `json:"decodeErrors"` + ParseErrors []parseErrorTestCase `json:"parseErrors"` + Deprecated *bool `json:"deprecated"` +} + +type validityTestCase struct { + Description string `json:"description"` + CanonicalBson string `json:"canonical_bson"` + CanonicalExtJSON string `json:"canonical_extjson"` + RelaxedExtJSON *string `json:"relaxed_extjson"` + DegenerateBSON *string `json:"degenerate_bson"` + DegenerateExtJSON *string `json:"degenerate_extjson"` + ConvertedBSON *string `json:"converted_bson"` + ConvertedExtJSON *string `json:"converted_extjson"` + Lossy *bool `json:"lossy"` +} + +type decodeErrorTestCase struct { + Description string `json:"description"` + Bson string `json:"bson"` +} + +type parseErrorTestCase struct { 
+ Description string `json:"description"` + String string `json:"string"` +} + +const dataDir = "../data" + +var dvd bsoncodec.DefaultValueDecoders +var dve bsoncodec.DefaultValueEncoders + +var dc = bsoncodec.DecodeContext{Registry: NewRegistryBuilder().Build()} +var ec = bsoncodec.EncodeContext{Registry: NewRegistryBuilder().Build()} + +func findJSONFilesInDir(t *testing.T, dir string) []string { + files := make([]string, 0) + + entries, err := ioutil.ReadDir(dir) + require.NoError(t, err) + + for _, entry := range entries { + if entry.IsDir() || path.Ext(entry.Name()) != ".json" { + continue + } + + files = append(files, entry.Name()) + } + + return files +} + +func needsEscapedUnicode(bsonType string) bool { + return bsonType == "0x02" || bsonType == "0x0D" || bsonType == "0x0E" || bsonType == "0x0F" +} + +func unescapeUnicode(s, bsonType string) string { + if !needsEscapedUnicode(bsonType) { + return s + } + + newS := "" + + for i := 0; i < len(s); i++ { + c := s[i] + switch c { + case '\\': + switch s[i+1] { + case 'u': + us := s[i : i+6] + u, err := strconv.Unquote(strings.Replace(strconv.Quote(us), `\\u`, `\u`, 1)) + if err != nil { + return "" + } + for _, r := range u { + if r < ' ' { + newS += fmt.Sprintf(`\u%04x`, r) + } else { + newS += string(r) + } + } + i += 5 + default: + newS += string(c) + } + default: + if c > unicode.MaxASCII { + r, size := utf8.DecodeRune([]byte(s[i:])) + newS += string(r) + i += size - 1 + } else { + newS += string(c) + } + } + } + + return newS +} + +func formatDouble(f float64) string { + var s string + if math.IsInf(f, 1) { + s = "Infinity" + } else if math.IsInf(f, -1) { + s = "-Infinity" + } else if math.IsNaN(f) { + s = "NaN" + } else { + // Print exactly one decimalType place for integers; otherwise, print as many are necessary to + // perfectly represent it. 
+ s = strconv.FormatFloat(f, 'G', -1, 64) + if !strings.ContainsRune(s, 'E') && !strings.ContainsRune(s, '.') { + s += ".0" + } + } + + return s +} + +func normalizeCanonicalDouble(t *testing.T, key string, cEJ string) string { + // Unmarshal string into map + cEJMap := make(map[string]map[string]string) + err := json.Unmarshal([]byte(cEJ), &cEJMap) + require.NoError(t, err) + + // Parse the float contained by the map. + expectedString := cEJMap[key]["$numberDouble"] + expectedFloat, err := strconv.ParseFloat(expectedString, 64) + + // Normalize the string + return fmt.Sprintf(`{"%s":{"$numberDouble":"%s"}}`, key, formatDouble(expectedFloat)) +} + +func normalizeRelaxedDouble(t *testing.T, key string, rEJ string) string { + // Unmarshal string into map + rEJMap := make(map[string]float64) + err := json.Unmarshal([]byte(rEJ), &rEJMap) + if err != nil { + return normalizeCanonicalDouble(t, key, rEJ) + } + + // Parse the float contained by the map. + expectedFloat := rEJMap[key] + + // Normalize the string + return fmt.Sprintf(`{"%s":%s}`, key, formatDouble(expectedFloat)) +} + +// bsonToNative decodes the BSON bytes (b) into a native Document +func bsonToNative(t *testing.T, b []byte, bType, testDesc string) D { + var doc D + err := Unmarshal(b, &doc) + expectNoError(t, err, fmt.Sprintf("%s: decoding %s BSON", testDesc, bType)) + return doc +} + +// nativeToBSON encodes the native Document (doc) into canonical BSON and compares it to the expected +// canonical BSON (cB) +func nativeToBSON(t *testing.T, cB []byte, doc D, testDesc, bType, docSrcDesc string) { + actual, err := Marshal(doc) + expectNoError(t, err, fmt.Sprintf("%s: encoding %s BSON", testDesc, bType)) + + if diff := cmp.Diff(cB, actual); diff != "" { + t.Errorf("%s: 'native_to_bson(%s) = cB' failed (-want, +got):\n-%v\n+%v\n", + testDesc, docSrcDesc, cB, actual) + t.FailNow() + } +} + +// jsonToNative decodes the extended JSON string (ej) into a native Document +func jsonToNative(t *testing.T, ej, ejType, 
testDesc string) D { + var doc D + err := UnmarshalExtJSON([]byte(ej), ejType != "relaxed", &doc) + expectNoError(t, err, fmt.Sprintf("%s: decoding %s extended JSON", testDesc, ejType)) + return doc +} + +// nativeToJSON encodes the native Document (doc) into an extended JSON string +func nativeToJSON(t *testing.T, ej string, doc D, testDesc, ejType, ejShortName, docSrcDesc string) { + actualEJ, err := MarshalExtJSON(doc, ejType != "relaxed", true) + expectNoError(t, err, fmt.Sprintf("%s: encoding %s extended JSON", testDesc, ejType)) + + if diff := cmp.Diff(ej, string(actualEJ)); diff != "" { + t.Errorf("%s: 'native_to_%s_extended_json(%s) = %s' failed (-want, +got):\n%s\n", + testDesc, ejType, docSrcDesc, ejShortName, diff) + t.FailNow() + } +} + +func runTest(t *testing.T, file string) { + filepath := path.Join(dataDir, file) + content, err := ioutil.ReadFile(filepath) + require.NoError(t, err) + + // Remove ".json" from filename. + file = file[:len(file)-5] + testName := "bson_corpus--" + file + + t.Run(testName, func(t *testing.T) { + var test testCase + require.NoError(t, json.Unmarshal(content, &test)) + + for _, v := range test.Valid { + // get canonical BSON + cB, err := hex.DecodeString(v.CanonicalBson) + expectNoError(t, err, fmt.Sprintf("%s: reading canonical BSON", v.Description)) + + // get canonical extended JSON + cEJ := unescapeUnicode(string(pretty.Ugly([]byte(v.CanonicalExtJSON))), test.BsonType) + if test.BsonType == "0x01" { + cEJ = normalizeCanonicalDouble(t, *test.TestKey, cEJ) + } + + /*** canonical BSON round-trip tests ***/ + doc := bsonToNative(t, cB, "canonical", v.Description) + + // native_to_bson(bson_to_native(cB)) = cB + nativeToBSON(t, cB, doc, v.Description, "canonical", "bson_to_native(cB)") + + // native_to_canonical_extended_json(bson_to_native(cB)) = cEJ + nativeToJSON(t, cEJ, doc, v.Description, "canonical", "cEJ", "bson_to_native(cB)") + + // native_to_relaxed_extended_json(bson_to_native(cB)) = rEJ (if rEJ exists) + if 
v.RelaxedExtJSON != nil { + rEJ := unescapeUnicode(string(pretty.Ugly([]byte(*v.RelaxedExtJSON))), test.BsonType) + if test.BsonType == "0x01" { + rEJ = normalizeRelaxedDouble(t, *test.TestKey, rEJ) + } + + nativeToJSON(t, rEJ, doc, v.Description, "relaxed", "rEJ", "bson_to_native(cB)") + + /*** relaxed extended JSON round-trip tests (if exists) ***/ + doc = jsonToNative(t, rEJ, "relaxed", v.Description) + + // native_to_relaxed_extended_json(json_to_native(rEJ)) = rEJ + nativeToJSON(t, rEJ, doc, v.Description, "relaxed", "eJR", "json_to_native(rEJ)") + } + + /*** canonical extended JSON round-trip tests ***/ + doc = jsonToNative(t, cEJ, "canonical", v.Description) + + // native_to_canonical_extended_json(json_to_native(cEJ)) = cEJ + nativeToJSON(t, cEJ, doc, v.Description, "canonical", "cEJ", "json_to_native(cEJ)") + + // native_to_bson(json_to_native(cEJ)) = cb (unless lossy) + if v.Lossy == nil || !*v.Lossy { + nativeToBSON(t, cB, doc, v.Description, "canonical", "json_to_native(cEJ)") + } + + /*** degenerate BSON round-trip tests (if exists) ***/ + if v.DegenerateBSON != nil { + dB, err := hex.DecodeString(*v.DegenerateBSON) + expectNoError(t, err, fmt.Sprintf("%s: reading degenerate BSON", v.Description)) + + doc = bsonToNative(t, dB, "degenerate", v.Description) + + // native_to_bson(bson_to_native(dB)) = cB + nativeToBSON(t, cB, doc, v.Description, "degenerate", "bson_to_native(dB)") + } + + /*** degenerate JSON round-trip tests (if exists) ***/ + if v.DegenerateExtJSON != nil { + dEJ := unescapeUnicode(string(pretty.Ugly([]byte(*v.DegenerateExtJSON))), test.BsonType) + if test.BsonType == "0x01" { + dEJ = normalizeCanonicalDouble(t, *test.TestKey, dEJ) + } + + doc = jsonToNative(t, dEJ, "degenerate canonical", v.Description) + + // native_to_canonical_extended_json(json_to_native(dEJ)) = cEJ + nativeToJSON(t, cEJ, doc, v.Description, "degenerate canonical", "cEJ", "json_to_native(dEJ)") + + // native_to_bson(json_to_native(dEJ)) = cB (unless lossy) + if 
v.Lossy == nil || !*v.Lossy { + nativeToBSON(t, cB, doc, v.Description, "canonical", "json_to_native(dEJ)") + } + } + } + + for _, d := range test.DecodeErrors { + b, err := hex.DecodeString(d.Bson) + expectNoError(t, err, d.Description) + + var doc D + err = Unmarshal(b, &doc) + expectError(t, err, fmt.Sprintf("%s: expected decode error", d.Description)) + } + + for _, p := range test.ParseErrors { + // skip DBRef tests + if strings.Contains(p.Description, "Bad DBRef") { + continue + } + + s := unescapeUnicode(p.String, test.BsonType) + if test.BsonType == "0x13" { + s = fmt.Sprintf(`{"$numberDecimal": "%s"}`, s) + } + + switch test.BsonType { + case "0x00": + var doc D + err := UnmarshalExtJSON([]byte(s), true, &doc) + expectError(t, err, fmt.Sprintf("%s: expected parse error", p.Description)) + case "0x13": + ejvr, err := bsonrw.NewExtJSONValueReader(strings.NewReader(s), true) + expectNoError(t, err, fmt.Sprintf("error creating value reader: %s", err)) + _, err = ejvr.ReadDecimal128() + expectError(t, err, fmt.Sprintf("%s: expected parse error", p.Description)) + default: + t.Errorf("Update test to check for parse errors for type %s", test.BsonType) + t.Fail() + } + } + }) +} + +func Test_BsonCorpus(t *testing.T) { + for _, file := range findJSONFilesInDir(t, dataDir) { + runTest(t, file) + } +} + +func expectNoError(t *testing.T, err error, desc string) { + if err != nil { + t.Helper() + t.Errorf("%s: Unepexted error: %v", desc, err) + t.FailNow() + } +} + +func expectError(t *testing.T, err error, desc string) { + if err == nil { + t.Helper() + t.Errorf("%s: Expected error", desc) + t.FailNow() + } +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bson_test.go b/vendor/go.mongodb.org/mongo-driver/bson/bson_test.go new file mode 100644 index 0000000..a2f5726 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bson_test.go @@ -0,0 +1,113 @@ +// Copyright (C) MongoDB, Inc. 2017-present. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bson + +import ( + "bytes" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/require" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" +) + +func noerr(t *testing.T, err error) { + if err != nil { + t.Helper() + t.Errorf("Unexpected error: (%T)%v", err, err) + t.FailNow() + } +} + +func requireErrEqual(t *testing.T, err1 error, err2 error) { require.True(t, compareErrors(err1, err2)) } + +func TestTimeRoundTrip(t *testing.T) { + val := struct { + Value time.Time + ID string + }{ + ID: "time-rt-test", + } + + if !val.Value.IsZero() { + t.Errorf("Did not get zero time as expected.") + } + + bsonOut, err := Marshal(val) + noerr(t, err) + rtval := struct { + Value time.Time + ID string + }{} + + err = Unmarshal(bsonOut, &rtval) + noerr(t, err) + if !cmp.Equal(val, rtval) { + t.Errorf("Did not round trip properly. got %v; want %v", val, rtval) + } + if !rtval.Value.IsZero() { + t.Errorf("Did not get zero time as expected.") + } +} + +func TestNonNullTimeRoundTrip(t *testing.T) { + now := time.Now() + now = time.Unix(now.Unix(), 0) + val := struct { + Value time.Time + ID string + }{ + ID: "time-rt-test", + Value: now, + } + + bsonOut, err := Marshal(val) + noerr(t, err) + rtval := struct { + Value time.Time + ID string + }{} + + err = Unmarshal(bsonOut, &rtval) + noerr(t, err) + if !cmp.Equal(val, rtval) { + t.Errorf("Did not round trip properly. 
got %v; want %v", val, rtval) + } +} + +func TestD(t *testing.T) { + t.Run("can marshal", func(t *testing.T) { + d := D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} + idx, want := bsoncore.AppendDocumentStart(nil) + want = bsoncore.AppendStringElement(want, "foo", "bar") + want = bsoncore.AppendStringElement(want, "hello", "world") + want = bsoncore.AppendDoubleElement(want, "pi", 3.14159) + want, err := bsoncore.AppendDocumentEnd(want, idx) + noerr(t, err) + got, err := Marshal(d) + noerr(t, err) + if !bytes.Equal(got, want) { + t.Errorf("Marshaled documents do not match. got %v; want %v", Raw(got), Raw(want)) + } + }) + t.Run("can unmarshal", func(t *testing.T) { + want := D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} + idx, doc := bsoncore.AppendDocumentStart(nil) + doc = bsoncore.AppendStringElement(doc, "foo", "bar") + doc = bsoncore.AppendStringElement(doc, "hello", "world") + doc = bsoncore.AppendDoubleElement(doc, "pi", 3.14159) + doc, err := bsoncore.AppendDocumentEnd(doc, idx) + noerr(t, err) + var got D + err = Unmarshal(doc, &got) + noerr(t, err) + if !cmp.Equal(got, want) { + t.Errorf("Unmarshaled documents do not match. got %v; want %v", got, want) + } + }) +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go new file mode 100644 index 0000000..0ebc9a1 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go @@ -0,0 +1,163 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec // import "go.mongodb.org/mongo-driver/bson/bsoncodec" + +import ( + "fmt" + "reflect" + "strings" + + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" +) + +// Marshaler is an interface implemented by types that can marshal themselves +// into a BSON document represented as bytes. The bytes returned must be a valid +// BSON document if the error is nil. +type Marshaler interface { + MarshalBSON() ([]byte, error) +} + +// ValueMarshaler is an interface implemented by types that can marshal +// themselves into a BSON value as bytes. The type must be the valid type for +// the bytes returned. The bytes and byte type together must be valid if the +// error is nil. +type ValueMarshaler interface { + MarshalBSONValue() (bsontype.Type, []byte, error) +} + +// Unmarshaler is an interface implemented by types that can unmarshal a BSON +// document representation of themselves. The BSON bytes can be assumed to be +// valid. UnmarshalBSON must copy the BSON bytes if it wishes to retain the data +// after returning. +type Unmarshaler interface { + UnmarshalBSON([]byte) error +} + +// ValueUnmarshaler is an interface implemented by types that can unmarshal a +// BSON value representation of themselves. The BSON bytes and type can be +// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it +// wishes to retain the data after returning. +type ValueUnmarshaler interface { + UnmarshalBSONValue(bsontype.Type, []byte) error +} + +// ValueEncoderError is an error returned from a ValueEncoder when the provided value can't be +// encoded by the ValueEncoder. 
+type ValueEncoderError struct { + Name string + Types []reflect.Type + Kinds []reflect.Kind + Received reflect.Value +} + +func (vee ValueEncoderError) Error() string { + typeKinds := make([]string, 0, len(vee.Types)+len(vee.Kinds)) + for _, t := range vee.Types { + typeKinds = append(typeKinds, t.String()) + } + for _, k := range vee.Kinds { + if k == reflect.Map { + typeKinds = append(typeKinds, "map[string]*") + continue + } + typeKinds = append(typeKinds, k.String()) + } + received := vee.Received.Kind().String() + if vee.Received.IsValid() { + received = vee.Received.Type().String() + } + return fmt.Sprintf("%s can only encode valid %s, but got %s", vee.Name, strings.Join(typeKinds, ", "), received) +} + +// ValueDecoderError is an error returned from a ValueDecoder when the provided value can't be +// decoded by the ValueDecoder. +type ValueDecoderError struct { + Name string + Types []reflect.Type + Kinds []reflect.Kind + Received reflect.Value +} + +func (vde ValueDecoderError) Error() string { + typeKinds := make([]string, 0, len(vde.Types)+len(vde.Kinds)) + for _, t := range vde.Types { + typeKinds = append(typeKinds, t.String()) + } + for _, k := range vde.Kinds { + if k == reflect.Map { + typeKinds = append(typeKinds, "map[string]*") + continue + } + typeKinds = append(typeKinds, k.String()) + } + received := vde.Received.Kind().String() + if vde.Received.IsValid() { + received = vde.Received.Type().String() + } + return fmt.Sprintf("%s can only decode valid and settable %s, but got %s", vde.Name, strings.Join(typeKinds, ", "), received) +} + +// EncodeContext is the contextual information required for a Codec to encode a +// value. +type EncodeContext struct { + *Registry + MinSize bool +} + +// DecodeContext is the contextual information required for a Codec to decode a +// value. +type DecodeContext struct { + *Registry + Truncate bool + // Ancestor is the type of a containing document. 
This is mainly used to determine what type + // should be used when decoding an embedded document into an empty interface. For example, if + // Ancestor is a bson.M, BSON embedded document values being decoded into an empty interface + // will be decoded into a bson.M. + Ancestor reflect.Type +} + +// ValueCodec is the interface that groups the methods to encode and decode +// values. +type ValueCodec interface { + ValueEncoder + ValueDecoder +} + +// ValueEncoder is the interface implemented by types that can handle the encoding of a value. +type ValueEncoder interface { + EncodeValue(EncodeContext, bsonrw.ValueWriter, reflect.Value) error +} + +// ValueEncoderFunc is an adapter function that allows a function with the correct signature to be +// used as a ValueEncoder. +type ValueEncoderFunc func(EncodeContext, bsonrw.ValueWriter, reflect.Value) error + +// EncodeValue implements the ValueEncoder interface. +func (fn ValueEncoderFunc) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + return fn(ec, vw, val) +} + +// ValueDecoder is the interface implemented by types that can handle the decoding of a value. +type ValueDecoder interface { + DecodeValue(DecodeContext, bsonrw.ValueReader, reflect.Value) error +} + +// ValueDecoderFunc is an adapter function that allows a function with the correct signature to be +// used as a ValueDecoder. +type ValueDecoderFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) error + +// DecodeValue implements the ValueDecoder interface. +func (fn ValueDecoderFunc) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + return fn(dc, vr, val) +} + +// CodecZeroer is the interface implemented by Codecs that can also determine if +// a value of the type that would be encoded is zero. 
+type CodecZeroer interface { + IsTypeZero(interface{}) bool +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec_test.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec_test.go new file mode 100644 index 0000000..2890884 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec_test.go @@ -0,0 +1,145 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "fmt" + "reflect" + "testing" + "time" + + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" +) + +func ExampleValueEncoder() { + var _ ValueEncoderFunc = func(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if val.Kind() != reflect.String { + return ValueEncoderError{Name: "StringEncodeValue", Kinds: []reflect.Kind{reflect.String}, Received: val} + } + + return vw.WriteString(val.String()) + } +} + +func ExampleValueDecoder() { + var _ ValueDecoderFunc = func(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Kind() != reflect.String { + return ValueDecoderError{Name: "StringDecodeValue", Kinds: []reflect.Kind{reflect.String}, Received: val} + } + + if vr.Type() != bsontype.String { + return fmt.Errorf("cannot decode %v into a string type", vr.Type()) + } + + str, err := vr.ReadString() + if err != nil { + return err + } + val.SetString(str) + return nil + } +} + +func noerr(t *testing.T, err error) { + if err != nil { + t.Helper() + t.Errorf("Unexpected error: (%T)%v", err, err) + t.FailNow() + } +} + +func compareTime(t1, t2 time.Time) bool { + if t1.Location() != t2.Location() { + return false + } + return t1.Equal(t2) +} + +func compareErrors(err1, 
err2 error) bool { + if err1 == nil && err2 == nil { + return true + } + + if err1 == nil || err2 == nil { + return false + } + + if err1.Error() != err2.Error() { + return false + } + + return true +} + +func compareDecimal128(d1, d2 primitive.Decimal128) bool { + d1H, d1L := d1.GetBytes() + d2H, d2L := d2.GetBytes() + + if d1H != d2H { + return false + } + + if d1L != d2L { + return false + } + + return true +} + +func compareStrings(s1, s2 string) bool { return s1 == s2 } + +type noPrivateFields struct { + a string +} + +func compareNoPrivateFields(npf1, npf2 noPrivateFields) bool { + return npf1.a != npf2.a // We don't want these to be equal +} + +type zeroTest struct { + reportZero bool +} + +func (z zeroTest) IsZero() bool { return z.reportZero } + +func compareZeroTest(_, _ zeroTest) bool { return true } + +type nonZeroer struct { + value bool +} + +type llCodec struct { + t *testing.T + decodeval interface{} + encodeval interface{} + err error +} + +func (llc *llCodec) EncodeValue(_ EncodeContext, _ bsonrw.ValueWriter, i interface{}) error { + if llc.err != nil { + return llc.err + } + + llc.encodeval = i + return nil +} + +func (llc *llCodec) DecodeValue(_ DecodeContext, _ bsonrw.ValueReader, val reflect.Value) error { + if llc.err != nil { + return llc.err + } + + if !reflect.TypeOf(llc.decodeval).AssignableTo(val.Type()) { + llc.t.Errorf("decodeval must be assignable to val provided to DecodeValue, but is not. decodeval %T; val %T", llc.decodeval, val) + return nil + } + + val.Set(reflect.ValueOf(llc.decodeval)) + return nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go new file mode 100644 index 0000000..65cd1c0 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go @@ -0,0 +1,1014 @@ +// Copyright (C) MongoDB, Inc. 2017-present. 
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0

package bsoncodec

import (
	"encoding/json"
	"errors"
	"fmt"
	"math"
	"net/url"
	"reflect"
	"strconv"
	"time"

	"go.mongodb.org/mongo-driver/bson/bsonrw"
	"go.mongodb.org/mongo-driver/bson/bsontype"
	"go.mongodb.org/mongo-driver/bson/primitive"
	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
)

// defaultValueDecoders is the package-level instance whose methods are
// registered as the stock decoders for a registry.
var defaultValueDecoders DefaultValueDecoders

// DefaultValueDecoders is a namespace type for the default ValueDecoders used
// when creating a registry.
type DefaultValueDecoders struct{}

// RegisterDefaultDecoders will register the decoder methods attached to DefaultValueDecoders with
// the provided RegistryBuilder.
//
// There is no support for decoding map[string]interface{} because there is no decoder for
// interface{}, so users must either register this decoder themselves or use the
// EmptyInterfaceDecoder available in the bson package.
//
// Panics if rb is nil — a nil builder is a programmer error, not a runtime condition.
func (dvd DefaultValueDecoders) RegisterDefaultDecoders(rb *RegistryBuilder) {
	if rb == nil {
		panic(errors.New("argument to RegisterDefaultDecoders must not be nil"))
	}

	// Registration order: exact-type decoders first, then kind-based
	// fallbacks, then the BSON-type -> Go-type map consulted when decoding
	// into interface{}.
	rb.
		RegisterDecoder(tBinary, ValueDecoderFunc(dvd.BinaryDecodeValue)).
		RegisterDecoder(tUndefined, ValueDecoderFunc(dvd.UndefinedDecodeValue)).
		RegisterDecoder(tDateTime, ValueDecoderFunc(dvd.DateTimeDecodeValue)).
		RegisterDecoder(tNull, ValueDecoderFunc(dvd.NullDecodeValue)).
		RegisterDecoder(tRegex, ValueDecoderFunc(dvd.RegexDecodeValue)).
		RegisterDecoder(tDBPointer, ValueDecoderFunc(dvd.DBPointerDecodeValue)).
		RegisterDecoder(tTimestamp, ValueDecoderFunc(dvd.TimestampDecodeValue)).
		RegisterDecoder(tMinKey, ValueDecoderFunc(dvd.MinKeyDecodeValue)).
		RegisterDecoder(tMaxKey, ValueDecoderFunc(dvd.MaxKeyDecodeValue)).
		RegisterDecoder(tJavaScript, ValueDecoderFunc(dvd.JavaScriptDecodeValue)).
		RegisterDecoder(tSymbol, ValueDecoderFunc(dvd.SymbolDecodeValue)).
		RegisterDecoder(tByteSlice, ValueDecoderFunc(dvd.ByteSliceDecodeValue)).
		RegisterDecoder(tTime, ValueDecoderFunc(dvd.TimeDecodeValue)).
		RegisterDecoder(tEmpty, ValueDecoderFunc(dvd.EmptyInterfaceDecodeValue)).
		RegisterDecoder(tOID, ValueDecoderFunc(dvd.ObjectIDDecodeValue)).
		RegisterDecoder(tDecimal, ValueDecoderFunc(dvd.Decimal128DecodeValue)).
		RegisterDecoder(tJSONNumber, ValueDecoderFunc(dvd.JSONNumberDecodeValue)).
		RegisterDecoder(tURL, ValueDecoderFunc(dvd.URLDecodeValue)).
		RegisterDecoder(tValueUnmarshaler, ValueDecoderFunc(dvd.ValueUnmarshalerDecodeValue)).
		RegisterDecoder(tUnmarshaler, ValueDecoderFunc(dvd.UnmarshalerDecodeValue)).
		RegisterDecoder(tCoreDocument, ValueDecoderFunc(dvd.CoreDocumentDecodeValue)).
		RegisterDecoder(tCodeWithScope, ValueDecoderFunc(dvd.CodeWithScopeDecodeValue)).
		RegisterDefaultDecoder(reflect.Bool, ValueDecoderFunc(dvd.BooleanDecodeValue)).
		RegisterDefaultDecoder(reflect.Int, ValueDecoderFunc(dvd.IntDecodeValue)).
		RegisterDefaultDecoder(reflect.Int8, ValueDecoderFunc(dvd.IntDecodeValue)).
		RegisterDefaultDecoder(reflect.Int16, ValueDecoderFunc(dvd.IntDecodeValue)).
		RegisterDefaultDecoder(reflect.Int32, ValueDecoderFunc(dvd.IntDecodeValue)).
		RegisterDefaultDecoder(reflect.Int64, ValueDecoderFunc(dvd.IntDecodeValue)).
		RegisterDefaultDecoder(reflect.Uint, ValueDecoderFunc(dvd.UintDecodeValue)).
		RegisterDefaultDecoder(reflect.Uint8, ValueDecoderFunc(dvd.UintDecodeValue)).
		RegisterDefaultDecoder(reflect.Uint16, ValueDecoderFunc(dvd.UintDecodeValue)).
		RegisterDefaultDecoder(reflect.Uint32, ValueDecoderFunc(dvd.UintDecodeValue)).
		RegisterDefaultDecoder(reflect.Uint64, ValueDecoderFunc(dvd.UintDecodeValue)).
		RegisterDefaultDecoder(reflect.Float32, ValueDecoderFunc(dvd.FloatDecodeValue)).
		RegisterDefaultDecoder(reflect.Float64, ValueDecoderFunc(dvd.FloatDecodeValue)).
		RegisterDefaultDecoder(reflect.Array, ValueDecoderFunc(dvd.ArrayDecodeValue)).
		RegisterDefaultDecoder(reflect.Map, ValueDecoderFunc(dvd.MapDecodeValue)).
		RegisterDefaultDecoder(reflect.Slice, ValueDecoderFunc(dvd.SliceDecodeValue)).
		RegisterDefaultDecoder(reflect.String, ValueDecoderFunc(dvd.StringDecodeValue)).
		RegisterDefaultDecoder(reflect.Struct, &StructCodec{cache: make(map[reflect.Type]*structDescription), parser: DefaultStructTagParser}).
		RegisterDefaultDecoder(reflect.Ptr, NewPointerCodec()).
		RegisterTypeMapEntry(bsontype.Double, tFloat64).
		RegisterTypeMapEntry(bsontype.String, tString).
		RegisterTypeMapEntry(bsontype.Array, tA).
		RegisterTypeMapEntry(bsontype.Binary, tBinary).
		RegisterTypeMapEntry(bsontype.Undefined, tUndefined).
		RegisterTypeMapEntry(bsontype.ObjectID, tOID).
		RegisterTypeMapEntry(bsontype.Boolean, tBool).
		RegisterTypeMapEntry(bsontype.DateTime, tDateTime).
		RegisterTypeMapEntry(bsontype.Regex, tRegex).
		RegisterTypeMapEntry(bsontype.DBPointer, tDBPointer).
		RegisterTypeMapEntry(bsontype.JavaScript, tJavaScript).
		RegisterTypeMapEntry(bsontype.Symbol, tSymbol).
		RegisterTypeMapEntry(bsontype.CodeWithScope, tCodeWithScope).
		RegisterTypeMapEntry(bsontype.Int32, tInt32).
		RegisterTypeMapEntry(bsontype.Int64, tInt64).
		RegisterTypeMapEntry(bsontype.Timestamp, tTimestamp).
		RegisterTypeMapEntry(bsontype.Decimal128, tDecimal).
		RegisterTypeMapEntry(bsontype.MinKey, tMinKey).
		RegisterTypeMapEntry(bsontype.MaxKey, tMaxKey).
		RegisterTypeMapEntry(bsontype.Type(0), tD) // top-level documents decode into primitive.D by default
}

// BooleanDecodeValue is the ValueDecoderFunc for bool types.
+func (dvd DefaultValueDecoders) BooleanDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if vr.Type() != bsontype.Boolean { + return fmt.Errorf("cannot decode %v into a boolean", vr.Type()) + } + if !val.IsValid() || !val.CanSet() || val.Kind() != reflect.Bool { + return ValueDecoderError{Name: "BooleanDecodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val} + } + + b, err := vr.ReadBoolean() + val.SetBool(b) + return err +} + +// IntDecodeValue is the ValueDecoderFunc for bool types. +func (dvd DefaultValueDecoders) IntDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + var i64 int64 + var err error + switch vr.Type() { + case bsontype.Int32: + i32, err := vr.ReadInt32() + if err != nil { + return err + } + i64 = int64(i32) + case bsontype.Int64: + i64, err = vr.ReadInt64() + if err != nil { + return err + } + case bsontype.Double: + f64, err := vr.ReadDouble() + if err != nil { + return err + } + if !dc.Truncate && math.Floor(f64) != f64 { + return errors.New("IntDecodeValue can only truncate float64 to an integer type when truncation is enabled") + } + if f64 > float64(math.MaxInt64) { + return fmt.Errorf("%g overflows int64", f64) + } + i64 = int64(f64) + default: + return fmt.Errorf("cannot decode %v into an integer type", vr.Type()) + } + + if !val.CanSet() { + return ValueDecoderError{ + Name: "IntDecodeValue", + Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, + Received: val, + } + } + + switch val.Kind() { + case reflect.Int8: + if i64 < math.MinInt8 || i64 > math.MaxInt8 { + return fmt.Errorf("%d overflows int8", i64) + } + case reflect.Int16: + if i64 < math.MinInt16 || i64 > math.MaxInt16 { + return fmt.Errorf("%d overflows int16", i64) + } + case reflect.Int32: + if i64 < math.MinInt32 || i64 > math.MaxInt32 { + return fmt.Errorf("%d overflows int32", i64) + } + case reflect.Int64: + case reflect.Int: + if int64(int(i64)) != i64 { // 
Can we fit this inside of an int + return fmt.Errorf("%d overflows int", i64) + } + default: + return ValueDecoderError{ + Name: "IntDecodeValue", + Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, + Received: val, + } + } + + val.SetInt(i64) + return nil +} + +// UintDecodeValue is the ValueDecoderFunc for uint types. +func (dvd DefaultValueDecoders) UintDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + var i64 int64 + var err error + switch vr.Type() { + case bsontype.Int32: + i32, err := vr.ReadInt32() + if err != nil { + return err + } + i64 = int64(i32) + case bsontype.Int64: + i64, err = vr.ReadInt64() + if err != nil { + return err + } + case bsontype.Double: + f64, err := vr.ReadDouble() + if err != nil { + return err + } + if !dc.Truncate && math.Floor(f64) != f64 { + return errors.New("UintDecodeValue can only truncate float64 to an integer type when truncation is enabled") + } + if f64 > float64(math.MaxInt64) { + return fmt.Errorf("%g overflows int64", f64) + } + i64 = int64(f64) + default: + return fmt.Errorf("cannot decode %v into an integer type", vr.Type()) + } + + if !val.CanSet() { + return ValueDecoderError{ + Name: "UintDecodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: val, + } + } + + switch val.Kind() { + case reflect.Uint8: + if i64 < 0 || i64 > math.MaxUint8 { + return fmt.Errorf("%d overflows uint8", i64) + } + case reflect.Uint16: + if i64 < 0 || i64 > math.MaxUint16 { + return fmt.Errorf("%d overflows uint16", i64) + } + case reflect.Uint32: + if i64 < 0 || i64 > math.MaxUint32 { + return fmt.Errorf("%d overflows uint32", i64) + } + case reflect.Uint64: + if i64 < 0 { + return fmt.Errorf("%d overflows uint64", i64) + } + case reflect.Uint: + if i64 < 0 || int64(uint(i64)) != i64 { // Can we fit this inside of an uint + return fmt.Errorf("%d overflows uint", i64) + } + default: + return 
ValueDecoderError{ + Name: "UintDecodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: val, + } + } + + val.SetUint(uint64(i64)) + return nil +} + +// FloatDecodeValue is the ValueDecoderFunc for float types. +func (dvd DefaultValueDecoders) FloatDecodeValue(ec DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + var f float64 + var err error + switch vr.Type() { + case bsontype.Int32: + i32, err := vr.ReadInt32() + if err != nil { + return err + } + f = float64(i32) + case bsontype.Int64: + i64, err := vr.ReadInt64() + if err != nil { + return err + } + f = float64(i64) + case bsontype.Double: + f, err = vr.ReadDouble() + if err != nil { + return err + } + default: + return fmt.Errorf("cannot decode %v into a float32 or float64 type", vr.Type()) + } + + if !val.CanSet() { + return ValueDecoderError{Name: "FloatDecodeValue", Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, Received: val} + } + + switch val.Kind() { + case reflect.Float32: + if !ec.Truncate && float64(float32(f)) != f { + return errors.New("FloatDecodeValue can only convert float64 to float32 when truncation is allowed") + } + case reflect.Float64: + default: + return ValueDecoderError{Name: "FloatDecodeValue", Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, Received: val} + } + + val.SetFloat(f) + return nil +} + +// StringDecodeValue is the ValueDecoderFunc for string types. +func (dvd DefaultValueDecoders) StringDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + var str string + var err error + switch vr.Type() { + // TODO(GODRIVER-577): Handle JavaScript and Symbol BSON types when allowed. 
+ case bsontype.String: + str, err = vr.ReadString() + if err != nil { + return err + } + default: + return fmt.Errorf("cannot decode %v into a string type", vr.Type()) + } + if !val.CanSet() || val.Kind() != reflect.String { + return ValueDecoderError{Name: "StringDecodeValue", Kinds: []reflect.Kind{reflect.String}, Received: val} + } + + val.SetString(str) + return nil +} + +// JavaScriptDecodeValue is the ValueDecoderFunc for the primitive.JavaScript type. +func (DefaultValueDecoders) JavaScriptDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tJavaScript { + return ValueDecoderError{Name: "BinaryDecodeValue", Types: []reflect.Type{tJavaScript}, Received: val} + } + + if vr.Type() != bsontype.JavaScript { + return fmt.Errorf("cannot decode %v into a primitive.JavaScript", vr.Type()) + } + + js, err := vr.ReadJavascript() + if err != nil { + return err + } + + val.SetString(js) + return nil +} + +// SymbolDecodeValue is the ValueDecoderFunc for the primitive.Symbol type. +func (DefaultValueDecoders) SymbolDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tSymbol { + return ValueDecoderError{Name: "BinaryDecodeValue", Types: []reflect.Type{tSymbol}, Received: val} + } + + if vr.Type() != bsontype.Symbol { + return fmt.Errorf("cannot decode %v into a primitive.Symbol", vr.Type()) + } + + symbol, err := vr.ReadSymbol() + if err != nil { + return err + } + + val.SetString(symbol) + return nil +} + +// BinaryDecodeValue is the ValueDecoderFunc for Binary. 
func (DefaultValueDecoders) BinaryDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
	// Destination must be a settable primitive.Binary.
	if !val.CanSet() || val.Type() != tBinary {
		return ValueDecoderError{Name: "BinaryDecodeValue", Types: []reflect.Type{tBinary}, Received: val}
	}

	if vr.Type() != bsontype.Binary {
		return fmt.Errorf("cannot decode %v into a Binary", vr.Type())
	}

	data, subtype, err := vr.ReadBinary()
	if err != nil {
		return err
	}

	val.Set(reflect.ValueOf(primitive.Binary{Subtype: subtype, Data: data}))
	return nil
}

// UndefinedDecodeValue is the ValueDecoderFunc for Undefined.
func (DefaultValueDecoders) UndefinedDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
	if !val.CanSet() || val.Type() != tUndefined {
		return ValueDecoderError{Name: "UndefinedDecodeValue", Types: []reflect.Type{tUndefined}, Received: val}
	}

	if vr.Type() != bsontype.Undefined {
		return fmt.Errorf("cannot decode %v into an Undefined", vr.Type())
	}

	// Undefined carries no payload; set the marker value, then consume the
	// element from the reader (any read error is returned after the set).
	val.Set(reflect.ValueOf(primitive.Undefined{}))
	return vr.ReadUndefined()
}

// ObjectIDDecodeValue is the ValueDecoderFunc for primitive.ObjectID.
func (dvd DefaultValueDecoders) ObjectIDDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
	if !val.CanSet() || val.Type() != tOID {
		return ValueDecoderError{Name: "ObjectIDDecodeValue", Types: []reflect.Type{tOID}, Received: val}
	}

	if vr.Type() != bsontype.ObjectID {
		return fmt.Errorf("cannot decode %v into an ObjectID", vr.Type())
	}
	// NOTE(review): on a read error val is still overwritten with the zero
	// ObjectID before err is returned.
	oid, err := vr.ReadObjectID()
	val.Set(reflect.ValueOf(oid))
	return err
}

// DateTimeDecodeValue is the ValueDecoderFunc for DateTime.
func (DefaultValueDecoders) DateTimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
	// Destination must be a settable primitive.DateTime.
	if !val.CanSet() || val.Type() != tDateTime {
		return ValueDecoderError{Name: "DateTimeDecodeValue", Types: []reflect.Type{tDateTime}, Received: val}
	}

	if vr.Type() != bsontype.DateTime {
		return fmt.Errorf("cannot decode %v into a DateTime", vr.Type())
	}

	dt, err := vr.ReadDateTime()
	if err != nil {
		return err
	}

	// dt is kept as the raw int64 the reader produced, wrapped in the
	// primitive.DateTime type.
	val.Set(reflect.ValueOf(primitive.DateTime(dt)))
	return nil
}

// NullDecodeValue is the ValueDecoderFunc for Null.
func (DefaultValueDecoders) NullDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
	if !val.CanSet() || val.Type() != tNull {
		return ValueDecoderError{Name: "NullDecodeValue", Types: []reflect.Type{tNull}, Received: val}
	}

	if vr.Type() != bsontype.Null {
		return fmt.Errorf("cannot decode %v into a Null", vr.Type())
	}

	// Null carries no payload; set the marker, then consume the element.
	val.Set(reflect.ValueOf(primitive.Null{}))
	return vr.ReadNull()
}

// RegexDecodeValue is the ValueDecoderFunc for Regex.
func (DefaultValueDecoders) RegexDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
	if !val.CanSet() || val.Type() != tRegex {
		return ValueDecoderError{Name: "RegexDecodeValue", Types: []reflect.Type{tRegex}, Received: val}
	}

	if vr.Type() != bsontype.Regex {
		return fmt.Errorf("cannot decode %v into a Regex", vr.Type())
	}

	pattern, options, err := vr.ReadRegex()
	if err != nil {
		return err
	}

	val.Set(reflect.ValueOf(primitive.Regex{Pattern: pattern, Options: options}))
	return nil
}

// DBPointerDecodeValue is the ValueDecoderFunc for DBPointer.
func (DefaultValueDecoders) DBPointerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
	// Destination must be a settable primitive.DBPointer.
	if !val.CanSet() || val.Type() != tDBPointer {
		return ValueDecoderError{Name: "DBPointerDecodeValue", Types: []reflect.Type{tDBPointer}, Received: val}
	}

	if vr.Type() != bsontype.DBPointer {
		return fmt.Errorf("cannot decode %v into a DBPointer", vr.Type())
	}

	// ns is the namespace string, pointer the referenced ObjectID.
	ns, pointer, err := vr.ReadDBPointer()
	if err != nil {
		return err
	}

	val.Set(reflect.ValueOf(primitive.DBPointer{DB: ns, Pointer: pointer}))
	return nil
}

// TimestampDecodeValue is the ValueDecoderFunc for Timestamp.
func (DefaultValueDecoders) TimestampDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
	if !val.CanSet() || val.Type() != tTimestamp {
		return ValueDecoderError{Name: "TimestampDecodeValue", Types: []reflect.Type{tTimestamp}, Received: val}
	}

	if vr.Type() != bsontype.Timestamp {
		return fmt.Errorf("cannot decode %v into a Timestamp", vr.Type())
	}

	// t is the time component, incr the ordinal increment within it.
	t, incr, err := vr.ReadTimestamp()
	if err != nil {
		return err
	}

	val.Set(reflect.ValueOf(primitive.Timestamp{T: t, I: incr}))
	return nil
}

// MinKeyDecodeValue is the ValueDecoderFunc for MinKey.
func (DefaultValueDecoders) MinKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
	if !val.CanSet() || val.Type() != tMinKey {
		return ValueDecoderError{Name: "MinKeyDecodeValue", Types: []reflect.Type{tMinKey}, Received: val}
	}

	if vr.Type() != bsontype.MinKey {
		return fmt.Errorf("cannot decode %v into a MinKey", vr.Type())
	}

	// MinKey has no payload; set the marker, then consume the element.
	val.Set(reflect.ValueOf(primitive.MinKey{}))
	return vr.ReadMinKey()
}

// MaxKeyDecodeValue is the ValueDecoderFunc for MaxKey.
func (DefaultValueDecoders) MaxKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
	// Destination must be a settable primitive.MaxKey.
	if !val.CanSet() || val.Type() != tMaxKey {
		return ValueDecoderError{Name: "MaxKeyDecodeValue", Types: []reflect.Type{tMaxKey}, Received: val}
	}

	if vr.Type() != bsontype.MaxKey {
		return fmt.Errorf("cannot decode %v into a MaxKey", vr.Type())
	}

	// MaxKey has no payload; set the marker, then consume the element.
	val.Set(reflect.ValueOf(primitive.MaxKey{}))
	return vr.ReadMaxKey()
}

// Decimal128DecodeValue is the ValueDecoderFunc for primitive.Decimal128.
func (dvd DefaultValueDecoders) Decimal128DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
	// NOTE(review): the reader-type check precedes the CanSet check here,
	// the reverse of the order used by the sibling decoders.
	if vr.Type() != bsontype.Decimal128 {
		return fmt.Errorf("cannot decode %v into a primitive.Decimal128", vr.Type())
	}

	if !val.CanSet() || val.Type() != tDecimal {
		return ValueDecoderError{Name: "Decimal128DecodeValue", Types: []reflect.Type{tDecimal}, Received: val}
	}
	// NOTE(review): val is set from d128 even when ReadDecimal128 errors
	// (d128 would be the zero value in that case).
	d128, err := vr.ReadDecimal128()
	val.Set(reflect.ValueOf(d128))
	return err
}

// JSONNumberDecodeValue is the ValueDecoderFunc for json.Number.
func (dvd DefaultValueDecoders) JSONNumberDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
	// Destination must be a settable json.Number.
	if !val.CanSet() || val.Type() != tJSONNumber {
		return ValueDecoderError{Name: "JSONNumberDecodeValue", Types: []reflect.Type{tJSONNumber}, Received: val}
	}

	// json.Number is a string, so each numeric BSON type is formatted into
	// its textual representation.
	switch vr.Type() {
	case bsontype.Double:
		f64, err := vr.ReadDouble()
		if err != nil {
			return err
		}
		// 'g' with precision -1 yields the shortest exact representation.
		val.Set(reflect.ValueOf(json.Number(strconv.FormatFloat(f64, 'g', -1, 64))))
	case bsontype.Int32:
		i32, err := vr.ReadInt32()
		if err != nil {
			return err
		}
		val.Set(reflect.ValueOf(json.Number(strconv.FormatInt(int64(i32), 10))))
	case bsontype.Int64:
		i64, err := vr.ReadInt64()
		if err != nil {
			return err
		}
		val.Set(reflect.ValueOf(json.Number(strconv.FormatInt(i64, 10))))
	default:
		return fmt.Errorf("cannot decode %v into a json.Number", vr.Type())
	}

	return nil
}

// URLDecodeValue is the ValueDecoderFunc for url.URL.
func (dvd DefaultValueDecoders) URLDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
	if vr.Type() != bsontype.String {
		return fmt.Errorf("cannot decode %v into a *url.URL", vr.Type())
	}

	// NOTE(review): the string is read and parsed before the destination is
	// validated, so an unusable val still consumes the reader value.
	str, err := vr.ReadString()
	if err != nil {
		return err
	}

	u, err := url.Parse(str)
	if err != nil {
		return err
	}

	if !val.CanSet() || val.Type() != tURL {
		return ValueDecoderError{Name: "URLDecodeValue", Types: []reflect.Type{tURL}, Received: val}
	}

	// Store the url.URL value itself (val is url.URL, not *url.URL).
	val.Set(reflect.ValueOf(u).Elem())
	return nil
}

// TimeDecodeValue is the ValueDecoderFunc for time.Time.
func (dvd DefaultValueDecoders) TimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
	if vr.Type() != bsontype.DateTime {
		return fmt.Errorf("cannot decode %v into a time.Time", vr.Type())
	}

	dt, err := vr.ReadDateTime()
	if err != nil {
		return err
	}

	if !val.CanSet() || val.Type() != tTime {
		return ValueDecoderError{Name: "TimeDecodeValue", Types: []reflect.Type{tTime}, Received: val}
	}

	// dt is treated here as milliseconds since the Unix epoch: the whole
	// seconds go to time.Unix's first argument and the remaining
	// milliseconds are scaled to nanoseconds. The result is pinned to UTC.
	val.Set(reflect.ValueOf(time.Unix(dt/1000, dt%1000*1000000).UTC()))
	return nil
}

// ByteSliceDecodeValue is the ValueDecoderFunc for []byte.
func (dvd DefaultValueDecoders) ByteSliceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
	// Accepts Binary (subtype 0x00 only) or Null.
	if vr.Type() != bsontype.Binary && vr.Type() != bsontype.Null {
		return fmt.Errorf("cannot decode %v into a []byte", vr.Type())
	}

	if !val.CanSet() || val.Type() != tByteSlice {
		return ValueDecoderError{Name: "ByteSliceDecodeValue", Types: []reflect.Type{tByteSlice}, Received: val}
	}

	// BSON null maps to a nil []byte.
	if vr.Type() == bsontype.Null {
		val.Set(reflect.Zero(val.Type()))
		return vr.ReadNull()
	}

	data, subtype, err := vr.ReadBinary()
	if err != nil {
		return err
	}
	// Only the generic binary subtype is representable as a plain []byte.
	if subtype != 0x00 {
		return fmt.Errorf("ByteSliceDecodeValue can only be used to decode subtype 0x00 for %s, got %v", bsontype.Binary, subtype)
	}

	val.Set(reflect.ValueOf(data))
	return nil
}

// MapDecodeValue is the ValueDecoderFunc for map[string]* types.
func (dvd DefaultValueDecoders) MapDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
	// Destination must be a settable map whose key kind is string (named
	// string key types are allowed; see the Convert below).
	if !val.CanSet() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String {
		return ValueDecoderError{Name: "MapDecodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val}
	}

	switch vr.Type() {
	// Type(0) is the top-level-document marker; both it and embedded
	// documents decode element-by-element below.
	case bsontype.Type(0), bsontype.EmbeddedDocument:
	case bsontype.Null:
		// BSON null maps to a nil map.
		val.Set(reflect.Zero(val.Type()))
		return vr.ReadNull()
	default:
		return fmt.Errorf("cannot decode %v into a %s", vr.Type(), val.Type())
	}

	dr, err := vr.ReadDocument()
	if err != nil {
		return err
	}

	// Allocate a map if the caller passed a nil one.
	if val.IsNil() {
		val.Set(reflect.MakeMap(val.Type()))
	}

	// One decoder lookup for the element type covers every entry.
	eType := val.Type().Elem()
	decoder, err := dc.LookupDecoder(eType)
	if err != nil {
		return err
	}

	// Record the enclosing map type so interface{} elements containing
	// embedded documents can decode into the same map type.
	if eType == tEmpty {
		dc.Ancestor = val.Type()
	}

	keyType := val.Type().Key()
	for {
		key, vr, err := dr.ReadElement()
		if err == bsonrw.ErrEOD {
			break
		}
		if err != nil {
			return err
		}

		elem := reflect.New(eType).Elem()

		err = decoder.DecodeValue(dc, vr, elem)
		if err != nil {
			return err
		}

		// Convert handles named string key types.
		val.SetMapIndex(reflect.ValueOf(key).Convert(keyType), elem)
	}
	return nil
}

// ArrayDecodeValue is the ValueDecoderFunc for array types.
func (dvd DefaultValueDecoders) ArrayDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
	// Fixed-size Go arrays only; slices are handled by SliceDecodeValue.
	if !val.IsValid() || val.Kind() != reflect.Array {
		return ValueDecoderError{Name: "ArrayDecodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val}
	}

	switch vr.Type() {
	case bsontype.Array:
	// A (top-level or embedded) document may decode into an array of
	// primitive.E elements — i.e. an ordered document representation.
	case bsontype.Type(0), bsontype.EmbeddedDocument:
		if val.Type().Elem() != tE {
			return fmt.Errorf("cannot decode document into %s", val.Type())
		}
	default:
		return fmt.Errorf("cannot decode %v into an array", vr.Type())
	}

	// decodeD reads document elements; decodeDefault reads array values.
	var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error)
	switch val.Type().Elem() {
	case tE:
		elemsFunc = dvd.decodeD
	default:
		elemsFunc = dvd.decodeDefault
	}

	elems, err := elemsFunc(dc, vr, val)
	if err != nil {
		return err
	}

	// The array length is fixed, so excess elements are an error; a short
	// result leaves the remaining slots at their current values.
	if len(elems) > val.Len() {
		return fmt.Errorf("more elements returned in array than can fit inside %s", val.Type())
	}

	for idx, elem := range elems {
		val.Index(idx).Set(elem)
	}

	return nil
}

// SliceDecodeValue is the ValueDecoderFunc for slice types.
func (dvd DefaultValueDecoders) SliceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
	if !val.CanSet() || val.Kind() != reflect.Slice {
		return ValueDecoderError{Name: "SliceDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val}
	}

	switch vr.Type() {
	case bsontype.Array:
	case bsontype.Null:
		// BSON null maps to a nil slice.
		val.Set(reflect.Zero(val.Type()))
		return vr.ReadNull()
	// A (top-level or embedded) document may decode into []primitive.E —
	// i.e. a primitive.D-shaped ordered document.
	case bsontype.Type(0), bsontype.EmbeddedDocument:
		if val.Type().Elem() != tE {
			return fmt.Errorf("cannot decode document into %s", val.Type())
		}
	default:
		return fmt.Errorf("cannot decode %v into a slice", vr.Type())
	}

	// decodeD reads document elements; decodeDefault reads array values.
	var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error)
	switch val.Type().Elem() {
	case tE:
		// Record the slice type so nested interface{} values can decode
		// into the same document representation.
		dc.Ancestor = val.Type()
		elemsFunc = dvd.decodeD
	default:
		elemsFunc = dvd.decodeDefault
	}

	elems, err := elemsFunc(dc, vr, val)
	if err != nil {
		return err
	}

	if val.IsNil() {
		val.Set(reflect.MakeSlice(val.Type(), 0, len(elems)))
	}

	// Truncate then append: existing backing storage is reused where
	// capacity allows.
	val.SetLen(0)
	val.Set(reflect.Append(val, elems...))

	return nil
}

// ValueUnmarshalerDecodeValue is the ValueDecoderFunc for ValueUnmarshaler implementations.
func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
	// Either the value itself or a pointer to it must implement
	// ValueUnmarshaler.
	if !val.IsValid() || (!val.Type().Implements(tValueUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tValueUnmarshaler)) {
		return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val}
	}

	// A nil pointer destination is allocated before unmarshaling into it.
	if val.Kind() == reflect.Ptr && val.IsNil() {
		if !val.CanSet() {
			return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val}
		}
		val.Set(reflect.New(val.Type().Elem()))
	}

	if !val.Type().Implements(tValueUnmarshaler) {
		if !val.CanAddr() {
			return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val}
		}
		val = val.Addr() // If the type doesn't implement the interface, a pointer to it must.
	}

	// Copy the raw BSON value out of the reader so the unmarshaler receives
	// the bytes along with their BSON type.
	t, src, err := bsonrw.Copier{}.CopyValueToBytes(vr)
	if err != nil {
		return err
	}

	// Invoke UnmarshalBSONValue(t, src) reflectively; it returns a single
	// error value.
	fn := val.Convert(tValueUnmarshaler).MethodByName("UnmarshalBSONValue")
	errVal := fn.Call([]reflect.Value{reflect.ValueOf(t), reflect.ValueOf(src)})[0]
	if !errVal.IsNil() {
		return errVal.Interface().(error)
	}
	return nil
}

// UnmarshalerDecodeValue is the ValueDecoderFunc for Unmarshaler implementations.
func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
	// Either the value itself or a pointer to it must implement Unmarshaler.
	if !val.IsValid() || (!val.Type().Implements(tUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tUnmarshaler)) {
		return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val}
	}

	// A nil pointer destination is allocated before unmarshaling into it.
	if val.Kind() == reflect.Ptr && val.IsNil() {
		if !val.CanSet() {
			return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val}
		}
		val.Set(reflect.New(val.Type().Elem()))
	}

	if !val.Type().Implements(tUnmarshaler) {
		if !val.CanAddr() {
			return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val}
		}
		val = val.Addr() // If the type doesn't implement the interface, a pointer to it must.
	}

	// Copy the raw BSON bytes out of the reader; the BSON type tag is
	// discarded because UnmarshalBSON only takes the bytes.
	_, src, err := bsonrw.Copier{}.CopyValueToBytes(vr)
	if err != nil {
		return err
	}

	// Invoke UnmarshalBSON(src) reflectively; it returns a single error value.
	fn := val.Convert(tUnmarshaler).MethodByName("UnmarshalBSON")
	errVal := fn.Call([]reflect.Value{reflect.ValueOf(src)})[0]
	if !errVal.IsNil() {
		return errVal.Interface().(error)
	}
	return nil
}

// EmptyInterfaceDecodeValue is the ValueDecoderFunc for interface{}.
+func (dvd DefaultValueDecoders) EmptyInterfaceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tEmpty { + return ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: val} + } + + rtype, err := dc.LookupTypeMapEntry(vr.Type()) + if err != nil { + switch vr.Type() { + case bsontype.EmbeddedDocument: + if dc.Ancestor != nil { + rtype = dc.Ancestor + break + } + rtype = tD + case bsontype.Null: + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + default: + return err + } + } + + decoder, err := dc.LookupDecoder(rtype) + if err != nil { + return err + } + + elem := reflect.New(rtype).Elem() + err = decoder.DecodeValue(dc, vr, elem) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +// CoreDocumentDecodeValue is the ValueDecoderFunc for bsoncore.Document. +func (DefaultValueDecoders) CoreDocumentDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tCoreDocument { + return ValueDecoderError{Name: "CoreDocumentDecodeValue", Types: []reflect.Type{tCoreDocument}, Received: val} + } + + if val.IsNil() { + val.Set(reflect.MakeSlice(val.Type(), 0, 0)) + } + + val.SetLen(0) + + cdoc, err := bsonrw.Copier{}.AppendDocumentBytes(val.Interface().(bsoncore.Document), vr) + val.Set(reflect.ValueOf(cdoc)) + return err +} + +func (dvd DefaultValueDecoders) decodeDefault(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) ([]reflect.Value, error) { + elems := make([]reflect.Value, 0) + + ar, err := vr.ReadArray() + if err != nil { + return nil, err + } + + eType := val.Type().Elem() + + decoder, err := dc.LookupDecoder(eType) + if err != nil { + return nil, err + } + + for { + vr, err := ar.ReadValue() + if err == bsonrw.ErrEOA { + break + } + if err != nil { + return nil, err + } + + elem := reflect.New(eType).Elem() + + err = decoder.DecodeValue(dc, vr, elem) + if err != nil { + 
return nil, err + } + elems = append(elems, elem) + } + + return elems, nil +} + +// CodeWithScopeDecodeValue is the ValueDecoderFunc for CodeWithScope. +func (dvd DefaultValueDecoders) CodeWithScopeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tCodeWithScope { + return ValueDecoderError{Name: "CodeWithScopeDecodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val} + } + + if vr.Type() != bsontype.CodeWithScope { + return fmt.Errorf("cannot decode %v into a primitive.CodeWithScope", vr.Type()) + } + + code, dr, err := vr.ReadCodeWithScope() + if err != nil { + return err + } + + scope := reflect.New(tD).Elem() + + elems, err := dvd.decodeElemsFromDocumentReader(dc, dr) + if err != nil { + return err + } + + scope.Set(reflect.MakeSlice(tD, 0, len(elems))) + scope.Set(reflect.Append(scope, elems...)) + + val.Set(reflect.ValueOf(primitive.CodeWithScope{Code: primitive.JavaScript(code), Scope: scope.Interface().(primitive.D)})) + return nil +} + +func (dvd DefaultValueDecoders) decodeD(dc DecodeContext, vr bsonrw.ValueReader, _ reflect.Value) ([]reflect.Value, error) { + switch vr.Type() { + case bsontype.Type(0), bsontype.EmbeddedDocument: + default: + return nil, fmt.Errorf("cannot decode %v into a D", vr.Type()) + } + + dr, err := vr.ReadDocument() + if err != nil { + return nil, err + } + + return dvd.decodeElemsFromDocumentReader(dc, dr) +} + +func (DefaultValueDecoders) decodeElemsFromDocumentReader(dc DecodeContext, dr bsonrw.DocumentReader) ([]reflect.Value, error) { + decoder, err := dc.LookupDecoder(tEmpty) + if err != nil { + return nil, err + } + + elems := make([]reflect.Value, 0) + for { + key, vr, err := dr.ReadElement() + if err == bsonrw.ErrEOD { + break + } + if err != nil { + return nil, err + } + + val := reflect.New(tEmpty).Elem() + err = decoder.DecodeValue(dc, vr, val) + if err != nil { + return nil, err + } + + elems = append(elems, reflect.ValueOf(primitive.E{Key: 
key, Value: val.Interface()})) + } + + return elems, nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders_test.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders_test.go new file mode 100644 index 0000000..6e6e039 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders_test.go @@ -0,0 +1,2870 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "math" + "net/url" + "reflect" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsonrw/bsonrwtest" + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" +) + +func TestDefaultValueDecoders(t *testing.T) { + var dvd DefaultValueDecoders + var wrong = func(string, string) string { return "wrong" } + + type mybool bool + type myint8 int8 + type myint16 int16 + type myint32 int32 + type myint64 int64 + type myint int + type myuint8 uint8 + type myuint16 uint16 + type myuint32 uint32 + type myuint64 uint64 + type myuint uint + type myfloat32 float32 + type myfloat64 float64 + type mystring string + + const cansetreflectiontest = "cansetreflectiontest" + const cansettest = "cansettest" + + now := time.Now().Truncate(time.Millisecond) + d128 := primitive.NewDecimal128(12345, 67890) + var pbool = func(b bool) *bool { return &b } + var pi32 = func(i32 int32) *int32 { return &i32 } + var pi64 = func(i64 int64) *int64 { return &i64 } + + type subtest struct { + name string + val interface{} + dctx *DecodeContext + llvrw *bsonrwtest.ValueReaderWriter + invoke 
bsonrwtest.Invoked + err error + } + + testCases := []struct { + name string + vd ValueDecoder + subtests []subtest + }{ + { + "BooleanDecodeValue", + ValueDecoderFunc(dvd.BooleanDecodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Boolean}, + bsonrwtest.Nothing, + ValueDecoderError{Name: "BooleanDecodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: reflect.ValueOf(wrong)}, + }, + { + "type not boolean", + bool(false), + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.String}, + bsonrwtest.Nothing, + fmt.Errorf("cannot decode %v into a boolean", bsontype.String), + }, + { + "fast path", + bool(true), + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Boolean, Return: bool(true)}, + bsonrwtest.ReadBoolean, + nil, + }, + { + "reflection path", + mybool(true), + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Boolean, Return: bool(true)}, + bsonrwtest.ReadBoolean, + nil, + }, + { + "reflection path error", + mybool(true), + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Boolean, Return: bool(true), Err: errors.New("ReadBoolean Error"), ErrAfter: bsonrwtest.ReadBoolean}, + bsonrwtest.ReadBoolean, errors.New("ReadBoolean Error"), + }, + { + "can set false", + cansettest, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Boolean}, + bsonrwtest.Nothing, + ValueDecoderError{Name: "BooleanDecodeValue", Kinds: []reflect.Kind{reflect.Bool}}, + }, + }, + }, + { + "IntDecodeValue", + ValueDecoderFunc(dvd.IntDecodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(0)}, + bsonrwtest.ReadInt32, + ValueDecoderError{ + Name: "IntDecodeValue", + Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, + Received: reflect.ValueOf(wrong), + }, + }, + { + "type not int32/int64", + 0, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.String}, + 
bsonrwtest.Nothing, + fmt.Errorf("cannot decode %v into an integer type", bsontype.String), + }, + { + "ReadInt32 error", + 0, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(0), Err: errors.New("ReadInt32 error"), ErrAfter: bsonrwtest.ReadInt32}, + bsonrwtest.ReadInt32, + errors.New("ReadInt32 error"), + }, + { + "ReadInt64 error", + 0, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int64, Return: int64(0), Err: errors.New("ReadInt64 error"), ErrAfter: bsonrwtest.ReadInt64}, + bsonrwtest.ReadInt64, + errors.New("ReadInt64 error"), + }, + { + "ReadDouble error", + 0, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Double, Return: float64(0), Err: errors.New("ReadDouble error"), ErrAfter: bsonrwtest.ReadDouble}, + bsonrwtest.ReadDouble, + errors.New("ReadDouble error"), + }, + { + "ReadDouble", int64(3), &DecodeContext{}, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Double, Return: float64(3.00)}, bsonrwtest.ReadDouble, + nil, + }, + { + "ReadDouble (truncate)", int64(3), &DecodeContext{Truncate: true}, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Double, Return: float64(3.14)}, bsonrwtest.ReadDouble, + nil, + }, + { + "ReadDouble (no truncate)", int64(0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Double, Return: float64(3.14)}, bsonrwtest.ReadDouble, + errors.New("IntDecodeValue can only truncate float64 to an integer type when truncation is enabled"), + }, + { + "ReadDouble overflows int64", int64(0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Double, Return: math.MaxFloat64}, bsonrwtest.ReadDouble, + fmt.Errorf("%g overflows int64", math.MaxFloat64), + }, + {"int8/fast path", int8(127), nil, &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(127)}, bsonrwtest.ReadInt32, nil}, + {"int16/fast path", int16(32676), nil, &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(32676)}, bsonrwtest.ReadInt32, nil}, + {"int32/fast path", int32(1234), 
nil, &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(1234)}, bsonrwtest.ReadInt32, nil}, + {"int64/fast path", int64(1234), nil, &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int64, Return: int64(1234)}, bsonrwtest.ReadInt64, nil}, + {"int/fast path", int(1234), nil, &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int64, Return: int64(1234)}, bsonrwtest.ReadInt64, nil}, + { + "int8/fast path - nil", (*int8)(nil), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(0)}, bsonrwtest.ReadInt32, + ValueDecoderError{ + Name: "IntDecodeValue", + Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, + Received: reflect.ValueOf((*int8)(nil)), + }, + }, + { + "int16/fast path - nil", (*int16)(nil), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(0)}, bsonrwtest.ReadInt32, + ValueDecoderError{ + Name: "IntDecodeValue", + Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, + Received: reflect.ValueOf((*int16)(nil)), + }, + }, + { + "int32/fast path - nil", (*int32)(nil), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(0)}, bsonrwtest.ReadInt32, + ValueDecoderError{ + Name: "IntDecodeValue", + Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, + Received: reflect.ValueOf((*int32)(nil)), + }, + }, + { + "int64/fast path - nil", (*int64)(nil), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(0)}, bsonrwtest.ReadInt32, + ValueDecoderError{ + Name: "IntDecodeValue", + Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, + Received: reflect.ValueOf((*int64)(nil)), + }, + }, + { + "int/fast path - nil", (*int)(nil), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(0)}, bsonrwtest.ReadInt32, + ValueDecoderError{ + Name: "IntDecodeValue", + Kinds: 
[]reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, + Received: reflect.ValueOf((*int)(nil)), + }, + }, + { + "int8/fast path - overflow", int8(0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(129)}, bsonrwtest.ReadInt32, + fmt.Errorf("%d overflows int8", 129), + }, + { + "int16/fast path - overflow", int16(0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(32768)}, bsonrwtest.ReadInt32, + fmt.Errorf("%d overflows int16", 32768), + }, + { + "int32/fast path - overflow", int32(0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int64, Return: int64(2147483648)}, bsonrwtest.ReadInt64, + fmt.Errorf("%d overflows int32", 2147483648), + }, + { + "int8/fast path - overflow (negative)", int8(0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(-129)}, bsonrwtest.ReadInt32, + fmt.Errorf("%d overflows int8", -129), + }, + { + "int16/fast path - overflow (negative)", int16(0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(-32769)}, bsonrwtest.ReadInt32, + fmt.Errorf("%d overflows int16", -32769), + }, + { + "int32/fast path - overflow (negative)", int32(0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int64, Return: int64(-2147483649)}, bsonrwtest.ReadInt64, + fmt.Errorf("%d overflows int32", -2147483649), + }, + { + "int8/reflection path", myint8(127), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(127)}, bsonrwtest.ReadInt32, + nil, + }, + { + "int16/reflection path", myint16(255), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(255)}, bsonrwtest.ReadInt32, + nil, + }, + { + "int32/reflection path", myint32(511), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(511)}, bsonrwtest.ReadInt32, + nil, + }, + { + "int64/reflection path", myint64(1023), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, 
Return: int32(1023)}, bsonrwtest.ReadInt32, + nil, + }, + { + "int/reflection path", myint(2047), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(2047)}, bsonrwtest.ReadInt32, + nil, + }, + { + "int8/reflection path - overflow", myint8(0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(129)}, bsonrwtest.ReadInt32, + fmt.Errorf("%d overflows int8", 129), + }, + { + "int16/reflection path - overflow", myint16(0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(32768)}, bsonrwtest.ReadInt32, + fmt.Errorf("%d overflows int16", 32768), + }, + { + "int32/reflection path - overflow", myint32(0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int64, Return: int64(2147483648)}, bsonrwtest.ReadInt64, + fmt.Errorf("%d overflows int32", 2147483648), + }, + { + "int8/reflection path - overflow (negative)", myint8(0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(-129)}, bsonrwtest.ReadInt32, + fmt.Errorf("%d overflows int8", -129), + }, + { + "int16/reflection path - overflow (negative)", myint16(0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(-32769)}, bsonrwtest.ReadInt32, + fmt.Errorf("%d overflows int16", -32769), + }, + { + "int32/reflection path - overflow (negative)", myint32(0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int64, Return: int64(-2147483649)}, bsonrwtest.ReadInt64, + fmt.Errorf("%d overflows int32", -2147483649), + }, + { + "can set false", + cansettest, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(0)}, + bsonrwtest.Nothing, + ValueDecoderError{ + Name: "IntDecodeValue", + Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, + }, + }, + }, + }, + { + "UintDecodeValue", + ValueDecoderFunc(dvd.UintDecodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: 
bsontype.Int32, Return: int32(0)}, + bsonrwtest.ReadInt32, + ValueDecoderError{ + Name: "UintDecodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: reflect.ValueOf(wrong), + }, + }, + { + "type not int32/int64", + 0, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.String}, + bsonrwtest.Nothing, + fmt.Errorf("cannot decode %v into an integer type", bsontype.String), + }, + { + "ReadInt32 error", + uint(0), + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(0), Err: errors.New("ReadInt32 error"), ErrAfter: bsonrwtest.ReadInt32}, + bsonrwtest.ReadInt32, + errors.New("ReadInt32 error"), + }, + { + "ReadInt64 error", + uint(0), + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int64, Return: int64(0), Err: errors.New("ReadInt64 error"), ErrAfter: bsonrwtest.ReadInt64}, + bsonrwtest.ReadInt64, + errors.New("ReadInt64 error"), + }, + { + "ReadDouble error", + 0, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Double, Return: float64(0), Err: errors.New("ReadDouble error"), ErrAfter: bsonrwtest.ReadDouble}, + bsonrwtest.ReadDouble, + errors.New("ReadDouble error"), + }, + { + "ReadDouble", uint64(3), &DecodeContext{}, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Double, Return: float64(3.00)}, bsonrwtest.ReadDouble, + nil, + }, + { + "ReadDouble (truncate)", uint64(3), &DecodeContext{Truncate: true}, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Double, Return: float64(3.14)}, bsonrwtest.ReadDouble, + nil, + }, + { + "ReadDouble (no truncate)", uint64(0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Double, Return: float64(3.14)}, bsonrwtest.ReadDouble, + errors.New("UintDecodeValue can only truncate float64 to an integer type when truncation is enabled"), + }, + { + "ReadDouble overflows int64", uint64(0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Double, Return: math.MaxFloat64}, bsonrwtest.ReadDouble, + 
fmt.Errorf("%g overflows int64", math.MaxFloat64), + }, + {"uint8/fast path", uint8(127), nil, &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(127)}, bsonrwtest.ReadInt32, nil}, + {"uint16/fast path", uint16(255), nil, &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(255)}, bsonrwtest.ReadInt32, nil}, + {"uint32/fast path", uint32(1234), nil, &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(1234)}, bsonrwtest.ReadInt32, nil}, + {"uint64/fast path", uint64(1234), nil, &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int64, Return: int64(1234)}, bsonrwtest.ReadInt64, nil}, + {"uint/fast path", uint(1234), nil, &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int64, Return: int64(1234)}, bsonrwtest.ReadInt64, nil}, + { + "uint8/fast path - nil", (*uint8)(nil), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(0)}, bsonrwtest.ReadInt32, + ValueDecoderError{ + Name: "UintDecodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: reflect.ValueOf((*uint8)(nil)), + }, + }, + { + "uint16/fast path - nil", (*uint16)(nil), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(0)}, bsonrwtest.ReadInt32, + ValueDecoderError{ + Name: "UintDecodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: reflect.ValueOf((*uint16)(nil)), + }, + }, + { + "uint32/fast path - nil", (*uint32)(nil), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(0)}, bsonrwtest.ReadInt32, + ValueDecoderError{ + Name: "UintDecodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: reflect.ValueOf((*uint32)(nil)), + }, + }, + { + "uint64/fast path - nil", (*uint64)(nil), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(0)}, 
bsonrwtest.ReadInt32, + ValueDecoderError{ + Name: "UintDecodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: reflect.ValueOf((*uint64)(nil)), + }, + }, + { + "uint/fast path - nil", (*uint)(nil), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(0)}, bsonrwtest.ReadInt32, + ValueDecoderError{ + Name: "UintDecodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: reflect.ValueOf((*uint)(nil)), + }, + }, + { + "uint8/fast path - overflow", uint8(0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(1 << 8)}, bsonrwtest.ReadInt32, + fmt.Errorf("%d overflows uint8", 1<<8), + }, + { + "uint16/fast path - overflow", uint16(0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(1 << 16)}, bsonrwtest.ReadInt32, + fmt.Errorf("%d overflows uint16", 1<<16), + }, + { + "uint32/fast path - overflow", uint32(0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int64, Return: int64(1 << 32)}, bsonrwtest.ReadInt64, + fmt.Errorf("%d overflows uint32", 1<<32), + }, + { + "uint8/fast path - overflow (negative)", uint8(0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(-1)}, bsonrwtest.ReadInt32, + fmt.Errorf("%d overflows uint8", -1), + }, + { + "uint16/fast path - overflow (negative)", uint16(0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(-1)}, bsonrwtest.ReadInt32, + fmt.Errorf("%d overflows uint16", -1), + }, + { + "uint32/fast path - overflow (negative)", uint32(0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int64, Return: int64(-1)}, bsonrwtest.ReadInt64, + fmt.Errorf("%d overflows uint32", -1), + }, + { + "uint64/fast path - overflow (negative)", uint64(0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int64, Return: int64(-1)}, bsonrwtest.ReadInt64, + 
fmt.Errorf("%d overflows uint64", -1), + }, + { + "uint/fast path - overflow (negative)", uint(0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int64, Return: int64(-1)}, bsonrwtest.ReadInt64, + fmt.Errorf("%d overflows uint", -1), + }, + { + "uint8/reflection path", myuint8(127), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(127)}, bsonrwtest.ReadInt32, + nil, + }, + { + "uint16/reflection path", myuint16(255), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(255)}, bsonrwtest.ReadInt32, + nil, + }, + { + "uint32/reflection path", myuint32(511), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(511)}, bsonrwtest.ReadInt32, + nil, + }, + { + "uint64/reflection path", myuint64(1023), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(1023)}, bsonrwtest.ReadInt32, + nil, + }, + { + "uint/reflection path", myuint(2047), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(2047)}, bsonrwtest.ReadInt32, + nil, + }, + { + "uint8/reflection path - overflow", myuint8(0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(1 << 8)}, bsonrwtest.ReadInt32, + fmt.Errorf("%d overflows uint8", 1<<8), + }, + { + "uint16/reflection path - overflow", myuint16(0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(1 << 16)}, bsonrwtest.ReadInt32, + fmt.Errorf("%d overflows uint16", 1<<16), + }, + { + "uint32/reflection path - overflow", myuint32(0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int64, Return: int64(1 << 32)}, bsonrwtest.ReadInt64, + fmt.Errorf("%d overflows uint32", 1<<32), + }, + { + "uint8/reflection path - overflow (negative)", myuint8(0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(-1)}, bsonrwtest.ReadInt32, + fmt.Errorf("%d overflows uint8", -1), + }, + { + "uint16/reflection path - overflow (negative)", myuint16(0), nil, + 
&bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(-1)}, bsonrwtest.ReadInt32, + fmt.Errorf("%d overflows uint16", -1), + }, + { + "uint32/reflection path - overflow (negative)", myuint32(0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int64, Return: int64(-1)}, bsonrwtest.ReadInt64, + fmt.Errorf("%d overflows uint32", -1), + }, + { + "uint64/reflection path - overflow (negative)", myuint64(0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int64, Return: int64(-1)}, bsonrwtest.ReadInt64, + fmt.Errorf("%d overflows uint64", -1), + }, + { + "uint/reflection path - overflow (negative)", myuint(0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int64, Return: int64(-1)}, bsonrwtest.ReadInt64, + fmt.Errorf("%d overflows uint", -1), + }, + { + "can set false", + cansettest, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(0)}, + bsonrwtest.Nothing, + ValueDecoderError{ + Name: "UintDecodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + }, + }, + }, + }, + { + "FloatDecodeValue", + ValueDecoderFunc(dvd.FloatDecodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Double, Return: float64(0)}, + bsonrwtest.ReadDouble, + ValueDecoderError{ + Name: "FloatDecodeValue", + Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, + Received: reflect.ValueOf(wrong), + }, + }, + { + "type not double", + 0, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.String}, + bsonrwtest.Nothing, + fmt.Errorf("cannot decode %v into a float32 or float64 type", bsontype.String), + }, + { + "ReadDouble error", + float64(0), + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Double, Return: float64(0), Err: errors.New("ReadDouble error"), ErrAfter: bsonrwtest.ReadDouble}, + bsonrwtest.ReadDouble, + errors.New("ReadDouble error"), + }, + { + "ReadInt32 error", + float64(0), + nil, + 
&bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(0), Err: errors.New("ReadInt32 error"), ErrAfter: bsonrwtest.ReadInt32}, + bsonrwtest.ReadInt32, + errors.New("ReadInt32 error"), + }, + { + "ReadInt64 error", + float64(0), + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int64, Return: int64(0), Err: errors.New("ReadInt64 error"), ErrAfter: bsonrwtest.ReadInt64}, + bsonrwtest.ReadInt64, + errors.New("ReadInt64 error"), + }, + { + "float64/int32", float32(32.0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(32)}, bsonrwtest.ReadInt32, + nil, + }, + { + "float64/int64", float32(64.0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int64, Return: int64(64)}, bsonrwtest.ReadInt64, + nil, + }, + { + "float32/fast path (equal)", float32(3.0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Double, Return: float64(3.0)}, bsonrwtest.ReadDouble, + nil, + }, + { + "float64/fast path", float64(3.14159), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Double, Return: float64(3.14159)}, bsonrwtest.ReadDouble, + nil, + }, + { + "float32/fast path (truncate)", float32(3.14), &DecodeContext{Truncate: true}, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Double, Return: float64(3.14)}, bsonrwtest.ReadDouble, + nil, + }, + { + "float32/fast path (no truncate)", float32(0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Double, Return: float64(3.14)}, bsonrwtest.ReadDouble, + errors.New("FloatDecodeValue can only convert float64 to float32 when truncation is allowed"), + }, + { + "float32/fast path - nil", (*float32)(nil), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Double, Return: float64(0)}, bsonrwtest.ReadDouble, + ValueDecoderError{ + Name: "FloatDecodeValue", + Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, + Received: reflect.ValueOf((*float32)(nil)), + }, + }, + { + "float64/fast path - nil", (*float64)(nil), nil, + &bsonrwtest.ValueReaderWriter{BSONType: 
bsontype.Double, Return: float64(0)}, bsonrwtest.ReadDouble, + ValueDecoderError{ + Name: "FloatDecodeValue", + Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, + Received: reflect.ValueOf((*float64)(nil)), + }, + }, + { + "float32/reflection path (equal)", myfloat32(3.0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Double, Return: float64(3.0)}, bsonrwtest.ReadDouble, + nil, + }, + { + "float64/reflection path", myfloat64(3.14159), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Double, Return: float64(3.14159)}, bsonrwtest.ReadDouble, + nil, + }, + { + "float32/reflection path (truncate)", myfloat32(3.14), &DecodeContext{Truncate: true}, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Double, Return: float64(3.14)}, bsonrwtest.ReadDouble, + nil, + }, + { + "float32/reflection path (no truncate)", myfloat32(0), nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Double, Return: float64(3.14)}, bsonrwtest.ReadDouble, + errors.New("FloatDecodeValue can only convert float64 to float32 when truncation is allowed"), + }, + { + "can set false", + cansettest, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Double, Return: float64(0)}, + bsonrwtest.Nothing, + ValueDecoderError{ + Name: "FloatDecodeValue", + Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, + }, + }, + }, + }, + { + "TimeDecodeValue", + ValueDecoderFunc(dvd.TimeDecodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.DateTime, Return: int64(0)}, + bsonrwtest.ReadDateTime, + ValueDecoderError{Name: "TimeDecodeValue", Types: []reflect.Type{tTime}, Received: reflect.ValueOf(wrong)}, + }, + { + "ReadDateTime error", + time.Time{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.DateTime, Return: int64(0), Err: errors.New("ReadDateTime error"), ErrAfter: bsonrwtest.ReadDateTime}, + bsonrwtest.ReadDateTime, + errors.New("ReadDateTime error"), + }, + { + "time.Time", + now, + nil, + 
&bsonrwtest.ValueReaderWriter{BSONType: bsontype.DateTime, Return: int64(now.UnixNano() / int64(time.Millisecond))}, + bsonrwtest.ReadDateTime, + nil, + }, + { + "can set false", + cansettest, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.DateTime, Return: int64(0)}, + bsonrwtest.Nothing, + ValueDecoderError{Name: "TimeDecodeValue", Types: []reflect.Type{tTime}}, + }, + }, + }, + { + "MapDecodeValue", + ValueDecoderFunc(dvd.MapDecodeValue), + []subtest{ + { + "wrong kind", + wrong, + nil, + &bsonrwtest.ValueReaderWriter{}, + bsonrwtest.Nothing, + ValueDecoderError{Name: "MapDecodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: reflect.ValueOf(wrong)}, + }, + { + "wrong kind (non-string key)", + map[int]interface{}{}, + nil, + &bsonrwtest.ValueReaderWriter{}, + bsonrwtest.Nothing, + ValueDecoderError{ + Name: "MapDecodeValue", + Kinds: []reflect.Kind{reflect.Map}, + Received: reflect.ValueOf(map[int]interface{}{}), + }, + }, + { + "ReadDocument Error", + make(map[string]interface{}), + nil, + &bsonrwtest.ValueReaderWriter{Err: errors.New("rd error"), ErrAfter: bsonrwtest.ReadDocument}, + bsonrwtest.ReadDocument, + errors.New("rd error"), + }, + { + "Lookup Error", + map[string]string{}, + &DecodeContext{Registry: NewRegistryBuilder().Build()}, + &bsonrwtest.ValueReaderWriter{}, + bsonrwtest.ReadDocument, + ErrNoDecoder{Type: reflect.TypeOf(string(""))}, + }, + { + "ReadElement Error", + make(map[string]interface{}), + &DecodeContext{Registry: buildDefaultRegistry()}, + &bsonrwtest.ValueReaderWriter{Err: errors.New("re error"), ErrAfter: bsonrwtest.ReadElement}, + bsonrwtest.ReadElement, + errors.New("re error"), + }, + { + "can set false", + cansettest, + nil, + &bsonrwtest.ValueReaderWriter{}, + bsonrwtest.Nothing, + ValueDecoderError{Name: "MapDecodeValue", Kinds: []reflect.Kind{reflect.Map}}, + }, + { + "wrong BSON type", + map[string]interface{}{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.String}, + bsonrwtest.Nothing, + 
errors.New("cannot decode string into a map[string]interface {}"), + }, + }, + }, + { + "ArrayDecodeValue", + ValueDecoderFunc(dvd.ArrayDecodeValue), + []subtest{ + { + "wrong kind", + wrong, + nil, + &bsonrwtest.ValueReaderWriter{}, + bsonrwtest.Nothing, + ValueDecoderError{Name: "ArrayDecodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: reflect.ValueOf(wrong)}, + }, + { + "can set false", + cansettest, + nil, + &bsonrwtest.ValueReaderWriter{}, + bsonrwtest.Nothing, + ValueDecoderError{Name: "ArrayDecodeValue", Kinds: []reflect.Kind{reflect.Array}}, + }, + { + "Not Type Array", + [1]interface{}{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.String}, + bsonrwtest.Nothing, + errors.New("cannot decode string into an array"), + }, + { + "ReadArray Error", + [1]interface{}{}, + nil, + &bsonrwtest.ValueReaderWriter{Err: errors.New("ra error"), ErrAfter: bsonrwtest.ReadArray, BSONType: bsontype.Array}, + bsonrwtest.ReadArray, + errors.New("ra error"), + }, + { + "Lookup Error", + [1]string{}, + &DecodeContext{Registry: NewRegistryBuilder().Build()}, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Array}, + bsonrwtest.ReadArray, + ErrNoDecoder{Type: reflect.TypeOf(string(""))}, + }, + { + "ReadValue Error", + [1]string{}, + &DecodeContext{Registry: buildDefaultRegistry()}, + &bsonrwtest.ValueReaderWriter{Err: errors.New("rv error"), ErrAfter: bsonrwtest.ReadValue, BSONType: bsontype.Array}, + bsonrwtest.ReadValue, + errors.New("rv error"), + }, + { + "DecodeValue Error", + [1]string{}, + &DecodeContext{Registry: buildDefaultRegistry()}, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Array}, + bsonrwtest.ReadValue, + errors.New("cannot decode array into a string type"), + }, + { + "Document but not D", + [1]string{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Type(0)}, + bsonrwtest.Nothing, + errors.New("cannot decode document into [1]string"), + }, + { + "EmbeddedDocument but not D", + [1]string{}, + nil, + 
&bsonrwtest.ValueReaderWriter{BSONType: bsontype.EmbeddedDocument}, + bsonrwtest.Nothing, + errors.New("cannot decode document into [1]string"), + }, + }, + }, + { + "SliceDecodeValue", + ValueDecoderFunc(dvd.SliceDecodeValue), + []subtest{ + { + "wrong kind", + wrong, + nil, + &bsonrwtest.ValueReaderWriter{}, + bsonrwtest.Nothing, + ValueDecoderError{Name: "SliceDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: reflect.ValueOf(wrong)}, + }, + { + "can set false", + cansettest, + nil, + &bsonrwtest.ValueReaderWriter{}, + bsonrwtest.Nothing, + ValueDecoderError{Name: "SliceDecodeValue", Kinds: []reflect.Kind{reflect.Slice}}, + }, + { + "Not Type Array", + []interface{}{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.String}, + bsonrwtest.Nothing, + errors.New("cannot decode string into a slice"), + }, + { + "ReadArray Error", + []interface{}{}, + nil, + &bsonrwtest.ValueReaderWriter{Err: errors.New("ra error"), ErrAfter: bsonrwtest.ReadArray, BSONType: bsontype.Array}, + bsonrwtest.ReadArray, + errors.New("ra error"), + }, + { + "Lookup Error", + []string{}, + &DecodeContext{Registry: NewRegistryBuilder().Build()}, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Array}, + bsonrwtest.ReadArray, + ErrNoDecoder{Type: reflect.TypeOf(string(""))}, + }, + { + "ReadValue Error", + []string{}, + &DecodeContext{Registry: buildDefaultRegistry()}, + &bsonrwtest.ValueReaderWriter{Err: errors.New("rv error"), ErrAfter: bsonrwtest.ReadValue, BSONType: bsontype.Array}, + bsonrwtest.ReadValue, + errors.New("rv error"), + }, + { + "DecodeValue Error", + []string{}, + &DecodeContext{Registry: buildDefaultRegistry()}, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Array}, + bsonrwtest.ReadValue, + errors.New("cannot decode array into a string type"), + }, + { + "Document but not D", + []string{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Type(0)}, + bsonrwtest.Nothing, + errors.New("cannot decode document into []string"), + }, + { + 
"EmbeddedDocument but not D", + []string{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.EmbeddedDocument}, + bsonrwtest.Nothing, + errors.New("cannot decode document into []string"), + }, + }, + }, + { + "ObjectIDDecodeValue", + ValueDecoderFunc(dvd.ObjectIDDecodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.ObjectID}, + bsonrwtest.Nothing, + ValueDecoderError{Name: "ObjectIDDecodeValue", Types: []reflect.Type{tOID}, Received: reflect.ValueOf(wrong)}, + }, + { + "type not objectID", + primitive.ObjectID{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.String}, + bsonrwtest.Nothing, + fmt.Errorf("cannot decode %v into an ObjectID", bsontype.String), + }, + { + "ReadObjectID Error", + primitive.ObjectID{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.ObjectID, Err: errors.New("roid error"), ErrAfter: bsonrwtest.ReadObjectID}, + bsonrwtest.ReadObjectID, + errors.New("roid error"), + }, + { + "can set false", + cansettest, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.ObjectID, Return: primitive.ObjectID{}}, + bsonrwtest.Nothing, + ValueDecoderError{Name: "ObjectIDDecodeValue", Types: []reflect.Type{tOID}}, + }, + { + "success", + primitive.ObjectID{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C}, + nil, + &bsonrwtest.ValueReaderWriter{ + BSONType: bsontype.ObjectID, + Return: primitive.ObjectID{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C}, + }, + bsonrwtest.ReadObjectID, + nil, + }, + }, + }, + { + "Decimal128DecodeValue", + ValueDecoderFunc(dvd.Decimal128DecodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Decimal128}, + bsonrwtest.Nothing, + ValueDecoderError{Name: "Decimal128DecodeValue", Types: []reflect.Type{tDecimal}, Received: reflect.ValueOf(wrong)}, + }, + { + "type not decimal128", + primitive.Decimal128{}, + nil, + 
&bsonrwtest.ValueReaderWriter{BSONType: bsontype.String}, + bsonrwtest.Nothing, + fmt.Errorf("cannot decode %v into a primitive.Decimal128", bsontype.String), + }, + { + "ReadDecimal128 Error", + primitive.Decimal128{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Decimal128, Err: errors.New("rd128 error"), ErrAfter: bsonrwtest.ReadDecimal128}, + bsonrwtest.ReadDecimal128, + errors.New("rd128 error"), + }, + { + "can set false", + cansettest, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Decimal128, Return: d128}, + bsonrwtest.Nothing, + ValueDecoderError{Name: "Decimal128DecodeValue", Types: []reflect.Type{tDecimal}}, + }, + { + "success", + d128, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Decimal128, Return: d128}, + bsonrwtest.ReadDecimal128, + nil, + }, + }, + }, + { + "JSONNumberDecodeValue", + ValueDecoderFunc(dvd.JSONNumberDecodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.ObjectID}, + bsonrwtest.Nothing, + ValueDecoderError{Name: "JSONNumberDecodeValue", Types: []reflect.Type{tJSONNumber}, Received: reflect.ValueOf(wrong)}, + }, + { + "type not double/int32/int64", + json.Number(""), + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.String}, + bsonrwtest.Nothing, + fmt.Errorf("cannot decode %v into a json.Number", bsontype.String), + }, + { + "ReadDouble Error", + json.Number(""), + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Double, Err: errors.New("rd error"), ErrAfter: bsonrwtest.ReadDouble}, + bsonrwtest.ReadDouble, + errors.New("rd error"), + }, + { + "ReadInt32 Error", + json.Number(""), + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Err: errors.New("ri32 error"), ErrAfter: bsonrwtest.ReadInt32}, + bsonrwtest.ReadInt32, + errors.New("ri32 error"), + }, + { + "ReadInt64 Error", + json.Number(""), + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int64, Err: errors.New("ri64 error"), ErrAfter: 
bsonrwtest.ReadInt64}, + bsonrwtest.ReadInt64, + errors.New("ri64 error"), + }, + { + "can set false", + cansettest, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.ObjectID, Return: primitive.ObjectID{}}, + bsonrwtest.Nothing, + ValueDecoderError{Name: "JSONNumberDecodeValue", Types: []reflect.Type{tJSONNumber}}, + }, + { + "success/double", + json.Number("3.14159"), + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Double, Return: float64(3.14159)}, + bsonrwtest.ReadDouble, + nil, + }, + { + "success/int32", + json.Number("12345"), + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32, Return: int32(12345)}, + bsonrwtest.ReadInt32, + nil, + }, + { + "success/int64", + json.Number("1234567890"), + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int64, Return: int64(1234567890)}, + bsonrwtest.ReadInt64, + nil, + }, + }, + }, + { + "URLDecodeValue", + ValueDecoderFunc(dvd.URLDecodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32}, + bsonrwtest.Nothing, + fmt.Errorf("cannot decode %v into a *url.URL", bsontype.Int32), + }, + { + "type not *url.URL", + int64(0), + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.String, Return: string("http://example.com")}, + bsonrwtest.ReadString, + ValueDecoderError{Name: "URLDecodeValue", Types: []reflect.Type{tURL}, Received: reflect.ValueOf(int64(0))}, + }, + { + "ReadString error", + url.URL{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.String, Err: errors.New("rs error"), ErrAfter: bsonrwtest.ReadString}, + bsonrwtest.ReadString, + errors.New("rs error"), + }, + { + "url.Parse error", + url.URL{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.String, Return: string("not-valid-%%%%://")}, + bsonrwtest.ReadString, + errors.New("parse not-valid-%%%%://: first path segment in URL cannot contain colon"), + }, + { + "can set false", + cansettest, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: 
bsontype.String, Return: string("http://example.com")}, + bsonrwtest.Nothing, + ValueDecoderError{Name: "URLDecodeValue", Types: []reflect.Type{tURL}}, + }, + { + "url.URL", + url.URL{Scheme: "http", Host: "example.com"}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.String, Return: string("http://example.com")}, + bsonrwtest.ReadString, + nil, + }, + }, + }, + { + "ByteSliceDecodeValue", + ValueDecoderFunc(dvd.ByteSliceDecodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Int32}, + bsonrwtest.Nothing, + fmt.Errorf("cannot decode %v into a []byte", bsontype.Int32), + }, + { + "type not []byte", + int64(0), + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Binary, Return: bsoncore.Value{Type: bsontype.Binary}}, + bsonrwtest.Nothing, + ValueDecoderError{Name: "ByteSliceDecodeValue", Types: []reflect.Type{tByteSlice}, Received: reflect.ValueOf(int64(0))}, + }, + { + "ReadBinary error", + []byte{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Binary, Err: errors.New("rb error"), ErrAfter: bsonrwtest.ReadBinary}, + bsonrwtest.ReadBinary, + errors.New("rb error"), + }, + { + "incorrect subtype", + []byte{}, + nil, + &bsonrwtest.ValueReaderWriter{ + BSONType: bsontype.Binary, + Return: bsoncore.Value{ + Type: bsontype.Binary, + Data: bsoncore.AppendBinary(nil, 0xFF, []byte{0x01, 0x02, 0x03}), + }, + }, + bsonrwtest.ReadBinary, + fmt.Errorf("ByteSliceDecodeValue can only be used to decode subtype 0x00 for %s, got %v", bsontype.Binary, byte(0xFF)), + }, + { + "can set false", + cansettest, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Binary, Return: bsoncore.AppendBinary(nil, 0x00, []byte{0x01, 0x02, 0x03})}, + bsonrwtest.Nothing, + ValueDecoderError{Name: "ByteSliceDecodeValue", Types: []reflect.Type{tByteSlice}}, + }, + }, + }, + { + "ValueUnmarshalerDecodeValue", + ValueDecoderFunc(dvd.ValueUnmarshalerDecodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + 
nil, + bsonrwtest.Nothing, + ValueDecoderError{ + Name: "ValueUnmarshalerDecodeValue", + Types: []reflect.Type{tValueUnmarshaler}, + Received: reflect.ValueOf(wrong), + }, + }, + { + "copy error", + &testValueUnmarshaler{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.String, Err: errors.New("copy error"), ErrAfter: bsonrwtest.ReadString}, + bsonrwtest.ReadString, + errors.New("copy error"), + }, + { + "ValueUnmarshaler", + &testValueUnmarshaler{t: bsontype.String, val: bsoncore.AppendString(nil, "hello, world")}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.String, Return: string("hello, world")}, + bsonrwtest.ReadString, + nil, + }, + }, + }, + { + "UnmarshalerDecodeValue", + ValueDecoderFunc(dvd.UnmarshalerDecodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + nil, + bsonrwtest.Nothing, + ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: reflect.ValueOf(wrong)}, + }, + { + "copy error", + &testUnmarshaler{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.String, Err: errors.New("copy error"), ErrAfter: bsonrwtest.ReadString}, + bsonrwtest.ReadString, + errors.New("copy error"), + }, + { + "Unmarshaler", + testUnmarshaler{Val: bsoncore.AppendDouble(nil, 3.14159)}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Double, Return: float64(3.14159)}, + bsonrwtest.ReadDouble, + nil, + }, + }, + }, + { + "PointerCodec.DecodeValue", + NewPointerCodec(), + []subtest{ + { + "not valid", nil, nil, nil, bsonrwtest.Nothing, + ValueDecoderError{Name: "PointerCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: reflect.Value{}}, + }, + { + "can set", cansettest, nil, nil, bsonrwtest.Nothing, + ValueDecoderError{Name: "PointerCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Ptr}}, + }, + { + "No Decoder", &wrong, &DecodeContext{Registry: buildDefaultRegistry()}, nil, bsonrwtest.Nothing, + ErrNoDecoder{Type: reflect.TypeOf(wrong)}, + }, + }, + }, + { + 
"BinaryDecodeValue", + ValueDecoderFunc(dvd.BinaryDecodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + &bsonrwtest.ValueReaderWriter{}, + bsonrwtest.Nothing, + ValueDecoderError{Name: "BinaryDecodeValue", Types: []reflect.Type{tBinary}, Received: reflect.ValueOf(wrong)}, + }, + { + "type not binary", + primitive.Binary{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.String}, + bsonrwtest.Nothing, + fmt.Errorf("cannot decode %v into a Binary", bsontype.String), + }, + { + "ReadBinary Error", + primitive.Binary{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Binary, Err: errors.New("rb error"), ErrAfter: bsonrwtest.ReadBinary}, + bsonrwtest.ReadBinary, + errors.New("rb error"), + }, + { + "Binary/success", + primitive.Binary{Data: []byte{0x01, 0x02, 0x03}, Subtype: 0xFF}, + nil, + &bsonrwtest.ValueReaderWriter{ + BSONType: bsontype.Binary, + Return: bsoncore.Value{ + Type: bsontype.Binary, + Data: bsoncore.AppendBinary(nil, 0xFF, []byte{0x01, 0x02, 0x03}), + }, + }, + bsonrwtest.ReadBinary, + nil, + }, + }, + }, + { + "UndefinedDecodeValue", + ValueDecoderFunc(dvd.UndefinedDecodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Undefined}, + bsonrwtest.Nothing, + ValueDecoderError{Name: "UndefinedDecodeValue", Types: []reflect.Type{tUndefined}, Received: reflect.ValueOf(wrong)}, + }, + { + "type not undefined", + primitive.Undefined{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.String}, + bsonrwtest.Nothing, + fmt.Errorf("cannot decode %v into an Undefined", bsontype.String), + }, + { + "ReadUndefined Error", + primitive.Undefined{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Undefined, Err: errors.New("ru error"), ErrAfter: bsonrwtest.ReadUndefined}, + bsonrwtest.ReadUndefined, + errors.New("ru error"), + }, + { + "ReadUndefined/success", + primitive.Undefined{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Undefined}, + 
bsonrwtest.ReadUndefined, + nil, + }, + }, + }, + { + "DateTimeDecodeValue", + ValueDecoderFunc(dvd.DateTimeDecodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.DateTime}, + bsonrwtest.Nothing, + ValueDecoderError{Name: "DateTimeDecodeValue", Types: []reflect.Type{tDateTime}, Received: reflect.ValueOf(wrong)}, + }, + { + "type not datetime", + primitive.DateTime(0), + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.String}, + bsonrwtest.Nothing, + fmt.Errorf("cannot decode %v into a DateTime", bsontype.String), + }, + { + "ReadDateTime Error", + primitive.DateTime(0), + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.DateTime, Err: errors.New("rdt error"), ErrAfter: bsonrwtest.ReadDateTime}, + bsonrwtest.ReadDateTime, + errors.New("rdt error"), + }, + { + "success", + primitive.DateTime(1234567890), + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.DateTime, Return: int64(1234567890)}, + bsonrwtest.ReadDateTime, + nil, + }, + }, + }, + { + "NullDecodeValue", + ValueDecoderFunc(dvd.NullDecodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Null}, + bsonrwtest.Nothing, + ValueDecoderError{Name: "NullDecodeValue", Types: []reflect.Type{tNull}, Received: reflect.ValueOf(wrong)}, + }, + { + "type not null", + primitive.Null{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.String}, + bsonrwtest.Nothing, + fmt.Errorf("cannot decode %v into a Null", bsontype.String), + }, + { + "ReadNull Error", + primitive.Null{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Null, Err: errors.New("rn error"), ErrAfter: bsonrwtest.ReadNull}, + bsonrwtest.ReadNull, + errors.New("rn error"), + }, + { + "success", + primitive.Null{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Null}, + bsonrwtest.ReadNull, + nil, + }, + }, + }, + { + "RegexDecodeValue", + ValueDecoderFunc(dvd.RegexDecodeValue), + []subtest{ + { + 
"wrong type", + wrong, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Regex}, + bsonrwtest.Nothing, + ValueDecoderError{Name: "RegexDecodeValue", Types: []reflect.Type{tRegex}, Received: reflect.ValueOf(wrong)}, + }, + { + "type not regex", + primitive.Regex{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.String}, + bsonrwtest.Nothing, + fmt.Errorf("cannot decode %v into a Regex", bsontype.String), + }, + { + "ReadRegex Error", + primitive.Regex{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Regex, Err: errors.New("rr error"), ErrAfter: bsonrwtest.ReadRegex}, + bsonrwtest.ReadRegex, + errors.New("rr error"), + }, + { + "success", + primitive.Regex{Pattern: "foo", Options: "bar"}, + nil, + &bsonrwtest.ValueReaderWriter{ + BSONType: bsontype.Regex, + Return: bsoncore.Value{ + Type: bsontype.Regex, + Data: bsoncore.AppendRegex(nil, "foo", "bar"), + }, + }, + bsonrwtest.ReadRegex, + nil, + }, + }, + }, + { + "DBPointerDecodeValue", + ValueDecoderFunc(dvd.DBPointerDecodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.DBPointer}, + bsonrwtest.Nothing, + ValueDecoderError{Name: "DBPointerDecodeValue", Types: []reflect.Type{tDBPointer}, Received: reflect.ValueOf(wrong)}, + }, + { + "type not dbpointer", + primitive.DBPointer{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.String}, + bsonrwtest.Nothing, + fmt.Errorf("cannot decode %v into a DBPointer", bsontype.String), + }, + { + "ReadDBPointer Error", + primitive.DBPointer{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.DBPointer, Err: errors.New("rdbp error"), ErrAfter: bsonrwtest.ReadDBPointer}, + bsonrwtest.ReadDBPointer, + errors.New("rdbp error"), + }, + { + "success", + primitive.DBPointer{ + DB: "foobar", + Pointer: primitive.ObjectID{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C}, + }, + nil, + &bsonrwtest.ValueReaderWriter{ + BSONType: bsontype.DBPointer, + Return: 
bsoncore.Value{ + Type: bsontype.DBPointer, + Data: bsoncore.AppendDBPointer( + nil, "foobar", primitive.ObjectID{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C}, + ), + }, + }, + bsonrwtest.ReadDBPointer, + nil, + }, + }, + }, + { + "TimestampDecodeValue", + ValueDecoderFunc(dvd.TimestampDecodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Timestamp}, + bsonrwtest.Nothing, + ValueDecoderError{Name: "TimestampDecodeValue", Types: []reflect.Type{tTimestamp}, Received: reflect.ValueOf(wrong)}, + }, + { + "type not timestamp", + primitive.Timestamp{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.String}, + bsonrwtest.Nothing, + fmt.Errorf("cannot decode %v into a Timestamp", bsontype.String), + }, + { + "ReadTimestamp Error", + primitive.Timestamp{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Timestamp, Err: errors.New("rt error"), ErrAfter: bsonrwtest.ReadTimestamp}, + bsonrwtest.ReadTimestamp, + errors.New("rt error"), + }, + { + "success", + primitive.Timestamp{T: 12345, I: 67890}, + nil, + &bsonrwtest.ValueReaderWriter{ + BSONType: bsontype.Timestamp, + Return: bsoncore.Value{ + Type: bsontype.Timestamp, + Data: bsoncore.AppendTimestamp(nil, 12345, 67890), + }, + }, + bsonrwtest.ReadTimestamp, + nil, + }, + }, + }, + { + "MinKeyDecodeValue", + ValueDecoderFunc(dvd.MinKeyDecodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.MinKey}, + bsonrwtest.Nothing, + ValueDecoderError{Name: "MinKeyDecodeValue", Types: []reflect.Type{tMinKey}, Received: reflect.ValueOf(wrong)}, + }, + { + "type not null", + primitive.MinKey{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.String}, + bsonrwtest.Nothing, + fmt.Errorf("cannot decode %v into a MinKey", bsontype.String), + }, + { + "ReadMinKey Error", + primitive.MinKey{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.MinKey, Err: 
errors.New("rn error"), ErrAfter: bsonrwtest.ReadMinKey}, + bsonrwtest.ReadMinKey, + errors.New("rn error"), + }, + { + "success", + primitive.MinKey{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.MinKey}, + bsonrwtest.ReadMinKey, + nil, + }, + }, + }, + { + "MaxKeyDecodeValue", + ValueDecoderFunc(dvd.MaxKeyDecodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.MaxKey}, + bsonrwtest.Nothing, + ValueDecoderError{Name: "MaxKeyDecodeValue", Types: []reflect.Type{tMaxKey}, Received: reflect.ValueOf(wrong)}, + }, + { + "type not null", + primitive.MaxKey{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.String}, + bsonrwtest.Nothing, + fmt.Errorf("cannot decode %v into a MaxKey", bsontype.String), + }, + { + "ReadMaxKey Error", + primitive.MaxKey{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.MaxKey, Err: errors.New("rn error"), ErrAfter: bsonrwtest.ReadMaxKey}, + bsonrwtest.ReadMaxKey, + errors.New("rn error"), + }, + { + "success", + primitive.MaxKey{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.MaxKey}, + bsonrwtest.ReadMaxKey, + nil, + }, + }, + }, + { + "JavaScriptDecodeValue", + ValueDecoderFunc(dvd.JavaScriptDecodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.JavaScript, Return: ""}, + bsonrwtest.Nothing, + ValueDecoderError{Name: "BinaryDecodeValue", Types: []reflect.Type{tJavaScript}, Received: reflect.ValueOf(wrong)}, + }, + { + "type not Javascript", + primitive.JavaScript(""), + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.String}, + bsonrwtest.Nothing, + fmt.Errorf("cannot decode %v into a primitive.JavaScript", bsontype.String), + }, + { + "ReadJavascript Error", + primitive.JavaScript(""), + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.JavaScript, Err: errors.New("rjs error"), ErrAfter: bsonrwtest.ReadJavascript}, + bsonrwtest.ReadJavascript, + errors.New("rjs 
error"), + }, + { + "JavaScript/success", + primitive.JavaScript("var hello = 'world';"), + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.JavaScript, Return: "var hello = 'world';"}, + bsonrwtest.ReadJavascript, + nil, + }, + }, + }, + { + "SymbolDecodeValue", + ValueDecoderFunc(dvd.SymbolDecodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Symbol, Return: ""}, + bsonrwtest.Nothing, + ValueDecoderError{Name: "BinaryDecodeValue", Types: []reflect.Type{tSymbol}, Received: reflect.ValueOf(wrong)}, + }, + { + "type not Symbol", + primitive.Symbol(""), + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.String}, + bsonrwtest.Nothing, + fmt.Errorf("cannot decode %v into a primitive.Symbol", bsontype.String), + }, + { + "ReadSymbol Error", + primitive.Symbol(""), + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Symbol, Err: errors.New("rjs error"), ErrAfter: bsonrwtest.ReadSymbol}, + bsonrwtest.ReadSymbol, + errors.New("rjs error"), + }, + { + "Symbol/success", + primitive.Symbol("var hello = 'world';"), + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Symbol, Return: "var hello = 'world';"}, + bsonrwtest.ReadSymbol, + nil, + }, + }, + }, + { + "CoreDocumentDecodeValue", + ValueDecoderFunc(dvd.CoreDocumentDecodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + &bsonrwtest.ValueReaderWriter{}, + bsonrwtest.Nothing, + ValueDecoderError{ + Name: "CoreDocumentDecodeValue", + Types: []reflect.Type{tCoreDocument}, + Received: reflect.ValueOf(wrong), + }, + }, + { + "*bsoncore.Document is nil", + (*bsoncore.Document)(nil), + nil, + nil, + bsonrwtest.Nothing, + ValueDecoderError{ + Name: "CoreDocumentDecodeValue", + Types: []reflect.Type{tCoreDocument}, + Received: reflect.ValueOf((*bsoncore.Document)(nil)), + }, + }, + { + "Copy error", + bsoncore.Document{}, + nil, + &bsonrwtest.ValueReaderWriter{Err: errors.New("copy error"), ErrAfter: bsonrwtest.ReadDocument}, + 
bsonrwtest.ReadDocument, + errors.New("copy error"), + }, + }, + }, + { + "StructCodec.DecodeValue", + defaultStructCodec, + []subtest{ + { + "Not struct", + reflect.New(reflect.TypeOf(struct{ Foo string }{})).Elem().Interface(), + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.String}, + bsonrwtest.Nothing, + errors.New("cannot decode string into a struct { Foo string }"), + }, + }, + }, + { + "CodeWithScopeDecodeValue", + ValueDecoderFunc(dvd.CodeWithScopeDecodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.CodeWithScope}, + bsonrwtest.Nothing, + ValueDecoderError{ + Name: "CodeWithScopeDecodeValue", + Types: []reflect.Type{tCodeWithScope}, + Received: reflect.ValueOf(wrong), + }, + }, + { + "type not codewithscope", + primitive.CodeWithScope{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.String}, + bsonrwtest.Nothing, + fmt.Errorf("cannot decode %v into a primitive.CodeWithScope", bsontype.String), + }, + { + "ReadCodeWithScope Error", + primitive.CodeWithScope{}, + nil, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.CodeWithScope, Err: errors.New("rcws error"), ErrAfter: bsonrwtest.ReadCodeWithScope}, + bsonrwtest.ReadCodeWithScope, + errors.New("rcws error"), + }, + { + "decodeDocument Error", + primitive.CodeWithScope{ + Code: "var hello = 'world';", + Scope: primitive.D{{"foo", nil}}, + }, + &DecodeContext{Registry: buildDefaultRegistry()}, + &bsonrwtest.ValueReaderWriter{BSONType: bsontype.CodeWithScope, Err: errors.New("dd error"), ErrAfter: bsonrwtest.ReadElement}, + bsonrwtest.ReadElement, + errors.New("dd error"), + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + for _, rc := range tc.subtests { + t.Run(rc.name, func(t *testing.T) { + var dc DecodeContext + if rc.dctx != nil { + dc = *rc.dctx + } + llvrw := new(bsonrwtest.ValueReaderWriter) + if rc.llvrw != nil { + llvrw = rc.llvrw + } + llvrw.T = t + // var got interface{} 
+ if rc.val == cansetreflectiontest { // We're doing a CanSet reflection test + err := tc.vd.DecodeValue(dc, llvrw, reflect.Value{}) + if !compareErrors(err, rc.err) { + t.Errorf("Errors do not match. got %v; want %v", err, rc.err) + } + + val := reflect.New(reflect.TypeOf(rc.val)).Elem() + err = tc.vd.DecodeValue(dc, llvrw, val) + if !compareErrors(err, rc.err) { + t.Errorf("Errors do not match. got %v; want %v", err, rc.err) + } + return + } + if rc.val == cansettest { // We're doing an IsValid and CanSet test + wanterr, ok := rc.err.(ValueDecoderError) + if !ok { + t.Fatalf("Error must be a DecodeValueError, but got a %T", rc.err) + } + + err := tc.vd.DecodeValue(dc, llvrw, reflect.Value{}) + wanterr.Received = reflect.ValueOf(nil) + if !compareErrors(err, wanterr) { + t.Errorf("Errors do not match. got %v; want %v", err, wanterr) + } + + err = tc.vd.DecodeValue(dc, llvrw, reflect.ValueOf(int(12345))) + wanterr.Received = reflect.ValueOf(int(12345)) + if !compareErrors(err, wanterr) { + t.Errorf("Errors do not match. got %v; want %v", err, wanterr) + } + return + } + var val reflect.Value + if rtype := reflect.TypeOf(rc.val); rtype != nil { + val = reflect.New(rtype).Elem() + } + want := rc.val + defer func() { + if err := recover(); err != nil { + fmt.Println(t.Name()) + panic(err) + } + }() + err := tc.vd.DecodeValue(dc, llvrw, val) + if !compareErrors(err, rc.err) { + t.Errorf("Errors do not match. got %v; want %v", err, rc.err) + } + invoked := llvrw.Invoked + if !cmp.Equal(invoked, rc.invoke) { + t.Errorf("Incorrect method invoked. got %v; want %v", invoked, rc.invoke) + } + var got interface{} + if val.IsValid() && val.CanInterface() { + got = val.Interface() + } + if rc.err == nil && !cmp.Equal(got, want, cmp.Comparer(compareDecimal128)) { + t.Errorf("Values do not match. 
got (%T)%v; want (%T)%v", got, got, want, want) + } + }) + } + }) + } + + t.Run("CodeWithScopeCodec/DecodeValue/success", func(t *testing.T) { + dc := DecodeContext{Registry: buildDefaultRegistry()} + b := bsoncore.BuildDocument(nil, + bsoncore.AppendCodeWithScopeElement( + nil, "foo", "var hello = 'world';", + buildDocument(bsoncore.AppendNullElement(nil, "bar")), + ), + ) + dvr := bsonrw.NewBSONDocumentReader(b) + dr, err := dvr.ReadDocument() + noerr(t, err) + _, vr, err := dr.ReadElement() + noerr(t, err) + + want := primitive.CodeWithScope{ + Code: "var hello = 'world';", + Scope: primitive.D{{"bar", nil}}, + } + val := reflect.New(tCodeWithScope).Elem() + err = dvd.CodeWithScopeDecodeValue(dc, vr, val) + noerr(t, err) + + got := val.Interface().(primitive.CodeWithScope) + if got.Code != want.Code && !cmp.Equal(got.Scope, want.Scope) { + t.Errorf("CodeWithScopes do not match. got %v; want %v", got, want) + } + }) + t.Run("ValueUnmarshalerDecodeValue/UnmarshalBSONValue error", func(t *testing.T) { + var dc DecodeContext + llvrw := &bsonrwtest.ValueReaderWriter{BSONType: bsontype.String, Return: string("hello, world!")} + llvrw.T = t + + want := errors.New("ubsonv error") + valUnmarshaler := &testValueUnmarshaler{err: want} + got := dvd.ValueUnmarshalerDecodeValue(dc, llvrw, reflect.ValueOf(valUnmarshaler)) + if !compareErrors(got, want) { + t.Errorf("Errors do not match. got %v; want %v", got, want) + } + }) + t.Run("ValueUnmarshalerDecodeValue/Unaddressable value", func(t *testing.T) { + var dc DecodeContext + llvrw := &bsonrwtest.ValueReaderWriter{BSONType: bsontype.String, Return: string("hello, world!")} + llvrw.T = t + + val := reflect.ValueOf(testValueUnmarshaler{}) + want := ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} + got := dvd.ValueUnmarshalerDecodeValue(dc, llvrw, val) + if !compareErrors(got, want) { + t.Errorf("Errors do not match. 
got %v; want %v", got, want) + } + }) + + t.Run("SliceCodec/DecodeValue/can't set slice", func(t *testing.T) { + var val []string + want := ValueDecoderError{Name: "SliceDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: reflect.ValueOf(val)} + got := dvd.SliceDecodeValue(DecodeContext{}, nil, reflect.ValueOf(val)) + if !compareErrors(got, want) { + t.Errorf("Errors do not match. got %v; want %v", got, want) + } + }) + t.Run("SliceCodec/DecodeValue/too many elements", func(t *testing.T) { + idx, doc := bsoncore.AppendDocumentStart(nil) + aidx, doc := bsoncore.AppendArrayElementStart(doc, "foo") + doc = bsoncore.AppendStringElement(doc, "0", "foo") + doc = bsoncore.AppendStringElement(doc, "1", "bar") + doc, err := bsoncore.AppendArrayEnd(doc, aidx) + noerr(t, err) + doc, err = bsoncore.AppendDocumentEnd(doc, idx) + noerr(t, err) + dvr := bsonrw.NewBSONDocumentReader(doc) + noerr(t, err) + dr, err := dvr.ReadDocument() + noerr(t, err) + _, vr, err := dr.ReadElement() + noerr(t, err) + var val [1]string + want := fmt.Errorf("more elements returned in array than can fit inside %T", val) + + dc := DecodeContext{Registry: buildDefaultRegistry()} + got := dvd.ArrayDecodeValue(dc, vr, reflect.ValueOf(val)) + if !compareErrors(got, want) { + t.Errorf("Errors do not match. 
got %v; want %v", got, want) + } + }) + + t.Run("success path", func(t *testing.T) { + oid := primitive.NewObjectID() + oids := []primitive.ObjectID{primitive.NewObjectID(), primitive.NewObjectID(), primitive.NewObjectID()} + var str = new(string) + *str = "bar" + now := time.Now().Truncate(time.Millisecond).UTC() + murl, err := url.Parse("https://mongodb.com/random-url?hello=world") + if err != nil { + t.Errorf("Error parsing URL: %v", err) + t.FailNow() + } + decimal128, err := primitive.ParseDecimal128("1.5e10") + if err != nil { + t.Errorf("Error parsing decimal128: %v", err) + t.FailNow() + } + + testCases := []struct { + name string + value interface{} + b []byte + err error + }{ + { + "map[string]int", + map[string]int32{"foo": 1}, + []byte{ + 0x0E, 0x00, 0x00, 0x00, + 0x10, 'f', 'o', 'o', 0x00, + 0x01, 0x00, 0x00, 0x00, + 0x00, + }, + nil, + }, + { + "map[string]primitive.ObjectID", + map[string]primitive.ObjectID{"foo": oid}, + func() []byte { + idx, doc := bsoncore.AppendDocumentStart(nil) + doc = bsoncore.AppendObjectIDElement(doc, "foo", oid) + doc, _ = bsoncore.AppendDocumentEnd(doc, idx) + return doc + }(), + nil, + }, + { + "map[string][]int32", + map[string][]int32{"Z": {1, 2, 3}}, + buildDocumentArray(func(doc []byte) []byte { + doc = bsoncore.AppendInt32Element(doc, "0", 1) + doc = bsoncore.AppendInt32Element(doc, "1", 2) + return bsoncore.AppendInt32Element(doc, "2", 3) + }), + nil, + }, + { + "map[string][]primitive.ObjectID", + map[string][]primitive.ObjectID{"Z": oids}, + buildDocumentArray(func(doc []byte) []byte { + doc = bsoncore.AppendObjectIDElement(doc, "0", oids[0]) + doc = bsoncore.AppendObjectIDElement(doc, "1", oids[1]) + return bsoncore.AppendObjectIDElement(doc, "2", oids[2]) + }), + nil, + }, + { + "map[string][]json.Number(int64)", + map[string][]json.Number{"Z": {json.Number("5"), json.Number("10")}}, + buildDocumentArray(func(doc []byte) []byte { + doc = bsoncore.AppendInt64Element(doc, "0", 5) + return 
bsoncore.AppendInt64Element(doc, "1", 10) + }), + nil, + }, + { + "map[string][]json.Number(float64)", + map[string][]json.Number{"Z": {json.Number("5"), json.Number("10.1")}}, + buildDocumentArray(func(doc []byte) []byte { + doc = bsoncore.AppendInt64Element(doc, "0", 5) + return bsoncore.AppendDoubleElement(doc, "1", 10.1) + }), + nil, + }, + { + "map[string][]*url.URL", + map[string][]*url.URL{"Z": {murl}}, + buildDocumentArray(func(doc []byte) []byte { + return bsoncore.AppendStringElement(doc, "0", murl.String()) + }), + nil, + }, + { + "map[string][]primitive.Decimal128", + map[string][]primitive.Decimal128{"Z": {decimal128}}, + buildDocumentArray(func(doc []byte) []byte { + return bsoncore.AppendDecimal128Element(doc, "0", decimal128) + }), + nil, + }, + { + "map[mystring]interface{}", + map[mystring]interface{}{"pi": 3.14159}, + buildDocument(bsoncore.AppendDoubleElement(nil, "pi", 3.14159)), + nil, + }, + { + "-", + struct { + A string `bson:"-"` + }{ + A: "", + }, + []byte{0x05, 0x00, 0x00, 0x00, 0x00}, + nil, + }, + { + "omitempty", + struct { + A string `bson:",omitempty"` + }{ + A: "", + }, + []byte{0x05, 0x00, 0x00, 0x00, 0x00}, + nil, + }, + { + "omitempty, empty time", + struct { + A time.Time `bson:",omitempty"` + }{ + A: time.Time{}, + }, + []byte{0x05, 0x00, 0x00, 0x00, 0x00}, + nil, + }, + { + "no private fields", + noPrivateFields{a: "should be empty"}, + []byte{0x05, 0x00, 0x00, 0x00, 0x00}, + nil, + }, + { + "minsize", + struct { + A int64 `bson:",minsize"` + }{ + A: 12345, + }, + buildDocument(bsoncore.AppendInt32Element(nil, "a", 12345)), + nil, + }, + { + "inline", + struct { + Foo struct { + A int64 `bson:",minsize"` + } `bson:",inline"` + }{ + Foo: struct { + A int64 `bson:",minsize"` + }{ + A: 12345, + }, + }, + buildDocument(bsoncore.AppendInt32Element(nil, "a", 12345)), + nil, + }, + { + "inline map", + struct { + Foo map[string]string `bson:",inline"` + }{ + Foo: map[string]string{"foo": "bar"}, + }, + 
buildDocument(bsoncore.AppendStringElement(nil, "foo", "bar")), + nil, + }, + { + "alternate name bson:name", + struct { + A string `bson:"foo"` + }{ + A: "bar", + }, + buildDocument(bsoncore.AppendStringElement(nil, "foo", "bar")), + nil, + }, + { + "alternate name", + struct { + A string `bson:"foo"` + }{ + A: "bar", + }, + buildDocument(bsoncore.AppendStringElement(nil, "foo", "bar")), + nil, + }, + { + "inline, omitempty", + struct { + A string + Foo zeroTest `bson:"omitempty,inline"` + }{ + A: "bar", + Foo: zeroTest{true}, + }, + buildDocument(bsoncore.AppendStringElement(nil, "a", "bar")), + nil, + }, + { + "struct{}", + struct { + A bool + B int32 + C int64 + D uint16 + E uint64 + F float64 + G string + H map[string]string + I []byte + K [2]string + L struct { + M string + } + Q primitive.ObjectID + T []struct{} + Y json.Number + Z time.Time + AA json.Number + AB *url.URL + AC primitive.Decimal128 + AD *time.Time + AE *testValueUnmarshaler + AF *bool + AG *bool + AH *int32 + AI *int64 + AJ *primitive.ObjectID + AK *primitive.ObjectID + AL testValueUnmarshaler + AM interface{} + AN interface{} + AO interface{} + AP primitive.D + AQ primitive.A + AR [2]primitive.E + AS []byte + AT map[string]interface{} + AU primitive.CodeWithScope + AV primitive.M + AW primitive.D + AX map[string]interface{} + AY []primitive.E + AZ interface{} + }{ + A: true, + B: 123, + C: 456, + D: 789, + E: 101112, + F: 3.14159, + G: "Hello, world", + H: map[string]string{"foo": "bar"}, + I: []byte{0x01, 0x02, 0x03}, + K: [2]string{"baz", "qux"}, + L: struct { + M string + }{ + M: "foobar", + }, + Q: oid, + T: nil, + Y: json.Number("5"), + Z: now, + AA: json.Number("10.1"), + AB: murl, + AC: decimal128, + AD: &now, + AE: &testValueUnmarshaler{t: bsontype.String, val: bsoncore.AppendString(nil, "hello, world!")}, + AF: func(b bool) *bool { return &b }(true), + AG: nil, + AH: func(i32 int32) *int32 { return &i32 }(12345), + AI: func(i64 int64) *int64 { return &i64 }(1234567890), + AJ: &oid, 
+ AK: nil, + AL: testValueUnmarshaler{t: bsontype.String, val: bsoncore.AppendString(nil, "hello, world!")}, + AM: "hello, world", + AN: int32(12345), + AO: oid, + AP: primitive.D{{"foo", "bar"}}, + AQ: primitive.A{"foo", "bar"}, + AR: [2]primitive.E{{"hello", "world"}, {"pi", 3.14159}}, + AS: nil, + AT: nil, + AU: primitive.CodeWithScope{Code: "var hello = 'world';", Scope: primitive.D{{"pi", 3.14159}}}, + AV: primitive.M{"foo": primitive.M{"bar": "baz"}}, + AW: primitive.D{{"foo", primitive.D{{"bar", "baz"}}}}, + AX: map[string]interface{}{"foo": map[string]interface{}{"bar": "baz"}}, + AY: []primitive.E{{"foo", []primitive.E{{"bar", "baz"}}}}, + AZ: primitive.D{{"foo", primitive.D{{"bar", "baz"}}}}, + }, + buildDocument(func(doc []byte) []byte { + doc = bsoncore.AppendBooleanElement(doc, "a", true) + doc = bsoncore.AppendInt32Element(doc, "b", 123) + doc = bsoncore.AppendInt64Element(doc, "c", 456) + doc = bsoncore.AppendInt32Element(doc, "d", 789) + doc = bsoncore.AppendInt64Element(doc, "e", 101112) + doc = bsoncore.AppendDoubleElement(doc, "f", 3.14159) + doc = bsoncore.AppendStringElement(doc, "g", "Hello, world") + doc = bsoncore.AppendDocumentElement(doc, "h", buildDocument(bsoncore.AppendStringElement(nil, "foo", "bar"))) + doc = bsoncore.AppendBinaryElement(doc, "i", 0x00, []byte{0x01, 0x02, 0x03}) + doc = bsoncore.AppendArrayElement(doc, "k", + buildArray(bsoncore.AppendStringElement(bsoncore.AppendStringElement(nil, "0", "baz"), "1", "qux")), + ) + doc = bsoncore.AppendDocumentElement(doc, "l", buildDocument(bsoncore.AppendStringElement(nil, "m", "foobar"))) + doc = bsoncore.AppendObjectIDElement(doc, "q", oid) + doc = bsoncore.AppendNullElement(doc, "t") + doc = bsoncore.AppendInt64Element(doc, "y", 5) + doc = bsoncore.AppendDateTimeElement(doc, "z", now.UnixNano()/int64(time.Millisecond)) + doc = bsoncore.AppendDoubleElement(doc, "aa", 10.1) + doc = bsoncore.AppendStringElement(doc, "ab", murl.String()) + doc = bsoncore.AppendDecimal128Element(doc, 
"ac", decimal128) + doc = bsoncore.AppendDateTimeElement(doc, "ad", now.UnixNano()/int64(time.Millisecond)) + doc = bsoncore.AppendStringElement(doc, "ae", "hello, world!") + doc = bsoncore.AppendBooleanElement(doc, "af", true) + doc = bsoncore.AppendNullElement(doc, "ag") + doc = bsoncore.AppendInt32Element(doc, "ah", 12345) + doc = bsoncore.AppendInt32Element(doc, "ai", 1234567890) + doc = bsoncore.AppendObjectIDElement(doc, "aj", oid) + doc = bsoncore.AppendNullElement(doc, "ak") + doc = bsoncore.AppendStringElement(doc, "al", "hello, world!") + doc = bsoncore.AppendStringElement(doc, "am", "hello, world") + doc = bsoncore.AppendInt32Element(doc, "an", 12345) + doc = bsoncore.AppendObjectIDElement(doc, "ao", oid) + doc = bsoncore.AppendDocumentElement(doc, "ap", buildDocument(bsoncore.AppendStringElement(nil, "foo", "bar"))) + doc = bsoncore.AppendArrayElement(doc, "aq", + buildArray(bsoncore.AppendStringElement(bsoncore.AppendStringElement(nil, "0", "foo"), "1", "bar")), + ) + doc = bsoncore.AppendDocumentElement(doc, "ar", + buildDocument(bsoncore.AppendDoubleElement(bsoncore.AppendStringElement(nil, "hello", "world"), "pi", 3.14159)), + ) + doc = bsoncore.AppendNullElement(doc, "as") + doc = bsoncore.AppendNullElement(doc, "at") + doc = bsoncore.AppendCodeWithScopeElement(doc, "au", + "var hello = 'world';", buildDocument(bsoncore.AppendDoubleElement(nil, "pi", 3.14159)), + ) + for _, name := range [5]string{"av", "aw", "ax", "ay", "az"} { + doc = bsoncore.AppendDocumentElement(doc, name, buildDocument( + bsoncore.AppendDocumentElement(nil, "foo", buildDocument( + bsoncore.AppendStringElement(nil, "bar", "baz"), + )), + )) + } + return doc + }(nil)), + nil, + }, + { + "struct{[]interface{}}", + struct { + A []bool + B []int32 + C []int64 + D []uint16 + E []uint64 + F []float64 + G []string + H []map[string]string + I [][]byte + K [1][2]string + L []struct { + M string + } + N [][]string + R []primitive.ObjectID + T []struct{} + W []map[string]struct{} + X 
[]map[string]struct{} + Y []map[string]struct{} + Z []time.Time + AA []json.Number + AB []*url.URL + AC []primitive.Decimal128 + AD []*time.Time + AE []*testValueUnmarshaler + AF []*bool + AG []*int32 + AH []*int64 + AI []*primitive.ObjectID + AJ []primitive.D + AK []primitive.A + AL [][2]primitive.E + }{ + A: []bool{true}, + B: []int32{123}, + C: []int64{456}, + D: []uint16{789}, + E: []uint64{101112}, + F: []float64{3.14159}, + G: []string{"Hello, world"}, + H: []map[string]string{{"foo": "bar"}}, + I: [][]byte{{0x01, 0x02, 0x03}}, + K: [1][2]string{{"baz", "qux"}}, + L: []struct { + M string + }{ + { + M: "foobar", + }, + }, + N: [][]string{{"foo", "bar"}}, + R: oids, + T: nil, + W: nil, + X: []map[string]struct{}{}, // Should be empty BSON Array + Y: []map[string]struct{}{{}}, // Should be BSON array with one element, an empty BSON SubDocument + Z: []time.Time{now, now}, + AA: []json.Number{json.Number("5"), json.Number("10.1")}, + AB: []*url.URL{murl}, + AC: []primitive.Decimal128{decimal128}, + AD: []*time.Time{&now, &now}, + AE: []*testValueUnmarshaler{ + {t: bsontype.String, val: bsoncore.AppendString(nil, "hello")}, + {t: bsontype.String, val: bsoncore.AppendString(nil, "world")}, + }, + AF: []*bool{pbool(true), nil}, + AG: []*int32{pi32(12345), nil}, + AH: []*int64{pi64(1234567890), nil, pi64(9012345678)}, + AI: []*primitive.ObjectID{&oid, nil}, + AJ: []primitive.D{{{"foo", "bar"}}, nil}, + AK: []primitive.A{{"foo", "bar"}, nil}, + AL: [][2]primitive.E{{{"hello", "world"}, {"pi", 3.14159}}}, + }, + buildDocument(func(doc []byte) []byte { + doc = appendArrayElement(doc, "a", bsoncore.AppendBooleanElement(nil, "0", true)) + doc = appendArrayElement(doc, "b", bsoncore.AppendInt32Element(nil, "0", 123)) + doc = appendArrayElement(doc, "c", bsoncore.AppendInt64Element(nil, "0", 456)) + doc = appendArrayElement(doc, "d", bsoncore.AppendInt32Element(nil, "0", 789)) + doc = appendArrayElement(doc, "e", bsoncore.AppendInt64Element(nil, "0", 101112)) + doc = 
appendArrayElement(doc, "f", bsoncore.AppendDoubleElement(nil, "0", 3.14159)) + doc = appendArrayElement(doc, "g", bsoncore.AppendStringElement(nil, "0", "Hello, world")) + doc = appendArrayElement(doc, "h", buildDocumentElement("0", bsoncore.AppendStringElement(nil, "foo", "bar"))) + doc = appendArrayElement(doc, "i", bsoncore.AppendBinaryElement(nil, "0", 0x00, []byte{0x01, 0x02, 0x03})) + doc = appendArrayElement(doc, "k", + buildArrayElement("0", + bsoncore.AppendStringElement(bsoncore.AppendStringElement(nil, "0", "baz"), "1", "qux")), + ) + doc = appendArrayElement(doc, "l", buildDocumentElement("0", bsoncore.AppendStringElement(nil, "m", "foobar"))) + doc = appendArrayElement(doc, "n", + buildArrayElement("0", + bsoncore.AppendStringElement(bsoncore.AppendStringElement(nil, "0", "foo"), "1", "bar")), + ) + doc = appendArrayElement(doc, "r", + bsoncore.AppendObjectIDElement( + bsoncore.AppendObjectIDElement( + bsoncore.AppendObjectIDElement(nil, + "0", oids[0]), + "1", oids[1]), + "2", oids[2]), + ) + doc = bsoncore.AppendNullElement(doc, "t") + doc = bsoncore.AppendNullElement(doc, "w") + doc = appendArrayElement(doc, "x", nil) + doc = appendArrayElement(doc, "y", buildDocumentElement("0", nil)) + doc = appendArrayElement(doc, "z", + bsoncore.AppendDateTimeElement( + bsoncore.AppendDateTimeElement( + nil, "0", now.UnixNano()/int64(time.Millisecond)), + "1", now.UnixNano()/int64(time.Millisecond)), + ) + doc = appendArrayElement(doc, "aa", bsoncore.AppendDoubleElement(bsoncore.AppendInt64Element(nil, "0", 5), "1", 10.10)) + doc = appendArrayElement(doc, "ab", bsoncore.AppendStringElement(nil, "0", murl.String())) + doc = appendArrayElement(doc, "ac", bsoncore.AppendDecimal128Element(nil, "0", decimal128)) + doc = appendArrayElement(doc, "ad", + bsoncore.AppendDateTimeElement( + bsoncore.AppendDateTimeElement(nil, "0", now.UnixNano()/int64(time.Millisecond)), + "1", now.UnixNano()/int64(time.Millisecond)), + ) + doc = appendArrayElement(doc, "ae", + 
bsoncore.AppendStringElement(bsoncore.AppendStringElement(nil, "0", "hello"), "1", "world"), + ) + doc = appendArrayElement(doc, "af", + bsoncore.AppendNullElement(bsoncore.AppendBooleanElement(nil, "0", true), "1"), + ) + doc = appendArrayElement(doc, "ag", + bsoncore.AppendNullElement(bsoncore.AppendInt32Element(nil, "0", 12345), "1"), + ) + doc = appendArrayElement(doc, "ah", + bsoncore.AppendInt64Element( + bsoncore.AppendNullElement(bsoncore.AppendInt64Element(nil, "0", 1234567890), "1"), + "2", 9012345678, + ), + ) + doc = appendArrayElement(doc, "ai", + bsoncore.AppendNullElement(bsoncore.AppendObjectIDElement(nil, "0", oid), "1"), + ) + doc = appendArrayElement(doc, "aj", + bsoncore.AppendNullElement( + bsoncore.AppendDocumentElement(nil, "0", buildDocument(bsoncore.AppendStringElement(nil, "foo", "bar"))), + "1", + ), + ) + doc = appendArrayElement(doc, "ak", + bsoncore.AppendNullElement( + buildArrayElement("0", + bsoncore.AppendStringElement(bsoncore.AppendStringElement(nil, "0", "foo"), "1", "bar"), + ), + "1", + ), + ) + doc = appendArrayElement(doc, "al", + buildDocumentElement( + "0", + bsoncore.AppendDoubleElement(bsoncore.AppendStringElement(nil, "hello", "world"), "pi", 3.14159), + ), + ) + return doc + }(nil)), + nil, + }, + } + + t.Run("Decode", func(t *testing.T) { + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + vr := bsonrw.NewBSONDocumentReader(tc.b) + reg := buildDefaultRegistry() + vtype := reflect.TypeOf(tc.value) + dec, err := reg.LookupDecoder(vtype) + noerr(t, err) + + gotVal := reflect.New(reflect.TypeOf(tc.value)).Elem() + err = dec.DecodeValue(DecodeContext{Registry: reg}, vr, gotVal) + noerr(t, err) + + got := gotVal.Interface() + want := tc.value + if diff := cmp.Diff( + got, want, + cmp.Comparer(compareDecimal128), + cmp.Comparer(compareNoPrivateFields), + cmp.Comparer(compareZeroTest), + cmp.Comparer(compareTime), + ); diff != "" { + t.Errorf("difference:\n%s", diff) + t.Errorf("Values are not 
equal.\ngot: %#v\nwant:%#v", got, want) + } + }) + } + }) + }) + + t.Run("EmptyInterfaceDecodeValue", func(t *testing.T) { + t.Run("DecodeValue", func(t *testing.T) { + testCases := []struct { + name string + val interface{} + bsontype bsontype.Type + }{ + { + "Double - float64", + float64(3.14159), + bsontype.Double, + }, + { + "String - string", + string("foo bar baz"), + bsontype.String, + }, + { + "Array - primitive.A", + primitive.A{3.14159}, + bsontype.Array, + }, + { + "Binary - Binary", + primitive.Binary{Subtype: 0xFF, Data: []byte{0x01, 0x02, 0x03}}, + bsontype.Binary, + }, + { + "Undefined - Undefined", + primitive.Undefined{}, + bsontype.Undefined, + }, + { + "ObjectID - primitive.ObjectID", + primitive.ObjectID{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C}, + bsontype.ObjectID, + }, + { + "Boolean - bool", + bool(true), + bsontype.Boolean, + }, + { + "DateTime - DateTime", + primitive.DateTime(1234567890), + bsontype.DateTime, + }, + { + "Null - Null", + nil, + bsontype.Null, + }, + { + "Regex - Regex", + primitive.Regex{Pattern: "foo", Options: "bar"}, + bsontype.Regex, + }, + { + "DBPointer - DBPointer", + primitive.DBPointer{ + DB: "foobar", + Pointer: primitive.ObjectID{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C}, + }, + bsontype.DBPointer, + }, + { + "JavaScript - JavaScript", + primitive.JavaScript("var foo = 'bar';"), + bsontype.JavaScript, + }, + { + "Symbol - Symbol", + primitive.Symbol("foobarbazlolz"), + bsontype.Symbol, + }, + { + "Int32 - int32", + int32(123456), + bsontype.Int32, + }, + { + "Int64 - int64", + int64(1234567890), + bsontype.Int64, + }, + { + "Timestamp - Timestamp", + primitive.Timestamp{T: 12345, I: 67890}, + bsontype.Timestamp, + }, + { + "Decimal128 - decimal.Decimal128", + primitive.NewDecimal128(12345, 67890), + bsontype.Decimal128, + }, + { + "MinKey - MinKey", + primitive.MinKey{}, + bsontype.MinKey, + }, + { + "MaxKey - MaxKey", + primitive.MaxKey{}, + 
bsontype.MaxKey, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + llvr := &bsonrwtest.ValueReaderWriter{BSONType: tc.bsontype} + + t.Run("Type Map failure", func(t *testing.T) { + if tc.bsontype == bsontype.Null { + t.Skip() + } + val := reflect.New(tEmpty).Elem() + dc := DecodeContext{Registry: NewRegistryBuilder().Build()} + want := ErrNoTypeMapEntry{Type: tc.bsontype} + got := dvd.EmptyInterfaceDecodeValue(dc, llvr, val) + if !compareErrors(got, want) { + t.Errorf("Errors are not equal. got %v; want %v", got, want) + } + }) + + t.Run("Lookup failure", func(t *testing.T) { + if tc.bsontype == bsontype.Null { + t.Skip() + } + val := reflect.New(tEmpty).Elem() + dc := DecodeContext{ + Registry: NewRegistryBuilder(). + RegisterTypeMapEntry(tc.bsontype, reflect.TypeOf(tc.val)). + Build(), + } + want := ErrNoDecoder{Type: reflect.TypeOf(tc.val)} + got := dvd.EmptyInterfaceDecodeValue(dc, llvr, val) + if !compareErrors(got, want) { + t.Errorf("Errors are not equal. got %v; want %v", got, want) + } + }) + + t.Run("DecodeValue failure", func(t *testing.T) { + if tc.bsontype == bsontype.Null { + t.Skip() + } + want := errors.New("DecodeValue failure error") + llc := &llCodec{t: t, err: want} + dc := DecodeContext{ + Registry: NewRegistryBuilder(). + RegisterDecoder(reflect.TypeOf(tc.val), llc). + RegisterTypeMapEntry(tc.bsontype, reflect.TypeOf(tc.val)). + Build(), + } + got := dvd.EmptyInterfaceDecodeValue(dc, llvr, reflect.New(tEmpty).Elem()) + if !compareErrors(got, want) { + t.Errorf("Errors are not equal. got %v; want %v", got, want) + } + }) + + t.Run("Success", func(t *testing.T) { + want := tc.val + llc := &llCodec{t: t, decodeval: tc.val} + dc := DecodeContext{ + Registry: NewRegistryBuilder(). + RegisterDecoder(reflect.TypeOf(tc.val), llc). + RegisterTypeMapEntry(tc.bsontype, reflect.TypeOf(tc.val)). 
+ Build(), + } + got := reflect.New(tEmpty).Elem() + err := dvd.EmptyInterfaceDecodeValue(dc, llvr, got) + noerr(t, err) + if !cmp.Equal(got.Interface(), want, cmp.Comparer(compareDecimal128)) { + t.Errorf("Did not receive expected value. got %v; want %v", got.Interface(), want) + } + }) + }) + } + }) + + t.Run("non-interface{}", func(t *testing.T) { + val := uint64(1234567890) + want := ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: reflect.ValueOf(val)} + got := dvd.EmptyInterfaceDecodeValue(DecodeContext{}, nil, reflect.ValueOf(val)) + if !compareErrors(got, want) { + t.Errorf("Errors are not equal. got %v; want %v", got, want) + } + }) + + t.Run("nil *interface{}", func(t *testing.T) { + var val interface{} + want := ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: reflect.ValueOf(val)} + got := dvd.EmptyInterfaceDecodeValue(DecodeContext{}, nil, reflect.ValueOf(val)) + if !compareErrors(got, want) { + t.Errorf("Errors are not equal. got %v; want %v", got, want) + } + }) + + t.Run("no type registered", func(t *testing.T) { + llvr := &bsonrwtest.ValueReaderWriter{BSONType: bsontype.Double} + want := ErrNoTypeMapEntry{Type: bsontype.Double} + val := reflect.New(tEmpty).Elem() + got := dvd.EmptyInterfaceDecodeValue(DecodeContext{Registry: NewRegistryBuilder().Build()}, llvr, val) + if !compareErrors(got, want) { + t.Errorf("Errors are not equal. got %v; want %v", got, want) + } + }) + t.Run("top level document", func(t *testing.T) { + data := bsoncore.BuildDocument(nil, bsoncore.AppendDoubleElement(nil, "pi", 3.14159)) + vr := bsonrw.NewBSONDocumentReader(data) + want := primitive.D{{"pi", 3.14159}} + var got interface{} + val := reflect.ValueOf(&got).Elem() + err := dvd.EmptyInterfaceDecodeValue(DecodeContext{Registry: buildDefaultRegistry()}, vr, val) + noerr(t, err) + if !cmp.Equal(got, want) { + t.Errorf("Did not get correct result. 
got %v; want %v", got, want) + } + }) + }) +} + +type testValueUnmarshaler struct { + t bsontype.Type + val []byte + err error +} + +func (tvu *testValueUnmarshaler) UnmarshalBSONValue(t bsontype.Type, val []byte) error { + tvu.t, tvu.val = t, val + return tvu.err +} + +type testUnmarshaler struct { + Val []byte + Err error +} + +func (tvu *testUnmarshaler) UnmarshalBSON(val []byte) error { + tvu.Val = val + return tvu.Err +} + +func (tvu testValueUnmarshaler) Equal(tvu2 testValueUnmarshaler) bool { + return tvu.t == tvu2.t && bytes.Equal(tvu.val, tvu2.val) +} + +// buildDocumentArray inserts vals inside of an array inside of a document. +func buildDocumentArray(fn func([]byte) []byte) []byte { + aix, doc := bsoncore.AppendArrayElementStart(nil, "Z") + doc = fn(doc) + doc, _ = bsoncore.AppendArrayEnd(doc, aix) + return buildDocument(doc) +} + +func buildArray(vals []byte) []byte { + aix, doc := bsoncore.AppendArrayStart(nil) + doc = append(doc, vals...) + doc, _ = bsoncore.AppendArrayEnd(doc, aix) + return doc +} + +func buildArrayElement(key string, vals []byte) []byte { + return appendArrayElement(nil, key, vals) +} + +func appendArrayElement(dst []byte, key string, vals []byte) []byte { + aix, doc := bsoncore.AppendArrayElementStart(dst, key) + doc = append(doc, vals...) + doc, _ = bsoncore.AppendArrayEnd(doc, aix) + return doc +} + +// buildDocument inserts elems inside of a document. +func buildDocument(elems []byte) []byte { + idx, doc := bsoncore.AppendDocumentStart(nil) + doc = append(doc, elems...) + doc, _ = bsoncore.AppendDocumentEnd(doc, idx) + return doc +} + +func buildDocumentElement(key string, elems []byte) []byte { + idx, doc := bsoncore.AppendDocumentElementStart(nil, key) + doc = append(doc, elems...) 
+ doc, _ = bsoncore.AppendDocumentEnd(doc, idx) + return doc +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go new file mode 100644 index 0000000..39ebfc7 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go @@ -0,0 +1,648 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "net/url" + "reflect" + "sync" + "time" + + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" +) + +var defaultValueEncoders DefaultValueEncoders + +var bvwPool = bsonrw.NewBSONValueWriterPool() + +var sliceWriterPool = sync.Pool{ + New: func() interface{} { + sw := make(bsonrw.SliceWriter, 0, 0) + return &sw + }, +} + +func encodeElement(ec EncodeContext, dw bsonrw.DocumentWriter, e primitive.E) error { + vw, err := dw.WriteDocumentElement(e.Key) + if err != nil { + return err + } + + if e.Value == nil { + return vw.WriteNull() + } + encoder, err := ec.LookupEncoder(reflect.TypeOf(e.Value)) + if err != nil { + return err + } + + err = encoder.EncodeValue(ec, vw, reflect.ValueOf(e.Value)) + if err != nil { + return err + } + return nil +} + +// DefaultValueEncoders is a namespace type for the default ValueEncoders used +// when creating a registry. +type DefaultValueEncoders struct{} + +// RegisterDefaultEncoders will register the encoder methods attached to DefaultValueEncoders with +// the provided RegistryBuilder. 
+func (dve DefaultValueEncoders) RegisterDefaultEncoders(rb *RegistryBuilder) { + if rb == nil { + panic(errors.New("argument to RegisterDefaultEncoders must not be nil")) + } + rb. + RegisterEncoder(tByteSlice, ValueEncoderFunc(dve.ByteSliceEncodeValue)). + RegisterEncoder(tTime, ValueEncoderFunc(dve.TimeEncodeValue)). + RegisterEncoder(tEmpty, ValueEncoderFunc(dve.EmptyInterfaceEncodeValue)). + RegisterEncoder(tOID, ValueEncoderFunc(dve.ObjectIDEncodeValue)). + RegisterEncoder(tDecimal, ValueEncoderFunc(dve.Decimal128EncodeValue)). + RegisterEncoder(tJSONNumber, ValueEncoderFunc(dve.JSONNumberEncodeValue)). + RegisterEncoder(tURL, ValueEncoderFunc(dve.URLEncodeValue)). + RegisterEncoder(tValueMarshaler, ValueEncoderFunc(dve.ValueMarshalerEncodeValue)). + RegisterEncoder(tMarshaler, ValueEncoderFunc(dve.MarshalerEncodeValue)). + RegisterEncoder(tProxy, ValueEncoderFunc(dve.ProxyEncodeValue)). + RegisterEncoder(tJavaScript, ValueEncoderFunc(dve.JavaScriptEncodeValue)). + RegisterEncoder(tSymbol, ValueEncoderFunc(dve.SymbolEncodeValue)). + RegisterEncoder(tBinary, ValueEncoderFunc(dve.BinaryEncodeValue)). + RegisterEncoder(tUndefined, ValueEncoderFunc(dve.UndefinedEncodeValue)). + RegisterEncoder(tDateTime, ValueEncoderFunc(dve.DateTimeEncodeValue)). + RegisterEncoder(tNull, ValueEncoderFunc(dve.NullEncodeValue)). + RegisterEncoder(tRegex, ValueEncoderFunc(dve.RegexEncodeValue)). + RegisterEncoder(tDBPointer, ValueEncoderFunc(dve.DBPointerEncodeValue)). + RegisterEncoder(tTimestamp, ValueEncoderFunc(dve.TimestampEncodeValue)). + RegisterEncoder(tMinKey, ValueEncoderFunc(dve.MinKeyEncodeValue)). + RegisterEncoder(tMaxKey, ValueEncoderFunc(dve.MaxKeyEncodeValue)). + RegisterEncoder(tCoreDocument, ValueEncoderFunc(dve.CoreDocumentEncodeValue)). + RegisterEncoder(tCodeWithScope, ValueEncoderFunc(dve.CodeWithScopeEncodeValue)). + RegisterDefaultEncoder(reflect.Bool, ValueEncoderFunc(dve.BooleanEncodeValue)). 
+ RegisterDefaultEncoder(reflect.Int, ValueEncoderFunc(dve.IntEncodeValue)). + RegisterDefaultEncoder(reflect.Int8, ValueEncoderFunc(dve.IntEncodeValue)). + RegisterDefaultEncoder(reflect.Int16, ValueEncoderFunc(dve.IntEncodeValue)). + RegisterDefaultEncoder(reflect.Int32, ValueEncoderFunc(dve.IntEncodeValue)). + RegisterDefaultEncoder(reflect.Int64, ValueEncoderFunc(dve.IntEncodeValue)). + RegisterDefaultEncoder(reflect.Uint, ValueEncoderFunc(dve.UintEncodeValue)). + RegisterDefaultEncoder(reflect.Uint8, ValueEncoderFunc(dve.UintEncodeValue)). + RegisterDefaultEncoder(reflect.Uint16, ValueEncoderFunc(dve.UintEncodeValue)). + RegisterDefaultEncoder(reflect.Uint32, ValueEncoderFunc(dve.UintEncodeValue)). + RegisterDefaultEncoder(reflect.Uint64, ValueEncoderFunc(dve.UintEncodeValue)). + RegisterDefaultEncoder(reflect.Float32, ValueEncoderFunc(dve.FloatEncodeValue)). + RegisterDefaultEncoder(reflect.Float64, ValueEncoderFunc(dve.FloatEncodeValue)). + RegisterDefaultEncoder(reflect.Array, ValueEncoderFunc(dve.ArrayEncodeValue)). + RegisterDefaultEncoder(reflect.Map, ValueEncoderFunc(dve.MapEncodeValue)). + RegisterDefaultEncoder(reflect.Slice, ValueEncoderFunc(dve.SliceEncodeValue)). + RegisterDefaultEncoder(reflect.String, ValueEncoderFunc(dve.StringEncodeValue)). + RegisterDefaultEncoder(reflect.Struct, &StructCodec{cache: make(map[reflect.Type]*structDescription), parser: DefaultStructTagParser}). + RegisterDefaultEncoder(reflect.Ptr, NewPointerCodec()) +} + +// BooleanEncodeValue is the ValueEncoderFunc for bool types. 
+func (dve DefaultValueEncoders) BooleanEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Kind() != reflect.Bool { + return ValueEncoderError{Name: "BooleanEncodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val} + } + return vw.WriteBoolean(val.Bool()) +} + +func fitsIn32Bits(i int64) bool { + return math.MinInt32 <= i && i <= math.MaxInt32 +} + +// IntEncodeValue is the ValueEncoderFunc for int types. +func (dve DefaultValueEncoders) IntEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + switch val.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32: + return vw.WriteInt32(int32(val.Int())) + case reflect.Int: + i64 := val.Int() + if fitsIn32Bits(i64) { + return vw.WriteInt32(int32(i64)) + } + return vw.WriteInt64(i64) + case reflect.Int64: + i64 := val.Int() + if ec.MinSize && fitsIn32Bits(i64) { + return vw.WriteInt32(int32(i64)) + } + return vw.WriteInt64(i64) + } + + return ValueEncoderError{ + Name: "IntEncodeValue", + Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, + Received: val, + } +} + +// UintEncodeValue is the ValueEncoderFunc for uint types. +func (dve DefaultValueEncoders) UintEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + switch val.Kind() { + case reflect.Uint8, reflect.Uint16: + return vw.WriteInt32(int32(val.Uint())) + case reflect.Uint, reflect.Uint32, reflect.Uint64: + u64 := val.Uint() + if ec.MinSize && u64 <= math.MaxInt32 { + return vw.WriteInt32(int32(u64)) + } + if u64 > math.MaxInt64 { + return fmt.Errorf("%d overflows int64", u64) + } + return vw.WriteInt64(int64(u64)) + } + + return ValueEncoderError{ + Name: "UintEncodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: val, + } +} + +// FloatEncodeValue is the ValueEncoderFunc for float types. 
+func (dve DefaultValueEncoders) FloatEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + switch val.Kind() { + case reflect.Float32, reflect.Float64: + return vw.WriteDouble(val.Float()) + } + + return ValueEncoderError{Name: "FloatEncodeValue", Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, Received: val} +} + +// StringEncodeValue is the ValueEncoderFunc for string types. +func (dve DefaultValueEncoders) StringEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if val.Kind() != reflect.String { + return ValueEncoderError{ + Name: "StringEncodeValue", + Kinds: []reflect.Kind{reflect.String}, + Received: val, + } + } + + return vw.WriteString(val.String()) +} + +// ObjectIDEncodeValue is the ValueEncoderFunc for primitive.ObjectID. +func (dve DefaultValueEncoders) ObjectIDEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tOID { + return ValueEncoderError{Name: "ObjectIDEncodeValue", Types: []reflect.Type{tOID}, Received: val} + } + return vw.WriteObjectID(val.Interface().(primitive.ObjectID)) +} + +// Decimal128EncodeValue is the ValueEncoderFunc for primitive.Decimal128. +func (dve DefaultValueEncoders) Decimal128EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tDecimal { + return ValueEncoderError{Name: "Decimal128EncodeValue", Types: []reflect.Type{tDecimal}, Received: val} + } + return vw.WriteDecimal128(val.Interface().(primitive.Decimal128)) +} + +// JSONNumberEncodeValue is the ValueEncoderFunc for json.Number. 
+func (dve DefaultValueEncoders) JSONNumberEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tJSONNumber { + return ValueEncoderError{Name: "JSONNumberEncodeValue", Types: []reflect.Type{tJSONNumber}, Received: val} + } + jsnum := val.Interface().(json.Number) + + // Attempt int first, then float64 + if i64, err := jsnum.Int64(); err == nil { + return dve.IntEncodeValue(ec, vw, reflect.ValueOf(i64)) + } + + f64, err := jsnum.Float64() + if err != nil { + return err + } + + return dve.FloatEncodeValue(ec, vw, reflect.ValueOf(f64)) +} + +// URLEncodeValue is the ValueEncoderFunc for url.URL. +func (dve DefaultValueEncoders) URLEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tURL { + return ValueEncoderError{Name: "URLEncodeValue", Types: []reflect.Type{tURL}, Received: val} + } + u := val.Interface().(url.URL) + return vw.WriteString(u.String()) +} + +// TimeEncodeValue is the ValueEncoderFunc for time.TIme. +func (dve DefaultValueEncoders) TimeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tTime { + return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val} + } + tt := val.Interface().(time.Time) + return vw.WriteDateTime(tt.Unix()*1000 + int64(tt.Nanosecond()/1e6)) +} + +// ByteSliceEncodeValue is the ValueEncoderFunc for []byte. +func (dve DefaultValueEncoders) ByteSliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tByteSlice { + return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val} + } + if val.IsNil() { + return vw.WriteNull() + } + return vw.WriteBinary(val.Interface().([]byte)) +} + +// MapEncodeValue is the ValueEncoderFunc for map[string]* types. 
+func (dve DefaultValueEncoders) MapEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String { + return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} + } + + if val.IsNil() { + // If we have a nill map but we can't WriteNull, that means we're probably trying to encode + // to a TopLevel document. We can't currently tell if this is what actually happened, but if + // there's a deeper underlying problem, the error will also be returned from WriteDocument, + // so just continue. The operations on a map reflection value are valid, so we can call + // MapKeys within mapEncodeValue without a problem. + err := vw.WriteNull() + if err == nil { + return nil + } + } + + dw, err := vw.WriteDocument() + if err != nil { + return err + } + + return dve.mapEncodeValue(ec, dw, val, nil) +} + +// mapEncodeValue handles encoding of the values of a map. The collisionFn returns +// true if the provided key exists, this is mainly used for inline maps in the +// struct codec. 
+func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, val reflect.Value, collisionFn func(string) bool) error { + + encoder, err := ec.LookupEncoder(val.Type().Elem()) + if err != nil { + return err + } + + keys := val.MapKeys() + for _, key := range keys { + if collisionFn != nil && collisionFn(key.String()) { + return fmt.Errorf("Key %s of inlined map conflicts with a struct field name", key) + } + vw, err := dw.WriteDocumentElement(key.String()) + if err != nil { + return err + } + + if enc, ok := encoder.(ValueEncoder); ok { + err = enc.EncodeValue(ec, vw, val.MapIndex(key)) + if err != nil { + return err + } + continue + } + err = encoder.EncodeValue(ec, vw, val.MapIndex(key)) + if err != nil { + return err + } + } + + return dw.WriteDocumentEnd() +} + +// ArrayEncodeValue is the ValueEncoderFunc for array types. +func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Kind() != reflect.Array { + return ValueEncoderError{Name: "ArrayEncodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val} + } + + // If we have a []primitive.E we want to treat it as a document instead of as an array. + if val.Type().Elem() == tE { + dw, err := vw.WriteDocument() + if err != nil { + return err + } + + for idx := 0; idx < val.Len(); idx++ { + e := val.Index(idx).Interface().(primitive.E) + err = encodeElement(ec, dw, e) + if err != nil { + return err + } + } + + return dw.WriteDocumentEnd() + } + + aw, err := vw.WriteArray() + if err != nil { + return err + } + + encoder, err := ec.LookupEncoder(val.Type().Elem()) + if err != nil { + return err + } + + for idx := 0; idx < val.Len(); idx++ { + vw, err := aw.WriteArrayElement() + if err != nil { + return err + } + + err = encoder.EncodeValue(ec, vw, val.Index(idx)) + if err != nil { + return err + } + } + return aw.WriteArrayEnd() +} + +// SliceEncodeValue is the ValueEncoderFunc for slice types. 
+func (dve DefaultValueEncoders) SliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Kind() != reflect.Slice { + return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} + } + + if val.IsNil() { + return vw.WriteNull() + } + + // If we have a []primitive.E we want to treat it as a document instead of as an array. + if val.Type().ConvertibleTo(tD) { + d := val.Convert(tD).Interface().(primitive.D) + + dw, err := vw.WriteDocument() + if err != nil { + return err + } + + for _, e := range d { + err = encodeElement(ec, dw, e) + if err != nil { + return err + } + } + + return dw.WriteDocumentEnd() + } + + aw, err := vw.WriteArray() + if err != nil { + return err + } + + encoder, err := ec.LookupEncoder(val.Type().Elem()) + if err != nil { + return err + } + + for idx := 0; idx < val.Len(); idx++ { + vw, err := aw.WriteArrayElement() + if err != nil { + return err + } + + err = encoder.EncodeValue(ec, vw, val.Index(idx)) + if err != nil { + return err + } + } + return aw.WriteArrayEnd() +} + +// EmptyInterfaceEncodeValue is the ValueEncoderFunc for interface{}. +func (dve DefaultValueEncoders) EmptyInterfaceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tEmpty { + return ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: val} + } + + if val.IsNil() { + return vw.WriteNull() + } + encoder, err := ec.LookupEncoder(val.Elem().Type()) + if err != nil { + return err + } + + return encoder.EncodeValue(ec, vw, val.Elem()) +} + +// ValueMarshalerEncodeValue is the ValueEncoderFunc for ValueMarshaler implementations. 
+func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || !val.Type().Implements(tValueMarshaler) { + return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val} + } + + fn := val.Convert(tValueMarshaler).MethodByName("MarshalBSONValue") + returns := fn.Call(nil) + if !returns[2].IsNil() { + return returns[2].Interface().(error) + } + t, data := returns[0].Interface().(bsontype.Type), returns[1].Interface().([]byte) + return bsonrw.Copier{}.CopyValueFromBytes(vw, t, data) +} + +// MarshalerEncodeValue is the ValueEncoderFunc for Marshaler implementations. +func (dve DefaultValueEncoders) MarshalerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || !val.Type().Implements(tMarshaler) { + return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val} + } + + fn := val.Convert(tMarshaler).MethodByName("MarshalBSON") + returns := fn.Call(nil) + if !returns[1].IsNil() { + return returns[1].Interface().(error) + } + data := returns[0].Interface().([]byte) + return bsonrw.Copier{}.CopyValueFromBytes(vw, bsontype.EmbeddedDocument, data) +} + +// ProxyEncodeValue is the ValueEncoderFunc for Proxy implementations. 
+func (dve DefaultValueEncoders) ProxyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || !val.Type().Implements(tProxy) { + return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val} + } + + fn := val.Convert(tProxy).MethodByName("ProxyBSON") + returns := fn.Call(nil) + if !returns[1].IsNil() { + return returns[1].Interface().(error) + } + data := returns[0] + var encoder ValueEncoder + var err error + if data.Elem().IsValid() { + encoder, err = ec.LookupEncoder(data.Elem().Type()) + } else { + encoder, err = ec.LookupEncoder(nil) + } + if err != nil { + return err + } + return encoder.EncodeValue(ec, vw, data.Elem()) +} + +// JavaScriptEncodeValue is the ValueEncoderFunc for the primitive.JavaScript type. +func (DefaultValueEncoders) JavaScriptEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tJavaScript { + return ValueEncoderError{Name: "JavaScriptEncodeValue", Types: []reflect.Type{tJavaScript}, Received: val} + } + + return vw.WriteJavascript(val.String()) +} + +// SymbolEncodeValue is the ValueEncoderFunc for the primitive.Symbol type. +func (DefaultValueEncoders) SymbolEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tSymbol { + return ValueEncoderError{Name: "SymbolEncodeValue", Types: []reflect.Type{tSymbol}, Received: val} + } + + return vw.WriteSymbol(val.String()) +} + +// BinaryEncodeValue is the ValueEncoderFunc for Binary. 
+func (DefaultValueEncoders) BinaryEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tBinary { + return ValueEncoderError{Name: "BinaryEncodeValue", Types: []reflect.Type{tBinary}, Received: val} + } + b := val.Interface().(primitive.Binary) + + return vw.WriteBinaryWithSubtype(b.Data, b.Subtype) +} + +// UndefinedEncodeValue is the ValueEncoderFunc for Undefined. +func (DefaultValueEncoders) UndefinedEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tUndefined { + return ValueEncoderError{Name: "UndefinedEncodeValue", Types: []reflect.Type{tUndefined}, Received: val} + } + + return vw.WriteUndefined() +} + +// DateTimeEncodeValue is the ValueEncoderFunc for DateTime. +func (DefaultValueEncoders) DateTimeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tDateTime { + return ValueEncoderError{Name: "DateTimeEncodeValue", Types: []reflect.Type{tDateTime}, Received: val} + } + + return vw.WriteDateTime(val.Int()) +} + +// NullEncodeValue is the ValueEncoderFunc for Null. +func (DefaultValueEncoders) NullEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tNull { + return ValueEncoderError{Name: "NullEncodeValue", Types: []reflect.Type{tNull}, Received: val} + } + + return vw.WriteNull() +} + +// RegexEncodeValue is the ValueEncoderFunc for Regex. +func (DefaultValueEncoders) RegexEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tRegex { + return ValueEncoderError{Name: "RegexEncodeValue", Types: []reflect.Type{tRegex}, Received: val} + } + + regex := val.Interface().(primitive.Regex) + + return vw.WriteRegex(regex.Pattern, regex.Options) +} + +// DBPointerEncodeValue is the ValueEncoderFunc for DBPointer. 
+func (DefaultValueEncoders) DBPointerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tDBPointer { + return ValueEncoderError{Name: "DBPointerEncodeValue", Types: []reflect.Type{tDBPointer}, Received: val} + } + + dbp := val.Interface().(primitive.DBPointer) + + return vw.WriteDBPointer(dbp.DB, dbp.Pointer) +} + +// TimestampEncodeValue is the ValueEncoderFunc for Timestamp. +func (DefaultValueEncoders) TimestampEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tTimestamp { + return ValueEncoderError{Name: "TimestampEncodeValue", Types: []reflect.Type{tTimestamp}, Received: val} + } + + ts := val.Interface().(primitive.Timestamp) + + return vw.WriteTimestamp(ts.T, ts.I) +} + +// MinKeyEncodeValue is the ValueEncoderFunc for MinKey. +func (DefaultValueEncoders) MinKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tMinKey { + return ValueEncoderError{Name: "MinKeyEncodeValue", Types: []reflect.Type{tMinKey}, Received: val} + } + + return vw.WriteMinKey() +} + +// MaxKeyEncodeValue is the ValueEncoderFunc for MaxKey. +func (DefaultValueEncoders) MaxKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tMaxKey { + return ValueEncoderError{Name: "MaxKeyEncodeValue", Types: []reflect.Type{tMaxKey}, Received: val} + } + + return vw.WriteMaxKey() +} + +// CoreDocumentEncodeValue is the ValueEncoderFunc for bsoncore.Document. 
+func (DefaultValueEncoders) CoreDocumentEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tCoreDocument { + return ValueEncoderError{Name: "CoreDocumentEncodeValue", Types: []reflect.Type{tCoreDocument}, Received: val} + } + + cdoc := val.Interface().(bsoncore.Document) + + return bsonrw.Copier{}.CopyDocumentFromBytes(vw, cdoc) +} + +// CodeWithScopeEncodeValue is the ValueEncoderFunc for CodeWithScope. +func (dve DefaultValueEncoders) CodeWithScopeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tCodeWithScope { + return ValueEncoderError{Name: "CodeWithScopeEncodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val} + } + + cws := val.Interface().(primitive.CodeWithScope) + + dw, err := vw.WriteCodeWithScope(string(cws.Code)) + if err != nil { + return err + } + + sw := sliceWriterPool.Get().(*bsonrw.SliceWriter) + defer sliceWriterPool.Put(sw) + *sw = (*sw)[:0] + + scopeVW := bvwPool.Get(sw) + defer bvwPool.Put(scopeVW) + + encoder, err := ec.LookupEncoder(reflect.TypeOf(cws.Scope)) + if err != nil { + return err + } + + err = encoder.EncodeValue(ec, scopeVW, reflect.ValueOf(cws.Scope)) + if err != nil { + return err + } + + err = bsonrw.Copier{}.CopyBytesToDocumentWriter(dw, *sw) + if err != nil { + return err + } + return dw.WriteDocumentEnd() +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders_test.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders_test.go new file mode 100644 index 0000000..bd74c45 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders_test.go @@ -0,0 +1,1436 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "encoding/json" + "errors" + "fmt" + "net/url" + "reflect" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsonrw/bsonrwtest" + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" + "math" +) + +func TestDefaultValueEncoders(t *testing.T) { + var dve DefaultValueEncoders + var wrong = func(string, string) string { return "wrong" } + + type mybool bool + type myint8 int8 + type myint16 int16 + type myint32 int32 + type myint64 int64 + type myint int + type myuint8 uint8 + type myuint16 uint16 + type myuint32 uint32 + type myuint64 uint64 + type myuint uint + type myfloat32 float32 + type myfloat64 float64 + type mystring string + + now := time.Now().Truncate(time.Millisecond) + pjsnum := new(json.Number) + *pjsnum = json.Number("3.14159") + d128 := primitive.NewDecimal128(12345, 67890) + + type subtest struct { + name string + val interface{} + ectx *EncodeContext + llvrw *bsonrwtest.ValueReaderWriter + invoke bsonrwtest.Invoked + err error + } + + testCases := []struct { + name string + ve ValueEncoder + subtests []subtest + }{ + { + "BooleanEncodeValue", + ValueEncoderFunc(dve.BooleanEncodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + nil, + bsonrwtest.Nothing, + ValueEncoderError{Name: "BooleanEncodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: reflect.ValueOf(wrong)}, + }, + {"fast path", bool(true), nil, nil, bsonrwtest.WriteBoolean, nil}, + {"reflection path", mybool(true), nil, nil, bsonrwtest.WriteBoolean, nil}, + }, + }, + { + "IntEncodeValue", + ValueEncoderFunc(dve.IntEncodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + nil, + bsonrwtest.Nothing, + ValueEncoderError{ + Name: "IntEncodeValue", + Kinds: []reflect.Kind{reflect.Int8, 
reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, + Received: reflect.ValueOf(wrong), + }, + }, + {"int8/fast path", int8(127), nil, nil, bsonrwtest.WriteInt32, nil}, + {"int16/fast path", int16(32767), nil, nil, bsonrwtest.WriteInt32, nil}, + {"int32/fast path", int32(2147483647), nil, nil, bsonrwtest.WriteInt32, nil}, + {"int64/fast path", int64(1234567890987), nil, nil, bsonrwtest.WriteInt64, nil}, + {"int64/fast path - minsize", int64(math.MaxInt32), &EncodeContext{MinSize: true}, nil, bsonrwtest.WriteInt32, nil}, + {"int64/fast path - minsize too large", int64(math.MaxInt32 + 1), &EncodeContext{MinSize: true}, nil, bsonrwtest.WriteInt64, nil}, + {"int64/fast path - minsize too small", int64(math.MinInt32 - 1), &EncodeContext{MinSize: true}, nil, bsonrwtest.WriteInt64, nil}, + {"int/fast path - positive int32", int(math.MaxInt32 - 1), nil, nil, bsonrwtest.WriteInt32, nil}, + {"int/fast path - negative int32", int(math.MinInt32 + 1), nil, nil, bsonrwtest.WriteInt32, nil}, + {"int/fast path - MaxInt32", int(math.MaxInt32), nil, nil, bsonrwtest.WriteInt32, nil}, + {"int/fast path - MinInt32", int(math.MinInt32), nil, nil, bsonrwtest.WriteInt32, nil}, + {"int/fast path - larger than MaxInt32", int(math.MaxInt32 + 1), nil, nil, bsonrwtest.WriteInt64, nil}, + {"int/fast path - smaller than MinInt32", int(math.MinInt32 - 1), nil, nil, bsonrwtest.WriteInt64, nil}, + {"int8/reflection path", myint8(127), nil, nil, bsonrwtest.WriteInt32, nil}, + {"int16/reflection path", myint16(32767), nil, nil, bsonrwtest.WriteInt32, nil}, + {"int32/reflection path", myint32(2147483647), nil, nil, bsonrwtest.WriteInt32, nil}, + {"int64/reflection path", myint64(1234567890987), nil, nil, bsonrwtest.WriteInt64, nil}, + {"int64/reflection path - minsize", myint64(math.MaxInt32), &EncodeContext{MinSize: true}, nil, bsonrwtest.WriteInt32, nil}, + {"int64/reflection path - minsize too large", myint64(math.MaxInt32 + 1), &EncodeContext{MinSize: true}, nil, bsonrwtest.WriteInt64, 
nil}, + {"int64/reflection path - minsize too small", myint64(math.MinInt32 - 1), &EncodeContext{MinSize: true}, nil, bsonrwtest.WriteInt64, nil}, + {"int/reflection path - positive int32", myint(math.MaxInt32 - 1), nil, nil, bsonrwtest.WriteInt32, nil}, + {"int/reflection path - negative int32", myint(math.MinInt32 + 1), nil, nil, bsonrwtest.WriteInt32, nil}, + {"int/reflection path - MaxInt32", myint(math.MaxInt32), nil, nil, bsonrwtest.WriteInt32, nil}, + {"int/reflection path - MinInt32", myint(math.MinInt32), nil, nil, bsonrwtest.WriteInt32, nil}, + {"int/reflection path - larger than MaxInt32", myint(math.MaxInt32 + 1), nil, nil, bsonrwtest.WriteInt64, nil}, + {"int/reflection path - smaller than MinInt32", myint(math.MinInt32 - 1), nil, nil, bsonrwtest.WriteInt64, nil}, + }, + }, + { + "UintEncodeValue", + ValueEncoderFunc(dve.UintEncodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + nil, + bsonrwtest.Nothing, + ValueEncoderError{ + Name: "UintEncodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: reflect.ValueOf(wrong), + }, + }, + {"uint8/fast path", uint8(127), nil, nil, bsonrwtest.WriteInt32, nil}, + {"uint16/fast path", uint16(32767), nil, nil, bsonrwtest.WriteInt32, nil}, + {"uint32/fast path", uint32(2147483647), nil, nil, bsonrwtest.WriteInt64, nil}, + {"uint64/fast path", uint64(1234567890987), nil, nil, bsonrwtest.WriteInt64, nil}, + {"uint/fast path", uint(1234567), nil, nil, bsonrwtest.WriteInt64, nil}, + {"uint32/fast path - minsize", uint32(2147483647), &EncodeContext{MinSize: true}, nil, bsonrwtest.WriteInt32, nil}, + {"uint64/fast path - minsize", uint64(2147483647), &EncodeContext{MinSize: true}, nil, bsonrwtest.WriteInt32, nil}, + {"uint/fast path - minsize", uint(2147483647), &EncodeContext{MinSize: true}, nil, bsonrwtest.WriteInt32, nil}, + {"uint32/fast path - minsize too large", uint32(2147483648), &EncodeContext{MinSize: true}, nil, 
bsonrwtest.WriteInt64, nil}, + {"uint64/fast path - minsize too large", uint64(2147483648), &EncodeContext{MinSize: true}, nil, bsonrwtest.WriteInt64, nil}, + {"uint/fast path - minsize too large", uint(2147483648), &EncodeContext{MinSize: true}, nil, bsonrwtest.WriteInt64, nil}, + {"uint64/fast path - overflow", uint64(1 << 63), nil, nil, bsonrwtest.Nothing, fmt.Errorf("%d overflows int64", uint(1<<63))}, + {"uint/fast path - overflow", uint(1 << 63), nil, nil, bsonrwtest.Nothing, fmt.Errorf("%d overflows int64", uint(1<<63))}, + {"uint8/reflection path", myuint8(127), nil, nil, bsonrwtest.WriteInt32, nil}, + {"uint16/reflection path", myuint16(32767), nil, nil, bsonrwtest.WriteInt32, nil}, + {"uint32/reflection path", myuint32(2147483647), nil, nil, bsonrwtest.WriteInt64, nil}, + {"uint64/reflection path", myuint64(1234567890987), nil, nil, bsonrwtest.WriteInt64, nil}, + {"uint/reflection path", myuint(1234567890987), nil, nil, bsonrwtest.WriteInt64, nil}, + {"uint32/reflection path - minsize", myuint32(2147483647), &EncodeContext{MinSize: true}, nil, bsonrwtest.WriteInt32, nil}, + {"uint64/reflection path - minsize", myuint64(2147483647), &EncodeContext{MinSize: true}, nil, bsonrwtest.WriteInt32, nil}, + {"uint/reflection path - minsize", myuint(2147483647), &EncodeContext{MinSize: true}, nil, bsonrwtest.WriteInt32, nil}, + {"uint32/reflection path - minsize too large", myuint(1 << 31), &EncodeContext{MinSize: true}, nil, bsonrwtest.WriteInt64, nil}, + {"uint64/reflection path - minsize too large", myuint64(1 << 31), &EncodeContext{MinSize: true}, nil, bsonrwtest.WriteInt64, nil}, + {"uint/reflection path - minsize too large", myuint(2147483648), &EncodeContext{MinSize: true}, nil, bsonrwtest.WriteInt64, nil}, + {"uint64/reflection path - overflow", myuint64(1 << 63), nil, nil, bsonrwtest.Nothing, fmt.Errorf("%d overflows int64", uint(1<<63))}, + {"uint/reflection path - overflow", myuint(1 << 63), nil, nil, bsonrwtest.Nothing, fmt.Errorf("%d overflows int64", 
uint(1<<63))}, + }, + }, + { + "FloatEncodeValue", + ValueEncoderFunc(dve.FloatEncodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + nil, + bsonrwtest.Nothing, + ValueEncoderError{ + Name: "FloatEncodeValue", + Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, + Received: reflect.ValueOf(wrong), + }, + }, + {"float32/fast path", float32(3.14159), nil, nil, bsonrwtest.WriteDouble, nil}, + {"float64/fast path", float64(3.14159), nil, nil, bsonrwtest.WriteDouble, nil}, + {"float32/reflection path", myfloat32(3.14159), nil, nil, bsonrwtest.WriteDouble, nil}, + {"float64/reflection path", myfloat64(3.14159), nil, nil, bsonrwtest.WriteDouble, nil}, + }, + }, + { + "TimeEncodeValue", + ValueEncoderFunc(dve.TimeEncodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + nil, + bsonrwtest.Nothing, + ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: reflect.ValueOf(wrong)}, + }, + {"time.Time", now, nil, nil, bsonrwtest.WriteDateTime, nil}, + }, + }, + { + "MapEncodeValue", + ValueEncoderFunc(dve.MapEncodeValue), + []subtest{ + { + "wrong kind", + wrong, + nil, + nil, + bsonrwtest.Nothing, + ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: reflect.ValueOf(wrong)}, + }, + { + "wrong kind (non-string key)", + map[int]interface{}{}, + nil, + nil, + bsonrwtest.Nothing, + ValueEncoderError{ + Name: "MapEncodeValue", + Kinds: []reflect.Kind{reflect.Map}, + Received: reflect.ValueOf(map[int]interface{}{}), + }, + }, + { + "WriteDocument Error", + map[string]interface{}{}, + nil, + &bsonrwtest.ValueReaderWriter{Err: errors.New("wd error"), ErrAfter: bsonrwtest.WriteDocument}, + bsonrwtest.WriteDocument, + errors.New("wd error"), + }, + { + "Lookup Error", + map[string]interface{}{}, + &EncodeContext{Registry: NewRegistryBuilder().Build()}, + &bsonrwtest.ValueReaderWriter{}, + bsonrwtest.WriteDocument, + ErrNoEncoder{Type: reflect.TypeOf((*interface{})(nil)).Elem()}, + }, + { + 
"WriteDocumentElement Error", + map[string]interface{}{"foo": "bar"}, + &EncodeContext{Registry: buildDefaultRegistry()}, + &bsonrwtest.ValueReaderWriter{Err: errors.New("wde error"), ErrAfter: bsonrwtest.WriteDocumentElement}, + bsonrwtest.WriteDocumentElement, + errors.New("wde error"), + }, + { + "EncodeValue Error", + map[string]interface{}{"foo": "bar"}, + &EncodeContext{Registry: buildDefaultRegistry()}, + &bsonrwtest.ValueReaderWriter{Err: errors.New("ev error"), ErrAfter: bsonrwtest.WriteString}, + bsonrwtest.WriteString, + errors.New("ev error"), + }, + }, + }, + { + "ArrayEncodeValue", + ValueEncoderFunc(dve.ArrayEncodeValue), + []subtest{ + { + "wrong kind", + wrong, + nil, + nil, + bsonrwtest.Nothing, + ValueEncoderError{Name: "ArrayEncodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: reflect.ValueOf(wrong)}, + }, + { + "WriteArray Error", + [1]string{}, + nil, + &bsonrwtest.ValueReaderWriter{Err: errors.New("wa error"), ErrAfter: bsonrwtest.WriteArray}, + bsonrwtest.WriteArray, + errors.New("wa error"), + }, + { + "Lookup Error", + [1]interface{}{}, + &EncodeContext{Registry: NewRegistryBuilder().Build()}, + &bsonrwtest.ValueReaderWriter{}, + bsonrwtest.WriteArray, + ErrNoEncoder{Type: reflect.TypeOf((*interface{})(nil)).Elem()}, + }, + { + "WriteArrayElement Error", + [1]string{"foo"}, + &EncodeContext{Registry: buildDefaultRegistry()}, + &bsonrwtest.ValueReaderWriter{Err: errors.New("wae error"), ErrAfter: bsonrwtest.WriteArrayElement}, + bsonrwtest.WriteArrayElement, + errors.New("wae error"), + }, + { + "EncodeValue Error", + [1]string{"foo"}, + &EncodeContext{Registry: buildDefaultRegistry()}, + &bsonrwtest.ValueReaderWriter{Err: errors.New("ev error"), ErrAfter: bsonrwtest.WriteString}, + bsonrwtest.WriteString, + errors.New("ev error"), + }, + { + "[1]primitive.E/success", + [1]primitive.E{{"hello", "world"}}, + &EncodeContext{Registry: buildDefaultRegistry()}, + nil, + bsonrwtest.WriteDocumentEnd, + nil, + }, + { + 
"[1]primitive.E/success", + [1]primitive.E{{"hello", nil}}, + &EncodeContext{Registry: buildDefaultRegistry()}, + nil, + bsonrwtest.WriteDocumentEnd, + nil, + }, + }, + }, + { + "SliceEncodeValue", + ValueEncoderFunc(dve.SliceEncodeValue), + []subtest{ + { + "wrong kind", + wrong, + nil, + nil, + bsonrwtest.Nothing, + ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: reflect.ValueOf(wrong)}, + }, + { + "WriteArray Error", + []string{}, + nil, + &bsonrwtest.ValueReaderWriter{Err: errors.New("wa error"), ErrAfter: bsonrwtest.WriteArray}, + bsonrwtest.WriteArray, + errors.New("wa error"), + }, + { + "Lookup Error", + []interface{}{}, + &EncodeContext{Registry: NewRegistryBuilder().Build()}, + &bsonrwtest.ValueReaderWriter{}, + bsonrwtest.WriteArray, + ErrNoEncoder{Type: reflect.TypeOf((*interface{})(nil)).Elem()}, + }, + { + "WriteArrayElement Error", + []string{"foo"}, + &EncodeContext{Registry: buildDefaultRegistry()}, + &bsonrwtest.ValueReaderWriter{Err: errors.New("wae error"), ErrAfter: bsonrwtest.WriteArrayElement}, + bsonrwtest.WriteArrayElement, + errors.New("wae error"), + }, + { + "EncodeValue Error", + []string{"foo"}, + &EncodeContext{Registry: buildDefaultRegistry()}, + &bsonrwtest.ValueReaderWriter{Err: errors.New("ev error"), ErrAfter: bsonrwtest.WriteString}, + bsonrwtest.WriteString, + errors.New("ev error"), + }, + { + "D/success", + primitive.D{{"hello", "world"}}, + &EncodeContext{Registry: buildDefaultRegistry()}, + nil, + bsonrwtest.WriteDocumentEnd, + nil, + }, + { + "D/success", + primitive.D{{"hello", nil}}, + &EncodeContext{Registry: buildDefaultRegistry()}, + nil, + bsonrwtest.WriteDocumentEnd, + nil, + }, + }, + }, + { + "ObjectIDEncodeValue", + ValueEncoderFunc(dve.ObjectIDEncodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + nil, + bsonrwtest.Nothing, + ValueEncoderError{Name: "ObjectIDEncodeValue", Types: []reflect.Type{tOID}, Received: reflect.ValueOf(wrong)}, + }, + { + 
"primitive.ObjectID/success", + primitive.ObjectID{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C}, + nil, nil, bsonrwtest.WriteObjectID, nil, + }, + }, + }, + { + "Decimal128EncodeValue", + ValueEncoderFunc(dve.Decimal128EncodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + nil, + bsonrwtest.Nothing, + ValueEncoderError{Name: "Decimal128EncodeValue", Types: []reflect.Type{tDecimal}, Received: reflect.ValueOf(wrong)}, + }, + {"Decimal128/success", d128, nil, nil, bsonrwtest.WriteDecimal128, nil}, + }, + }, + { + "JSONNumberEncodeValue", + ValueEncoderFunc(dve.JSONNumberEncodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + nil, + bsonrwtest.Nothing, + ValueEncoderError{Name: "JSONNumberEncodeValue", Types: []reflect.Type{tJSONNumber}, Received: reflect.ValueOf(wrong)}, + }, + { + "json.Number/invalid", + json.Number("hello world"), + nil, nil, bsonrwtest.Nothing, errors.New(`strconv.ParseFloat: parsing "hello world": invalid syntax`), + }, + { + "json.Number/int64/success", + json.Number("1234567890"), + nil, nil, bsonrwtest.WriteInt64, nil, + }, + { + "json.Number/float64/success", + json.Number("3.14159"), + nil, nil, bsonrwtest.WriteDouble, nil, + }, + }, + }, + { + "URLEncodeValue", + ValueEncoderFunc(dve.URLEncodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + nil, + bsonrwtest.Nothing, + ValueEncoderError{Name: "URLEncodeValue", Types: []reflect.Type{tURL}, Received: reflect.ValueOf(wrong)}, + }, + {"url.URL", url.URL{Scheme: "http", Host: "example.com"}, nil, nil, bsonrwtest.WriteString, nil}, + }, + }, + { + "ByteSliceEncodeValue", + ValueEncoderFunc(dve.ByteSliceEncodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + nil, + bsonrwtest.Nothing, + ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: reflect.ValueOf(wrong)}, + }, + {"[]byte", []byte{0x01, 0x02, 0x03}, nil, nil, bsonrwtest.WriteBinary, nil}, + {"[]byte/nil", []byte(nil), nil, nil, 
bsonrwtest.WriteNull, nil}, + }, + }, + { + "EmptyInterfaceEncodeValue", + ValueEncoderFunc(dve.EmptyInterfaceEncodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + nil, + bsonrwtest.Nothing, + ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: reflect.ValueOf(wrong)}, + }, + }, + }, + { + "ValueMarshalerEncodeValue", + ValueEncoderFunc(dve.ValueMarshalerEncodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + nil, + bsonrwtest.Nothing, + ValueEncoderError{ + Name: "ValueMarshalerEncodeValue", + Types: []reflect.Type{tValueMarshaler}, + Received: reflect.ValueOf(wrong), + }, + }, + { + "MarshalBSONValue error", + testValueMarshaler{err: errors.New("mbsonv error")}, + nil, + nil, + bsonrwtest.Nothing, + errors.New("mbsonv error"), + }, + { + "Copy error", + testValueMarshaler{}, + nil, + nil, + bsonrwtest.Nothing, + fmt.Errorf("Cannot copy unknown BSON type %s", bsontype.Type(0)), + }, + { + "success", + testValueMarshaler{t: bsontype.String, buf: []byte{0x04, 0x00, 0x00, 0x00, 'f', 'o', 'o', 0x00}}, + nil, + nil, + bsonrwtest.WriteString, + nil, + }, + }, + }, + { + "MarshalerEncodeValue", + ValueEncoderFunc(dve.MarshalerEncodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + nil, + bsonrwtest.Nothing, + ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: reflect.ValueOf(wrong)}, + }, + { + "MarshalBSON error", + testMarshaler{err: errors.New("mbson error")}, + nil, + nil, + bsonrwtest.Nothing, + errors.New("mbson error"), + }, + { + "success", + testMarshaler{buf: bsoncore.BuildDocument(nil, bsoncore.AppendDoubleElement(nil, "pi", 3.14159))}, + nil, + nil, + bsonrwtest.WriteDocumentEnd, + nil, + }, + }, + }, + { + "ProxyEncodeValue", + ValueEncoderFunc(dve.ProxyEncodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + nil, + bsonrwtest.Nothing, + ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: 
reflect.ValueOf(wrong)}, + }, + { + "Proxy error", + testProxy{err: errors.New("proxy error")}, + nil, + nil, + bsonrwtest.Nothing, + errors.New("proxy error"), + }, + { + "Lookup error", + testProxy{ret: nil}, + &EncodeContext{Registry: buildDefaultRegistry()}, + nil, + bsonrwtest.Nothing, + ErrNoEncoder{Type: nil}, + }, + { + "success", + testProxy{ret: int64(1234567890)}, + &EncodeContext{Registry: buildDefaultRegistry()}, + nil, + bsonrwtest.WriteInt64, + nil, + }, + }, + }, + { + "PointerCodec.EncodeValue", + NewPointerCodec(), + []subtest{ + { + "nil", + nil, + nil, + nil, + bsonrwtest.WriteNull, + nil, + }, + { + "not pointer", + int32(123456), + nil, + nil, + bsonrwtest.Nothing, + ValueEncoderError{Name: "PointerCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: reflect.ValueOf(int32(123456))}, + }, + { + "typed nil", + (*int32)(nil), + nil, + nil, + bsonrwtest.WriteNull, + nil, + }, + { + "no encoder", + &wrong, + &EncodeContext{Registry: buildDefaultRegistry()}, + nil, + bsonrwtest.Nothing, + ErrNoEncoder{Type: reflect.TypeOf(wrong)}, + }, + }, + }, + { + "JavaScriptEncodeValue", + ValueEncoderFunc(dve.JavaScriptEncodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + nil, + bsonrwtest.Nothing, + ValueEncoderError{Name: "JavaScriptEncodeValue", Types: []reflect.Type{tJavaScript}, Received: reflect.ValueOf(wrong)}, + }, + {"JavaScript", primitive.JavaScript("foobar"), nil, nil, bsonrwtest.WriteJavascript, nil}, + }, + }, + { + "SymbolEncodeValue", + ValueEncoderFunc(dve.SymbolEncodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + nil, + bsonrwtest.Nothing, + ValueEncoderError{Name: "SymbolEncodeValue", Types: []reflect.Type{tSymbol}, Received: reflect.ValueOf(wrong)}, + }, + {"Symbol", primitive.Symbol("foobar"), nil, nil, bsonrwtest.WriteSymbol, nil}, + }, + }, + { + "BinaryEncodeValue", + ValueEncoderFunc(dve.BinaryEncodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + nil, + bsonrwtest.Nothing, + 
ValueEncoderError{Name: "BinaryEncodeValue", Types: []reflect.Type{tBinary}, Received: reflect.ValueOf(wrong)}, + }, + {"Binary/success", primitive.Binary{Data: []byte{0x01, 0x02}, Subtype: 0xFF}, nil, nil, bsonrwtest.WriteBinaryWithSubtype, nil}, + }, + }, + { + "UndefinedEncodeValue", + ValueEncoderFunc(dve.UndefinedEncodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + nil, + bsonrwtest.Nothing, + ValueEncoderError{Name: "UndefinedEncodeValue", Types: []reflect.Type{tUndefined}, Received: reflect.ValueOf(wrong)}, + }, + {"Undefined/success", primitive.Undefined{}, nil, nil, bsonrwtest.WriteUndefined, nil}, + }, + }, + { + "DateTimeEncodeValue", + ValueEncoderFunc(dve.DateTimeEncodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + nil, + bsonrwtest.Nothing, + ValueEncoderError{Name: "DateTimeEncodeValue", Types: []reflect.Type{tDateTime}, Received: reflect.ValueOf(wrong)}, + }, + {"DateTime/success", primitive.DateTime(1234567890), nil, nil, bsonrwtest.WriteDateTime, nil}, + }, + }, + { + "NullEncodeValue", + ValueEncoderFunc(dve.NullEncodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + nil, + bsonrwtest.Nothing, + ValueEncoderError{Name: "NullEncodeValue", Types: []reflect.Type{tNull}, Received: reflect.ValueOf(wrong)}, + }, + {"Null/success", primitive.Null{}, nil, nil, bsonrwtest.WriteNull, nil}, + }, + }, + { + "RegexEncodeValue", + ValueEncoderFunc(dve.RegexEncodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + nil, + bsonrwtest.Nothing, + ValueEncoderError{Name: "RegexEncodeValue", Types: []reflect.Type{tRegex}, Received: reflect.ValueOf(wrong)}, + }, + {"Regex/success", primitive.Regex{Pattern: "foo", Options: "bar"}, nil, nil, bsonrwtest.WriteRegex, nil}, + }, + }, + { + "DBPointerEncodeValue", + ValueEncoderFunc(dve.DBPointerEncodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + nil, + bsonrwtest.Nothing, + ValueEncoderError{Name: "DBPointerEncodeValue", Types: []reflect.Type{tDBPointer}, Received: 
reflect.ValueOf(wrong)}, + }, + { + "DBPointer/success", + primitive.DBPointer{ + DB: "foobar", + Pointer: primitive.ObjectID{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C}, + }, + nil, nil, bsonrwtest.WriteDBPointer, nil, + }, + }, + }, + { + "TimestampEncodeValue", + ValueEncoderFunc(dve.TimestampEncodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + nil, + bsonrwtest.Nothing, + ValueEncoderError{Name: "TimestampEncodeValue", Types: []reflect.Type{tTimestamp}, Received: reflect.ValueOf(wrong)}, + }, + {"Timestamp/success", primitive.Timestamp{T: 12345, I: 67890}, nil, nil, bsonrwtest.WriteTimestamp, nil}, + }, + }, + { + "MinKeyEncodeValue", + ValueEncoderFunc(dve.MinKeyEncodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + nil, + bsonrwtest.Nothing, + ValueEncoderError{Name: "MinKeyEncodeValue", Types: []reflect.Type{tMinKey}, Received: reflect.ValueOf(wrong)}, + }, + {"MinKey/success", primitive.MinKey{}, nil, nil, bsonrwtest.WriteMinKey, nil}, + }, + }, + { + "MaxKeyEncodeValue", + ValueEncoderFunc(dve.MaxKeyEncodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + nil, + bsonrwtest.Nothing, + ValueEncoderError{Name: "MaxKeyEncodeValue", Types: []reflect.Type{tMaxKey}, Received: reflect.ValueOf(wrong)}, + }, + {"MaxKey/success", primitive.MaxKey{}, nil, nil, bsonrwtest.WriteMaxKey, nil}, + }, + }, + { + "CoreDocumentEncodeValue", + ValueEncoderFunc(dve.CoreDocumentEncodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + nil, + bsonrwtest.Nothing, + ValueEncoderError{ + Name: "CoreDocumentEncodeValue", + Types: []reflect.Type{tCoreDocument}, + Received: reflect.ValueOf(wrong), + }, + }, + { + "WriteDocument Error", + bsoncore.Document{}, + nil, + &bsonrwtest.ValueReaderWriter{Err: errors.New("wd error"), ErrAfter: bsonrwtest.WriteDocument}, + bsonrwtest.WriteDocument, + errors.New("wd error"), + }, + { + "bsoncore.Document.Elements Error", + bsoncore.Document{0xFF, 0x00, 0x00, 0x00, 0x00}, + nil, + 
&bsonrwtest.ValueReaderWriter{}, + bsonrwtest.WriteDocument, + errors.New("length read exceeds number of bytes available. length=5 bytes=255"), + }, + { + "WriteDocumentElement Error", + bsoncore.Document(buildDocument(bsoncore.AppendNullElement(nil, "foo"))), + nil, + &bsonrwtest.ValueReaderWriter{Err: errors.New("wde error"), ErrAfter: bsonrwtest.WriteDocumentElement}, + bsonrwtest.WriteDocumentElement, + errors.New("wde error"), + }, + { + "encodeValue error", + bsoncore.Document(buildDocument(bsoncore.AppendNullElement(nil, "foo"))), + nil, + &bsonrwtest.ValueReaderWriter{Err: errors.New("ev error"), ErrAfter: bsonrwtest.WriteNull}, + bsonrwtest.WriteNull, + errors.New("ev error"), + }, + { + "iterator error", + bsoncore.Document{0x0C, 0x00, 0x00, 0x00, 0x01, 'f', 'o', 'o', 0x00, 0x01, 0x02, 0x03}, + nil, + &bsonrwtest.ValueReaderWriter{}, + bsonrwtest.WriteDocumentElement, + errors.New("not enough bytes available to read type. bytes=3 type=double"), + }, + }, + }, + { + "CodeWithScopeEncodeValue", + ValueEncoderFunc(dve.CodeWithScopeEncodeValue), + []subtest{ + { + "wrong type", + wrong, + nil, + nil, + bsonrwtest.Nothing, + ValueEncoderError{ + Name: "CodeWithScopeEncodeValue", + Types: []reflect.Type{tCodeWithScope}, + Received: reflect.ValueOf(wrong), + }, + }, + { + "WriteCodeWithScope error", + primitive.CodeWithScope{}, + nil, + &bsonrwtest.ValueReaderWriter{Err: errors.New("wcws error"), ErrAfter: bsonrwtest.WriteCodeWithScope}, + bsonrwtest.WriteCodeWithScope, + errors.New("wcws error"), + }, + { + "CodeWithScope/success", + primitive.CodeWithScope{ + Code: "var hello = 'world';", + Scope: primitive.D{}, + }, + &EncodeContext{Registry: buildDefaultRegistry()}, + nil, bsonrwtest.WriteDocumentEnd, nil, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + for _, subtest := range tc.subtests { + t.Run(subtest.name, func(t *testing.T) { + var ec EncodeContext + if subtest.ectx != nil { + ec = *subtest.ectx + } + 
llvrw := new(bsonrwtest.ValueReaderWriter) + if subtest.llvrw != nil { + llvrw = subtest.llvrw + } + llvrw.T = t + err := tc.ve.EncodeValue(ec, llvrw, reflect.ValueOf(subtest.val)) + if !compareErrors(err, subtest.err) { + t.Errorf("Errors do not match. got %v; want %v", err, subtest.err) + } + invoked := llvrw.Invoked + if !cmp.Equal(invoked, subtest.invoke) { + t.Errorf("Incorrect method invoked. got %v; want %v", invoked, subtest.invoke) + } + }) + } + }) + } + + t.Run("success path", func(t *testing.T) { + oid := primitive.NewObjectID() + oids := []primitive.ObjectID{primitive.NewObjectID(), primitive.NewObjectID(), primitive.NewObjectID()} + var str = new(string) + *str = "bar" + now := time.Now().Truncate(time.Millisecond) + murl, err := url.Parse("https://mongodb.com/random-url?hello=world") + if err != nil { + t.Errorf("Error parsing URL: %v", err) + t.FailNow() + } + decimal128, err := primitive.ParseDecimal128("1.5e10") + if err != nil { + t.Errorf("Error parsing decimal128: %v", err) + t.FailNow() + } + + testCases := []struct { + name string + value interface{} + b []byte + err error + }{ + { + "map[string]int", + map[string]int32{"foo": 1}, + []byte{ + 0x0E, 0x00, 0x00, 0x00, + 0x10, 'f', 'o', 'o', 0x00, + 0x01, 0x00, 0x00, 0x00, + 0x00, + }, + nil, + }, + { + "map[string]primitive.ObjectID", + map[string]primitive.ObjectID{"foo": oid}, + buildDocument(bsoncore.AppendObjectIDElement(nil, "foo", oid)), + nil, + }, + { + "map[string][]int32", + map[string][]int32{"Z": {1, 2, 3}}, + buildDocumentArray(func(doc []byte) []byte { + doc = bsoncore.AppendInt32Element(doc, "0", 1) + doc = bsoncore.AppendInt32Element(doc, "1", 2) + return bsoncore.AppendInt32Element(doc, "2", 3) + }), + nil, + }, + { + "map[string][]primitive.ObjectID", + map[string][]primitive.ObjectID{"Z": oids}, + buildDocumentArray(func(doc []byte) []byte { + doc = bsoncore.AppendObjectIDElement(doc, "0", oids[0]) + doc = bsoncore.AppendObjectIDElement(doc, "1", oids[1]) + return 
bsoncore.AppendObjectIDElement(doc, "2", oids[2]) + }), + nil, + }, + { + "map[string][]json.Number(int64)", + map[string][]json.Number{"Z": {json.Number("5"), json.Number("10")}}, + buildDocumentArray(func(doc []byte) []byte { + doc = bsoncore.AppendInt64Element(doc, "0", 5) + return bsoncore.AppendInt64Element(doc, "1", 10) + }), + nil, + }, + { + "map[string][]json.Number(float64)", + map[string][]json.Number{"Z": {json.Number("5"), json.Number("10.1")}}, + buildDocumentArray(func(doc []byte) []byte { + doc = bsoncore.AppendInt64Element(doc, "0", 5) + return bsoncore.AppendDoubleElement(doc, "1", 10.1) + }), + nil, + }, + { + "map[string][]*url.URL", + map[string][]*url.URL{"Z": {murl}}, + buildDocumentArray(func(doc []byte) []byte { + return bsoncore.AppendStringElement(doc, "0", murl.String()) + }), + nil, + }, + { + "map[string][]primitive.Decimal128", + map[string][]primitive.Decimal128{"Z": {decimal128}}, + buildDocumentArray(func(doc []byte) []byte { + return bsoncore.AppendDecimal128Element(doc, "0", decimal128) + }), + nil, + }, + { + "-", + struct { + A string `bson:"-"` + }{ + A: "", + }, + []byte{0x05, 0x00, 0x00, 0x00, 0x00}, + nil, + }, + { + "omitempty", + struct { + A string `bson:",omitempty"` + }{ + A: "", + }, + []byte{0x05, 0x00, 0x00, 0x00, 0x00}, + nil, + }, + { + "omitempty, empty time", + struct { + A time.Time `bson:",omitempty"` + }{ + A: time.Time{}, + }, + []byte{0x05, 0x00, 0x00, 0x00, 0x00}, + nil, + }, + { + "no private fields", + noPrivateFields{a: "should be empty"}, + []byte{0x05, 0x00, 0x00, 0x00, 0x00}, + nil, + }, + { + "minsize", + struct { + A int64 `bson:",minsize"` + }{ + A: 12345, + }, + buildDocument(bsoncore.AppendInt32Element(nil, "a", 12345)), + nil, + }, + { + "inline", + struct { + Foo struct { + A int64 `bson:",minsize"` + } `bson:",inline"` + }{ + Foo: struct { + A int64 `bson:",minsize"` + }{ + A: 12345, + }, + }, + buildDocument(bsoncore.AppendInt32Element(nil, "a", 12345)), + nil, + }, + { + "inline map", + 
struct { + Foo map[string]string `bson:",inline"` + }{ + Foo: map[string]string{"foo": "bar"}, + }, + buildDocument(bsoncore.AppendStringElement(nil, "foo", "bar")), + nil, + }, + { + "alternate name bson:name", + struct { + A string `bson:"foo"` + }{ + A: "bar", + }, + buildDocument(bsoncore.AppendStringElement(nil, "foo", "bar")), + nil, + }, + { + "alternate name", + struct { + A string `bson:"foo"` + }{ + A: "bar", + }, + buildDocument(bsoncore.AppendStringElement(nil, "foo", "bar")), + nil, + }, + { + "inline, omitempty", + struct { + A string + Foo zeroTest `bson:"omitempty,inline"` + }{ + A: "bar", + Foo: zeroTest{true}, + }, + buildDocument(bsoncore.AppendStringElement(nil, "a", "bar")), + nil, + }, + { + "struct{}", + struct { + A bool + B int32 + C int64 + D uint16 + E uint64 + F float64 + G string + H map[string]string + I []byte + K [2]string + L struct { + M string + } + Q primitive.ObjectID + T []struct{} + Y json.Number + Z time.Time + AA json.Number + AB *url.URL + AC primitive.Decimal128 + AD *time.Time + AE testValueMarshaler + AF Proxy + AG testProxy + AH map[string]interface{} + AI primitive.CodeWithScope + }{ + A: true, + B: 123, + C: 456, + D: 789, + E: 101112, + F: 3.14159, + G: "Hello, world", + H: map[string]string{"foo": "bar"}, + I: []byte{0x01, 0x02, 0x03}, + K: [2]string{"baz", "qux"}, + L: struct { + M string + }{ + M: "foobar", + }, + Q: oid, + T: nil, + Y: json.Number("5"), + Z: now, + AA: json.Number("10.1"), + AB: murl, + AC: decimal128, + AD: &now, + AE: testValueMarshaler{t: bsontype.String, buf: bsoncore.AppendString(nil, "hello, world")}, + AF: testProxy{ret: struct{ Hello string }{Hello: "world!"}}, + AG: testProxy{ret: struct{ Pi float64 }{Pi: 3.14159}}, + AH: nil, + AI: primitive.CodeWithScope{Code: "var hello = 'world';", Scope: primitive.D{{"pi", 3.14159}}}, + }, + buildDocument(func(doc []byte) []byte { + doc = bsoncore.AppendBooleanElement(doc, "a", true) + doc = bsoncore.AppendInt32Element(doc, "b", 123) + doc = 
bsoncore.AppendInt64Element(doc, "c", 456) + doc = bsoncore.AppendInt32Element(doc, "d", 789) + doc = bsoncore.AppendInt64Element(doc, "e", 101112) + doc = bsoncore.AppendDoubleElement(doc, "f", 3.14159) + doc = bsoncore.AppendStringElement(doc, "g", "Hello, world") + doc = bsoncore.AppendDocumentElement(doc, "h", buildDocument(bsoncore.AppendStringElement(nil, "foo", "bar"))) + doc = bsoncore.AppendBinaryElement(doc, "i", 0x00, []byte{0x01, 0x02, 0x03}) + doc = bsoncore.AppendArrayElement(doc, "k", + buildArray(bsoncore.AppendStringElement(bsoncore.AppendStringElement(nil, "0", "baz"), "1", "qux")), + ) + doc = bsoncore.AppendDocumentElement(doc, "l", buildDocument(bsoncore.AppendStringElement(nil, "m", "foobar"))) + doc = bsoncore.AppendObjectIDElement(doc, "q", oid) + doc = bsoncore.AppendNullElement(doc, "t") + doc = bsoncore.AppendInt64Element(doc, "y", 5) + doc = bsoncore.AppendDateTimeElement(doc, "z", now.UnixNano()/int64(time.Millisecond)) + doc = bsoncore.AppendDoubleElement(doc, "aa", 10.1) + doc = bsoncore.AppendStringElement(doc, "ab", murl.String()) + doc = bsoncore.AppendDecimal128Element(doc, "ac", decimal128) + doc = bsoncore.AppendDateTimeElement(doc, "ad", now.UnixNano()/int64(time.Millisecond)) + doc = bsoncore.AppendStringElement(doc, "ae", "hello, world") + doc = bsoncore.AppendDocumentElement(doc, "af", buildDocument(bsoncore.AppendStringElement(nil, "hello", "world!"))) + doc = bsoncore.AppendDocumentElement(doc, "ag", buildDocument(bsoncore.AppendDoubleElement(nil, "pi", 3.14159))) + doc = bsoncore.AppendNullElement(doc, "ah") + doc = bsoncore.AppendCodeWithScopeElement(doc, "ai", + "var hello = 'world';", buildDocument(bsoncore.AppendDoubleElement(nil, "pi", 3.14159)), + ) + return doc + }(nil)), + nil, + }, + { + "struct{[]interface{}}", + struct { + A []bool + B []int32 + C []int64 + D []uint16 + E []uint64 + F []float64 + G []string + H []map[string]string + I [][]byte + K [1][2]string + L []struct { + M string + } + N [][]string + R 
[]primitive.ObjectID + T []struct{} + W []map[string]struct{} + X []map[string]struct{} + Y []map[string]struct{} + Z []time.Time + AA []json.Number + AB []*url.URL + AC []primitive.Decimal128 + AD []*time.Time + AE []testValueMarshaler + AF []Proxy + AG []testProxy + }{ + A: []bool{true}, + B: []int32{123}, + C: []int64{456}, + D: []uint16{789}, + E: []uint64{101112}, + F: []float64{3.14159}, + G: []string{"Hello, world"}, + H: []map[string]string{{"foo": "bar"}}, + I: [][]byte{{0x01, 0x02, 0x03}}, + K: [1][2]string{{"baz", "qux"}}, + L: []struct { + M string + }{ + { + M: "foobar", + }, + }, + N: [][]string{{"foo", "bar"}}, + R: oids, + T: nil, + W: nil, + X: []map[string]struct{}{}, // Should be empty BSON Array + Y: []map[string]struct{}{{}}, // Should be BSON array with one element, an empty BSON SubDocument + Z: []time.Time{now, now}, + AA: []json.Number{json.Number("5"), json.Number("10.1")}, + AB: []*url.URL{murl}, + AC: []primitive.Decimal128{decimal128}, + AD: []*time.Time{&now, &now}, + AE: []testValueMarshaler{ + {t: bsontype.String, buf: bsoncore.AppendString(nil, "hello")}, + {t: bsontype.String, buf: bsoncore.AppendString(nil, "world")}, + }, + AF: []Proxy{ + testProxy{ret: struct{ Hello string }{Hello: "world!"}}, + testProxy{ret: struct{ Foo string }{Foo: "bar"}}, + }, + AG: []testProxy{ + {ret: struct{ One int64 }{One: 1234567890}}, + {ret: struct{ Pi float64 }{Pi: 3.14159}}, + }, + }, + buildDocument(func(doc []byte) []byte { + doc = appendArrayElement(doc, "a", bsoncore.AppendBooleanElement(nil, "0", true)) + doc = appendArrayElement(doc, "b", bsoncore.AppendInt32Element(nil, "0", 123)) + doc = appendArrayElement(doc, "c", bsoncore.AppendInt64Element(nil, "0", 456)) + doc = appendArrayElement(doc, "d", bsoncore.AppendInt32Element(nil, "0", 789)) + doc = appendArrayElement(doc, "e", bsoncore.AppendInt64Element(nil, "0", 101112)) + doc = appendArrayElement(doc, "f", bsoncore.AppendDoubleElement(nil, "0", 3.14159)) + doc = appendArrayElement(doc, 
"g", bsoncore.AppendStringElement(nil, "0", "Hello, world")) + doc = appendArrayElement(doc, "h", buildDocumentElement("0", bsoncore.AppendStringElement(nil, "foo", "bar"))) + doc = appendArrayElement(doc, "i", bsoncore.AppendBinaryElement(nil, "0", 0x00, []byte{0x01, 0x02, 0x03})) + doc = appendArrayElement(doc, "k", + buildArrayElement("0", + bsoncore.AppendStringElement(bsoncore.AppendStringElement(nil, "0", "baz"), "1", "qux")), + ) + doc = appendArrayElement(doc, "l", buildDocumentElement("0", bsoncore.AppendStringElement(nil, "m", "foobar"))) + doc = appendArrayElement(doc, "n", + buildArrayElement("0", + bsoncore.AppendStringElement(bsoncore.AppendStringElement(nil, "0", "foo"), "1", "bar")), + ) + doc = appendArrayElement(doc, "r", + bsoncore.AppendObjectIDElement( + bsoncore.AppendObjectIDElement( + bsoncore.AppendObjectIDElement(nil, + "0", oids[0]), + "1", oids[1]), + "2", oids[2]), + ) + doc = bsoncore.AppendNullElement(doc, "t") + doc = bsoncore.AppendNullElement(doc, "w") + doc = appendArrayElement(doc, "x", nil) + doc = appendArrayElement(doc, "y", buildDocumentElement("0", nil)) + doc = appendArrayElement(doc, "z", + bsoncore.AppendDateTimeElement( + bsoncore.AppendDateTimeElement( + nil, "0", now.UnixNano()/int64(time.Millisecond)), + "1", now.UnixNano()/int64(time.Millisecond)), + ) + doc = appendArrayElement(doc, "aa", bsoncore.AppendDoubleElement(bsoncore.AppendInt64Element(nil, "0", 5), "1", 10.10)) + doc = appendArrayElement(doc, "ab", bsoncore.AppendStringElement(nil, "0", murl.String())) + doc = appendArrayElement(doc, "ac", bsoncore.AppendDecimal128Element(nil, "0", decimal128)) + doc = appendArrayElement(doc, "ad", + bsoncore.AppendDateTimeElement( + bsoncore.AppendDateTimeElement(nil, "0", now.UnixNano()/int64(time.Millisecond)), + "1", now.UnixNano()/int64(time.Millisecond)), + ) + doc = appendArrayElement(doc, "ae", + bsoncore.AppendStringElement(bsoncore.AppendStringElement(nil, "0", "hello"), "1", "world"), + ) + doc = 
appendArrayElement(doc, "af", + bsoncore.AppendDocumentElement( + bsoncore.AppendDocumentElement(nil, "0", + bsoncore.BuildDocument(nil, bsoncore.AppendStringElement(nil, "hello", "world!")), + ), "1", + bsoncore.BuildDocument(nil, bsoncore.AppendStringElement(nil, "foo", "bar")), + ), + ) + doc = appendArrayElement(doc, "ag", + bsoncore.AppendDocumentElement( + bsoncore.AppendDocumentElement(nil, "0", + bsoncore.BuildDocument(nil, bsoncore.AppendInt64Element(nil, "one", 1234567890)), + ), "1", + bsoncore.BuildDocument(nil, bsoncore.AppendDoubleElement(nil, "pi", 3.14159)), + ), + ) + return doc + }(nil)), + nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + b := make(bsonrw.SliceWriter, 0, 512) + vw, err := bsonrw.NewBSONValueWriter(&b) + noerr(t, err) + reg := buildDefaultRegistry() + enc, err := reg.LookupEncoder(reflect.TypeOf(tc.value)) + noerr(t, err) + err = enc.EncodeValue(EncodeContext{Registry: reg}, vw, reflect.ValueOf(tc.value)) + if err != tc.err { + t.Errorf("Did not receive expected error. got %v; want %v", err, tc.err) + } + if diff := cmp.Diff([]byte(b), tc.b); diff != "" { + t.Errorf("Bytes written differ: (-got +want)\n%s", diff) + t.Errorf("Bytes\ngot: %v\nwant:%v\n", b, tc.b) + t.Errorf("Readers\ngot: %v\nwant:%v\n", bsoncore.Document(b), bsoncore.Document(tc.b)) + } + }) + } + }) + + t.Run("EmptyInterfaceEncodeValue/nil", func(t *testing.T) { + val := reflect.New(tEmpty).Elem() + llvrw := new(bsonrwtest.ValueReaderWriter) + err := dve.EmptyInterfaceEncodeValue(EncodeContext{Registry: NewRegistryBuilder().Build()}, llvrw, val) + noerr(t, err) + if llvrw.Invoked != bsonrwtest.WriteNull { + t.Errorf("Incorrect method called. 
got %v; want %v", llvrw.Invoked, bsonrwtest.WriteNull) + } + }) + + t.Run("EmptyInterfaceEncodeValue/LookupEncoder error", func(t *testing.T) { + val := reflect.New(tEmpty).Elem() + val.Set(reflect.ValueOf(int64(1234567890))) + llvrw := new(bsonrwtest.ValueReaderWriter) + got := dve.EmptyInterfaceEncodeValue(EncodeContext{Registry: NewRegistryBuilder().Build()}, llvrw, val) + want := ErrNoEncoder{Type: tInt64} + if !compareErrors(got, want) { + t.Errorf("Did not recieve expected error. got %v; want %v", got, want) + } + }) +} + +type testValueMarshaler struct { + t bsontype.Type + buf []byte + err error +} + +func (tvm testValueMarshaler) MarshalBSONValue() (bsontype.Type, []byte, error) { + return tvm.t, tvm.buf, tvm.err +} + +type testMarshaler struct { + buf []byte + err error +} + +func (tvm testMarshaler) MarshalBSON() ([]byte, error) { + return tvm.buf, tvm.err +} + +type testProxy struct { + ret interface{} + err error +} + +func (tp testProxy) ProxyBSON() (interface{}, error) { return tp.ret, tp.err } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go new file mode 100644 index 0000000..978511c --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go @@ -0,0 +1,61 @@ +// Package bsoncodec provides a system for encoding values to BSON representations and decoding +// values from BSON representations. This package considers both binary BSON and ExtendedJSON as +// BSON representations. The types in this package enable a flexible system for handling this +// encoding and decoding. +// +// The codec system is composed of two parts: +// +// 1) ValueEncoders and ValueDecoders that handle encoding and decoding Go values to and from BSON +// representations. +// +// 2) A Registry that holds these ValueEncoders and ValueDecoders and provides methods for +// retrieving them. 
+// +// ValueEncoders and ValueDecoders +// +// The ValueEncoder interface is implemented by types that can encode a provided Go type to BSON. +// The value to encode is provided as a reflect.Value and a bsonrw.ValueWriter is used within the +// EncodeValue method to actually create the BSON representation. For convenience, ValueEncoderFunc +// is provided to allow use of a function with the correct signature as a ValueEncoder. An +// EncodeContext instance is provided to allow implementations to lookup further ValueEncoders and +// to provide configuration information. +// +// The ValueDecoder interface is the inverse of the ValueEncoder. Implementations should ensure that +// the value they receive is settable. Similar to ValueEncoderFunc, ValueDecoderFunc is provided to +// allow the use of a function with the correct signature as a ValueDecoder. A DecodeContext +// instance is provided and serves similar functionality to the EncodeContext. +// +// Registry and RegistryBuilder +// +// A Registry is an immutable store for ValueEncoders, ValueDecoders, and a type map. For looking up +// ValueEncoders and Decoders the Registry first attempts to find a ValueEncoder or ValueDecoder for +// the type provided; if one cannot be found it then checks to see if a registered ValueEncoder or +// ValueDecoder exists for an interface the type implements. Finally, the reflect.Kind of the type +// is used to lookup a default ValueEncoder or ValueDecoder for that kind. If no ValueEncoder or +// ValueDecoder can be found, an error is returned. +// +// The Registry also holds a type map. This allows users to retrieve the Go type that should be used +// when decoding a BSON value into an empty interface. This is primarily only used for the empty +// interface ValueDecoder. +// +// A RegistryBuilder is used to construct a Registry. The Register methods are used to associate +// either a reflect.Type or a reflect.Kind with a ValueEncoder or ValueDecoder. 
A RegistryBuilder +// returned from NewRegistryBuilder contains no registered ValueEncoders nor ValueDecoders and +// contains an empty type map. +// +// The RegisterTypeMapEntry method handles associating a BSON type with a Go type. For example, if +// you want to decode BSON int64 and int32 values into Go int instances, you would do the following: +// +// var regbuilder *RegistryBuilder = ... intType := reflect.TypeOf(int(0)) +// regbuilder.RegisterTypeMapEntry(bsontype.Int64, intType).RegisterTypeMapEntry(bsontype.Int32, +// intType) +// +// DefaultValueEncoders and DefaultValueDecoders +// +// The DefaultValueEncoders and DefaultValueDecoders types provide a full set of ValueEncoders and +// ValueDecoders for handling a wide range of Go types, including all of the types within the +// primitive package. To make registering these codecs easier, a helper method on each type is +// provided. For the DefaultValueEncoders type the method is called RegisterDefaultEncoders and for +// the DefaultValueDecoders type the method is called RegisterDefaultDecoders, this method also +// handles registering type map entries for each BSON type. +package bsoncodec diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go new file mode 100644 index 0000000..fbd9f0a --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go @@ -0,0 +1,65 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
// mode identifies where a ValueReader or ValueWriter currently sits within
// the BSON structure being processed (top level, document, array, ...).
type mode int

const (
	_ mode = iota
	mTopLevel
	mDocument
	mArray
	mValue
	mElement
	mCodeWithScope
	mSpacer
)

// String returns a human-readable name for the mode. Values outside the
// defined set are reported as "UnknownMode".
func (m mode) String() string {
	switch m {
	case mTopLevel:
		return "TopLevel"
	case mDocument:
		return "DocumentMode"
	case mArray:
		return "ArrayMode"
	case mValue:
		return "ValueMode"
	case mElement:
		return "ElementMode"
	case mCodeWithScope:
		return "CodeWithScopeMode"
	case mSpacer:
		return "CodeWithScopeSpacerFrame"
	}
	return "UnknownMode"
}

// TransitionError is the error returned when a ValueReader or ValueWriter
// state machine is asked to perform an invalid state transition.
type TransitionError struct {
	parent      mode
	current     mode
	destination mode
}

// Error renders the invalid transition. A zero destination means no
// read/write was possible at all in the current mode; a zero parent omits
// the parent from the message.
func (te TransitionError) Error() string {
	switch {
	case te.destination == mode(0):
		return fmt.Sprintf("invalid state transition: cannot read/write value while in %s", te.current)
	case te.parent == mode(0):
		return fmt.Sprintf("invalid state transition: %s -> %s", te.current, te.destination)
	default:
		return fmt.Sprintf("invalid state transition: %s -> %s; parent %s", te.current, te.destination, te.parent)
	}
}
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "reflect" + "sync" + + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" +) + +var defaultPointerCodec = &PointerCodec{ + ecache: make(map[reflect.Type]ValueEncoder), + dcache: make(map[reflect.Type]ValueDecoder), +} + +var _ ValueEncoder = &PointerCodec{} +var _ ValueDecoder = &PointerCodec{} + +// PointerCodec is the Codec used for pointers. +type PointerCodec struct { + ecache map[reflect.Type]ValueEncoder + dcache map[reflect.Type]ValueDecoder + l sync.RWMutex +} + +// NewPointerCodec returns a PointerCodec that has been initialized. +func NewPointerCodec() *PointerCodec { + return &PointerCodec{ + ecache: make(map[reflect.Type]ValueEncoder), + dcache: make(map[reflect.Type]ValueDecoder), + } +} + +// EncodeValue handles encoding a pointer by either encoding it to BSON Null if the pointer is nil +// or looking up an encoder for the type of value the pointer points to. +func (pc *PointerCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if val.Kind() != reflect.Ptr { + if !val.IsValid() { + return vw.WriteNull() + } + return ValueEncoderError{Name: "PointerCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val} + } + + if val.IsNil() { + return vw.WriteNull() + } + + pc.l.RLock() + enc, ok := pc.ecache[val.Type()] + pc.l.RUnlock() + if ok { + if enc == nil { + return ErrNoEncoder{Type: val.Type()} + } + return enc.EncodeValue(ec, vw, val.Elem()) + } + + enc, err := ec.LookupEncoder(val.Type().Elem()) + pc.l.Lock() + pc.ecache[val.Type()] = enc + pc.l.Unlock() + if err != nil { + return err + } + + return enc.EncodeValue(ec, vw, val.Elem()) +} + +// DecodeValue handles decoding a pointer by looking up a decoder for the type it points to and +// using that to decode. If the BSON value is Null, this method will set the pointer to nil. 
+func (pc *PointerCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Kind() != reflect.Ptr { + return ValueDecoderError{Name: "PointerCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val} + } + + if vr.Type() == bsontype.Null { + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + } + + if val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + + pc.l.RLock() + dec, ok := pc.dcache[val.Type()] + pc.l.RUnlock() + if ok { + if dec == nil { + return ErrNoDecoder{Type: val.Type()} + } + return dec.DecodeValue(dc, vr, val.Elem()) + } + + dec, err := dc.LookupDecoder(val.Type().Elem()) + pc.l.Lock() + pc.dcache[val.Type()] = dec + pc.l.Unlock() + if err != nil { + return err + } + + return dec.DecodeValue(dc, vr, val.Elem()) +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go new file mode 100644 index 0000000..4cf2b01 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go @@ -0,0 +1,14 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +// Proxy is an interface implemented by types that cannot themselves be directly encoded. Types +// that implement this interface with have ProxyBSON called during the encoding process and that +// value will be encoded in place for the implementer. 
+type Proxy interface { + ProxyBSON() (interface{}, error) +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go new file mode 100644 index 0000000..42e362b --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go @@ -0,0 +1,384 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "errors" + "reflect" + "sync" + + "go.mongodb.org/mongo-driver/bson/bsontype" +) + +// ErrNilType is returned when nil is passed to either LookupEncoder or LookupDecoder. +var ErrNilType = errors.New("cannot perform a decoder lookup on ") + +// ErrNotPointer is returned when a non-pointer type is provided to LookupDecoder. +var ErrNotPointer = errors.New("non-pointer provided to LookupDecoder") + +// ErrNoEncoder is returned when there wasn't an encoder available for a type. +type ErrNoEncoder struct { + Type reflect.Type +} + +func (ene ErrNoEncoder) Error() string { + if ene.Type == nil { + return "no encoder found for " + } + return "no encoder found for " + ene.Type.String() +} + +// ErrNoDecoder is returned when there wasn't a decoder available for a type. +type ErrNoDecoder struct { + Type reflect.Type +} + +func (end ErrNoDecoder) Error() string { + return "no decoder found for " + end.Type.String() +} + +// ErrNoTypeMapEntry is returned when there wasn't a type available for the provided BSON type. +type ErrNoTypeMapEntry struct { + Type bsontype.Type +} + +func (entme ErrNoTypeMapEntry) Error() string { + return "no type map entry found for " + entme.Type.String() +} + +// ErrNotInterface is returned when the provided type is not an interface. 
+var ErrNotInterface = errors.New("The provided type is not an interface") + +var defaultRegistry *Registry + +func init() { + defaultRegistry = buildDefaultRegistry() +} + +// A RegistryBuilder is used to build a Registry. This type is not goroutine +// safe. +type RegistryBuilder struct { + typeEncoders map[reflect.Type]ValueEncoder + interfaceEncoders []interfaceValueEncoder + kindEncoders map[reflect.Kind]ValueEncoder + + typeDecoders map[reflect.Type]ValueDecoder + interfaceDecoders []interfaceValueDecoder + kindDecoders map[reflect.Kind]ValueDecoder + + typeMap map[bsontype.Type]reflect.Type +} + +// A Registry is used to store and retrieve codecs for types and interfaces. This type is the main +// typed passed around and Encoders and Decoders are constructed from it. +type Registry struct { + typeEncoders map[reflect.Type]ValueEncoder + typeDecoders map[reflect.Type]ValueDecoder + + interfaceEncoders []interfaceValueEncoder + interfaceDecoders []interfaceValueDecoder + + kindEncoders map[reflect.Kind]ValueEncoder + kindDecoders map[reflect.Kind]ValueDecoder + + typeMap map[bsontype.Type]reflect.Type + + mu sync.RWMutex +} + +// NewRegistryBuilder creates a new empty RegistryBuilder. +func NewRegistryBuilder() *RegistryBuilder { + return &RegistryBuilder{ + typeEncoders: make(map[reflect.Type]ValueEncoder), + typeDecoders: make(map[reflect.Type]ValueDecoder), + + interfaceEncoders: make([]interfaceValueEncoder, 0), + interfaceDecoders: make([]interfaceValueDecoder, 0), + + kindEncoders: make(map[reflect.Kind]ValueEncoder), + kindDecoders: make(map[reflect.Kind]ValueDecoder), + + typeMap: make(map[bsontype.Type]reflect.Type), + } +} + +func buildDefaultRegistry() *Registry { + rb := NewRegistryBuilder() + defaultValueEncoders.RegisterDefaultEncoders(rb) + defaultValueDecoders.RegisterDefaultDecoders(rb) + return rb.Build() +} + +// RegisterCodec will register the provided ValueCodec for the provided type. 
+func (rb *RegistryBuilder) RegisterCodec(t reflect.Type, codec ValueCodec) *RegistryBuilder { + rb.RegisterEncoder(t, codec) + rb.RegisterDecoder(t, codec) + return rb +} + +// RegisterEncoder will register the provided ValueEncoder to the provided type. +// +// The type registered will be used directly, so an encoder can be registered for a type and a +// different encoder can be registered for a pointer to that type. +func (rb *RegistryBuilder) RegisterEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { + if t == tEmpty { + rb.typeEncoders[t] = enc + return rb + } + switch t.Kind() { + case reflect.Interface: + for idx, ir := range rb.interfaceEncoders { + if ir.i == t { + rb.interfaceEncoders[idx].ve = enc + return rb + } + } + + rb.interfaceEncoders = append(rb.interfaceEncoders, interfaceValueEncoder{i: t, ve: enc}) + default: + rb.typeEncoders[t] = enc + } + return rb +} + +// RegisterDecoder will register the provided ValueDecoder to the provided type. +// +// The type registered will be used directly, so a decoder can be registered for a type and a +// different decoder can be registered for a pointer to that type. +func (rb *RegistryBuilder) RegisterDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { + if t == nil { + rb.typeDecoders[nil] = dec + return rb + } + if t == tEmpty { + rb.typeDecoders[t] = dec + return rb + } + switch t.Kind() { + case reflect.Interface: + for idx, ir := range rb.interfaceDecoders { + if ir.i == t { + rb.interfaceDecoders[idx].vd = dec + return rb + } + } + + rb.interfaceDecoders = append(rb.interfaceDecoders, interfaceValueDecoder{i: t, vd: dec}) + default: + rb.typeDecoders[t] = dec + } + return rb +} + +// RegisterDefaultEncoder will registr the provided ValueEncoder to the provided +// kind. 
+func (rb *RegistryBuilder) RegisterDefaultEncoder(kind reflect.Kind, enc ValueEncoder) *RegistryBuilder { + rb.kindEncoders[kind] = enc + return rb +} + +// RegisterDefaultDecoder will register the provided ValueDecoder to the +// provided kind. +func (rb *RegistryBuilder) RegisterDefaultDecoder(kind reflect.Kind, dec ValueDecoder) *RegistryBuilder { + rb.kindDecoders[kind] = dec + return rb +} + +// RegisterTypeMapEntry will register the provided type to the BSON type. The primary usage for this +// mapping is decoding situations where an empty interface is used and a default type needs to be +// created and decoded into. +// +// NOTE: It is unlikely that registering a type for BSON Embedded Document is actually desired. By +// registering a type map entry for BSON Embedded Document the type registered will be used in any +// case where a BSON Embedded Document will be decoded into an empty interface. For example, if you +// register primitive.M, the EmptyInterface decoder will always use primitive.M, even if an ancestor +// was a primitive.D. +func (rb *RegistryBuilder) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) *RegistryBuilder { + rb.typeMap[bt] = rt + return rb +} + +// Build creates a Registry from the current state of this RegistryBuilder. 
+func (rb *RegistryBuilder) Build() *Registry { + registry := new(Registry) + + registry.typeEncoders = make(map[reflect.Type]ValueEncoder) + for t, enc := range rb.typeEncoders { + registry.typeEncoders[t] = enc + } + + registry.typeDecoders = make(map[reflect.Type]ValueDecoder) + for t, dec := range rb.typeDecoders { + registry.typeDecoders[t] = dec + } + + registry.interfaceEncoders = make([]interfaceValueEncoder, len(rb.interfaceEncoders)) + copy(registry.interfaceEncoders, rb.interfaceEncoders) + + registry.interfaceDecoders = make([]interfaceValueDecoder, len(rb.interfaceDecoders)) + copy(registry.interfaceDecoders, rb.interfaceDecoders) + + registry.kindEncoders = make(map[reflect.Kind]ValueEncoder) + for kind, enc := range rb.kindEncoders { + registry.kindEncoders[kind] = enc + } + + registry.kindDecoders = make(map[reflect.Kind]ValueDecoder) + for kind, dec := range rb.kindDecoders { + registry.kindDecoders[kind] = dec + } + + registry.typeMap = make(map[bsontype.Type]reflect.Type) + for bt, rt := range rb.typeMap { + registry.typeMap[bt] = rt + } + + return registry +} + +// LookupEncoder will inspect the registry for an encoder that satisfies the +// type provided. An encoder registered for a specific type will take +// precedence over an encoder registered for an interface the type satisfies, +// which takes precedence over an encoder for the reflect.Kind of the value. If +// no encoder can be found, an error is returned. 
+func (r *Registry) LookupEncoder(t reflect.Type) (ValueEncoder, error) { + encodererr := ErrNoEncoder{Type: t} + r.mu.RLock() + enc, found := r.lookupTypeEncoder(t) + r.mu.RUnlock() + if found { + if enc == nil { + return nil, ErrNoEncoder{Type: t} + } + return enc, nil + } + + enc, found = r.lookupInterfaceEncoder(t) + if found { + r.mu.Lock() + r.typeEncoders[t] = enc + r.mu.Unlock() + return enc, nil + } + + if t == nil { + r.mu.Lock() + r.typeEncoders[t] = nil + r.mu.Unlock() + return nil, encodererr + } + + enc, found = r.kindEncoders[t.Kind()] + if !found { + r.mu.Lock() + r.typeEncoders[t] = nil + r.mu.Unlock() + return nil, encodererr + } + + r.mu.Lock() + r.typeEncoders[t] = enc + r.mu.Unlock() + return enc, nil +} + +func (r *Registry) lookupTypeEncoder(t reflect.Type) (ValueEncoder, bool) { + enc, found := r.typeEncoders[t] + return enc, found +} + +func (r *Registry) lookupInterfaceEncoder(t reflect.Type) (ValueEncoder, bool) { + if t == nil { + return nil, false + } + for _, ienc := range r.interfaceEncoders { + if !t.Implements(ienc.i) { + continue + } + + return ienc.ve, true + } + return nil, false +} + +// LookupDecoder will inspect the registry for a decoder that satisfies the +// type provided. A decoder registered for a specific type will take +// precedence over a decoder registered for an interface the type satisfies, +// which takes precedence over a decoder for the reflect.Kind of the value. If +// no decoder can be found, an error is returned. 
+func (r *Registry) LookupDecoder(t reflect.Type) (ValueDecoder, error) { + if t == nil { + return nil, ErrNilType + } + decodererr := ErrNoDecoder{Type: t} + r.mu.RLock() + dec, found := r.lookupTypeDecoder(t) + r.mu.RUnlock() + if found { + if dec == nil { + return nil, ErrNoDecoder{Type: t} + } + return dec, nil + } + + dec, found = r.lookupInterfaceDecoder(t) + if found { + r.mu.Lock() + r.typeDecoders[t] = dec + r.mu.Unlock() + return dec, nil + } + + dec, found = r.kindDecoders[t.Kind()] + if !found { + r.mu.Lock() + r.typeDecoders[t] = nil + r.mu.Unlock() + return nil, decodererr + } + + r.mu.Lock() + r.typeDecoders[t] = dec + r.mu.Unlock() + return dec, nil +} + +func (r *Registry) lookupTypeDecoder(t reflect.Type) (ValueDecoder, bool) { + dec, found := r.typeDecoders[t] + return dec, found +} + +func (r *Registry) lookupInterfaceDecoder(t reflect.Type) (ValueDecoder, bool) { + for _, idec := range r.interfaceDecoders { + if !t.Implements(idec.i) && !reflect.PtrTo(t).Implements(idec.i) { + continue + } + + return idec.vd, true + } + return nil, false +} + +// LookupTypeMapEntry inspects the registry's type map for a Go type for the corresponding BSON +// type. If no type is found, ErrNoTypeMapEntry is returned. +func (r *Registry) LookupTypeMapEntry(bt bsontype.Type) (reflect.Type, error) { + t, ok := r.typeMap[bt] + if !ok || t == nil { + return nil, ErrNoTypeMapEntry{Type: bt} + } + return t, nil +} + +type interfaceValueEncoder struct { + i reflect.Type + ve ValueEncoder +} + +type interfaceValueDecoder struct { + i reflect.Type + vd ValueDecoder +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry_test.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry_test.go new file mode 100644 index 0000000..3c71811 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry_test.go @@ -0,0 +1,359 @@ +// Copyright (C) MongoDB, Inc. 2017-present. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "reflect" + "testing" + + "github.com/google/go-cmp/cmp" + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" +) + +func TestRegistry(t *testing.T) { + t.Run("Register", func(t *testing.T) { + fc1, fc2, fc3, fc4 := new(fakeCodec), new(fakeCodec), new(fakeCodec), new(fakeCodec) + t.Run("interface", func(t *testing.T) { + var t1f *testInterface1 + var t2f *testInterface2 + var t4f *testInterface4 + ips := []interfaceValueEncoder{ + {i: reflect.TypeOf(t1f).Elem(), ve: fc1}, + {i: reflect.TypeOf(t2f).Elem(), ve: fc2}, + {i: reflect.TypeOf(t1f).Elem(), ve: fc3}, + {i: reflect.TypeOf(t4f).Elem(), ve: fc4}, + } + want := []interfaceValueEncoder{ + {i: reflect.TypeOf(t1f).Elem(), ve: fc3}, + {i: reflect.TypeOf(t2f).Elem(), ve: fc2}, + {i: reflect.TypeOf(t4f).Elem(), ve: fc4}, + } + rb := NewRegistryBuilder() + for _, ip := range ips { + rb.RegisterEncoder(ip.i, ip.ve) + } + got := rb.interfaceEncoders + if !cmp.Equal(got, want, cmp.AllowUnexported(interfaceValueEncoder{}, fakeCodec{}), cmp.Comparer(typeComparer)) { + t.Errorf("The registered interfaces are not correct. got %v; want %v", got, want) + } + }) + t.Run("type", func(t *testing.T) { + ft1, ft2, ft4 := fakeType1{}, fakeType2{}, fakeType4{} + rb := NewRegistryBuilder(). + RegisterEncoder(reflect.TypeOf(ft1), fc1). + RegisterEncoder(reflect.TypeOf(ft2), fc2). + RegisterEncoder(reflect.TypeOf(ft1), fc3). 
+ RegisterEncoder(reflect.TypeOf(ft4), fc4) + want := []struct { + t reflect.Type + c ValueEncoder + }{ + {reflect.TypeOf(ft1), fc3}, + {reflect.TypeOf(ft2), fc2}, + {reflect.TypeOf(ft4), fc4}, + } + got := rb.typeEncoders + for _, s := range want { + wantT, wantC := s.t, s.c + gotC, exists := got[wantT] + if !exists { + t.Errorf("Did not find type in the type registry: %v", wantT) + } + if !cmp.Equal(gotC, wantC, cmp.AllowUnexported(fakeCodec{})) { + t.Errorf("Codecs did not match. got %#v; want %#v", gotC, wantC) + } + } + }) + t.Run("kind", func(t *testing.T) { + k1, k2, k4 := reflect.Struct, reflect.Slice, reflect.Map + rb := NewRegistryBuilder(). + RegisterDefaultEncoder(k1, fc1). + RegisterDefaultEncoder(k2, fc2). + RegisterDefaultEncoder(k1, fc3). + RegisterDefaultEncoder(k4, fc4) + want := []struct { + k reflect.Kind + c ValueEncoder + }{ + {k1, fc3}, + {k2, fc2}, + {k4, fc4}, + } + got := rb.kindEncoders + for _, s := range want { + wantK, wantC := s.k, s.c + gotC, exists := got[wantK] + if !exists { + t.Errorf("Did not find kind in the kind registry: %v", wantK) + } + if !cmp.Equal(gotC, wantC, cmp.AllowUnexported(fakeCodec{})) { + t.Errorf("Codecs did not match. got %#v; want %#v", gotC, wantC) + } + } + }) + t.Run("RegisterDefault", func(t *testing.T) { + t.Run("MapCodec", func(t *testing.T) { + codec := fakeCodec{num: 1} + codec2 := fakeCodec{num: 2} + rb := NewRegistryBuilder() + rb.RegisterDefaultEncoder(reflect.Map, codec) + if rb.kindEncoders[reflect.Map] != codec { + t.Errorf("Did not properly set the map codec. got %v; want %v", rb.kindEncoders[reflect.Map], codec) + } + rb.RegisterDefaultEncoder(reflect.Map, codec2) + if rb.kindEncoders[reflect.Map] != codec2 { + t.Errorf("Did not properly set the map codec. 
got %v; want %v", rb.kindEncoders[reflect.Map], codec2) + } + }) + t.Run("StructCodec", func(t *testing.T) { + codec := fakeCodec{num: 1} + codec2 := fakeCodec{num: 2} + rb := NewRegistryBuilder() + rb.RegisterDefaultEncoder(reflect.Struct, codec) + if rb.kindEncoders[reflect.Struct] != codec { + t.Errorf("Did not properly set the struct codec. got %v; want %v", rb.kindEncoders[reflect.Struct], codec) + } + rb.RegisterDefaultEncoder(reflect.Struct, codec2) + if rb.kindEncoders[reflect.Struct] != codec2 { + t.Errorf("Did not properly set the struct codec. got %v; want %v", rb.kindEncoders[reflect.Struct], codec2) + } + }) + t.Run("SliceCodec", func(t *testing.T) { + codec := fakeCodec{num: 1} + codec2 := fakeCodec{num: 2} + rb := NewRegistryBuilder() + rb.RegisterDefaultEncoder(reflect.Slice, codec) + if rb.kindEncoders[reflect.Slice] != codec { + t.Errorf("Did not properly set the slice codec. got %v; want %v", rb.kindEncoders[reflect.Slice], codec) + } + rb.RegisterDefaultEncoder(reflect.Slice, codec2) + if rb.kindEncoders[reflect.Slice] != codec2 { + t.Errorf("Did not properly set the slice codec. got %v; want %v", rb.kindEncoders[reflect.Slice], codec2) + } + }) + t.Run("ArrayCodec", func(t *testing.T) { + codec := fakeCodec{num: 1} + codec2 := fakeCodec{num: 2} + rb := NewRegistryBuilder() + rb.RegisterDefaultEncoder(reflect.Array, codec) + if rb.kindEncoders[reflect.Array] != codec { + t.Errorf("Did not properly set the slice codec. got %v; want %v", rb.kindEncoders[reflect.Array], codec) + } + rb.RegisterDefaultEncoder(reflect.Array, codec2) + if rb.kindEncoders[reflect.Array] != codec2 { + t.Errorf("Did not properly set the slice codec. 
got %v; want %v", rb.kindEncoders[reflect.Array], codec2) + } + }) + }) + t.Run("Lookup", func(t *testing.T) { + type Codec interface { + ValueEncoder + ValueDecoder + } + + var arrinstance [12]int + arr := reflect.TypeOf(arrinstance) + slc := reflect.TypeOf(make([]int, 12)) + m := reflect.TypeOf(make(map[string]int)) + strct := reflect.TypeOf(struct{ Foo string }{}) + ft1 := reflect.PtrTo(reflect.TypeOf(fakeType1{})) + ft2 := reflect.TypeOf(fakeType2{}) + ft3 := reflect.TypeOf(fakeType5(func(string, string) string { return "fakeType5" })) + ti2 := reflect.TypeOf((*testInterface2)(nil)).Elem() + fc1, fc2, fc4 := fakeCodec{num: 1}, fakeCodec{num: 2}, fakeCodec{num: 4} + fsc, fslcc, fmc := new(fakeStructCodec), new(fakeSliceCodec), new(fakeMapCodec) + pc := NewPointerCodec() + reg := NewRegistryBuilder(). + RegisterEncoder(ft1, fc1). + RegisterEncoder(ft2, fc2). + RegisterEncoder(ti2, fc4). + RegisterDefaultEncoder(reflect.Struct, fsc). + RegisterDefaultEncoder(reflect.Slice, fslcc). + RegisterDefaultEncoder(reflect.Array, fslcc). + RegisterDefaultEncoder(reflect.Map, fmc). + RegisterDefaultEncoder(reflect.Ptr, pc). + RegisterDecoder(ft1, fc1). + RegisterDecoder(ft2, fc2). + RegisterDecoder(ti2, fc4). + RegisterDefaultDecoder(reflect.Struct, fsc). + RegisterDefaultDecoder(reflect.Slice, fslcc). + RegisterDefaultDecoder(reflect.Array, fslcc). + RegisterDefaultDecoder(reflect.Map, fmc). + RegisterDefaultDecoder(reflect.Ptr, pc). 
+ Build() + + testCases := []struct { + name string + t reflect.Type + wantcodec Codec + wanterr error + testcache bool + }{ + { + "type registry (pointer)", + ft1, + fc1, + nil, + false, + }, + { + "type registry (non-pointer)", + ft2, + fc2, + nil, + false, + }, + { + "interface registry", + ti2, + fc4, + nil, + true, + }, + { + "default struct codec (pointer)", + reflect.PtrTo(strct), + pc, + nil, + false, + }, + { + "default struct codec (non-pointer)", + strct, + fsc, + nil, + false, + }, + { + "default array codec", + arr, + fslcc, + nil, + false, + }, + { + "default slice codec", + slc, + fslcc, + nil, + false, + }, + { + "default map", + m, + fmc, + nil, + false, + }, + { + "map non-string key", + reflect.TypeOf(map[int]int{}), + fmc, + nil, + false, + }, + { + "No Codec Registered", + ft3, + nil, + ErrNoEncoder{Type: ft3}, + false, + }, + } + + allowunexported := cmp.AllowUnexported(fakeCodec{}, fakeStructCodec{}, fakeSliceCodec{}, fakeMapCodec{}) + comparepc := func(pc1, pc2 *PointerCodec) bool { return true } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + t.Run("Encoder", func(t *testing.T) { + gotcodec, goterr := reg.LookupEncoder(tc.t) + if !cmp.Equal(goterr, tc.wanterr, cmp.Comparer(compareErrors)) { + t.Errorf("Errors did not match. got %v; want %v", goterr, tc.wanterr) + } + if !cmp.Equal(gotcodec, tc.wantcodec, allowunexported, cmp.Comparer(comparepc)) { + t.Errorf("Codecs did not match. got %v; want %v", gotcodec, tc.wantcodec) + } + }) + t.Run("Decoder", func(t *testing.T) { + var wanterr error + if ene, ok := tc.wanterr.(ErrNoEncoder); ok { + wanterr = ErrNoDecoder{Type: ene.Type} + } else { + wanterr = tc.wanterr + } + gotcodec, goterr := reg.LookupDecoder(tc.t) + if !cmp.Equal(goterr, wanterr, cmp.Comparer(compareErrors)) { + t.Errorf("Errors did not match. got %v; want %v", goterr, wanterr) + } + if !cmp.Equal(gotcodec, tc.wantcodec, allowunexported, cmp.Comparer(comparepc)) { + t.Errorf("Codecs did not match. 
got %v; want %v", gotcodec, tc.wantcodec) + t.Errorf("Codecs did not match. got %T; want %T", gotcodec, tc.wantcodec) + } + }) + }) + } + }) + }) + t.Run("Type Map", func(t *testing.T) { + reg := NewRegistryBuilder(). + RegisterTypeMapEntry(bsontype.String, reflect.TypeOf(string(""))). + RegisterTypeMapEntry(bsontype.Int32, reflect.TypeOf(int(0))). + Build() + + var got, want reflect.Type + + want = reflect.TypeOf(string("")) + got, err := reg.LookupTypeMapEntry(bsontype.String) + noerr(t, err) + if got != want { + t.Errorf("Did not get expected type. got %v; want %v", got, want) + } + + want = reflect.TypeOf(int(0)) + got, err = reg.LookupTypeMapEntry(bsontype.Int32) + noerr(t, err) + if got != want { + t.Errorf("Did not get expected type. got %v; want %v", got, want) + } + + want = nil + wanterr := ErrNoTypeMapEntry{Type: bsontype.ObjectID} + got, err = reg.LookupTypeMapEntry(bsontype.ObjectID) + if err != wanterr { + t.Errorf("Did not get expected error. got %v; want %v", err, wanterr) + } + if got != want { + t.Errorf("Did not get expected type. 
got %v; want %v", got, want) + } + }) +} + +type fakeType1 struct{ b bool } +type fakeType2 struct{ b bool } +type fakeType3 struct{ b bool } +type fakeType4 struct{ b bool } +type fakeType5 func(string, string) string +type fakeStructCodec struct{ fakeCodec } +type fakeSliceCodec struct{ fakeCodec } +type fakeMapCodec struct{ fakeCodec } + +type fakeCodec struct{ num int } + +func (fc fakeCodec) EncodeValue(EncodeContext, bsonrw.ValueWriter, reflect.Value) error { + return nil +} +func (fc fakeCodec) DecodeValue(DecodeContext, bsonrw.ValueReader, reflect.Value) error { + return nil +} + +type testInterface1 interface{ test1() } +type testInterface2 interface{ test2() } +type testInterface3 interface{ test3() } +type testInterface4 interface{ test4() } + +func typeComparer(i1, i2 reflect.Type) bool { return i1 == i2 } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go new file mode 100644 index 0000000..fe90272 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go @@ -0,0 +1,359 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "errors" + "fmt" + "reflect" + "sync" + + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" +) + +var defaultStructCodec = &StructCodec{ + cache: make(map[reflect.Type]*structDescription), + parser: DefaultStructTagParser, +} + +// Zeroer allows custom struct types to implement a report of zero +// state. All struct types that don't implement Zeroer or where IsZero +// returns false are considered to be not zero. +type Zeroer interface { + IsZero() bool +} + +// StructCodec is the Codec used for struct values. 
+type StructCodec struct { + cache map[reflect.Type]*structDescription + l sync.RWMutex + parser StructTagParser +} + +var _ ValueEncoder = &StructCodec{} +var _ ValueDecoder = &StructCodec{} + +// NewStructCodec returns a StructCodec that uses p for struct tag parsing. +func NewStructCodec(p StructTagParser) (*StructCodec, error) { + if p == nil { + return nil, errors.New("a StructTagParser must be provided to NewStructCodec") + } + + return &StructCodec{ + cache: make(map[reflect.Type]*structDescription), + parser: p, + }, nil +} + +// EncodeValue handles encoding generic struct types. +func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Kind() != reflect.Struct { + return ValueEncoderError{Name: "StructCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val} + } + + sd, err := sc.describeStruct(r.Registry, val.Type()) + if err != nil { + return err + } + + dw, err := vw.WriteDocument() + if err != nil { + return err + } + var rv reflect.Value + for _, desc := range sd.fl { + if desc.inline == nil { + rv = val.Field(desc.idx) + } else { + rv = val.FieldByIndex(desc.inline) + } + + if desc.encoder == nil { + return ErrNoEncoder{Type: rv.Type()} + } + + encoder := desc.encoder + + iszero := sc.isZero + if iz, ok := encoder.(CodecZeroer); ok { + iszero = iz.IsTypeZero + } + + if desc.omitEmpty && iszero(rv.Interface()) { + continue + } + + vw2, err := dw.WriteDocumentElement(desc.name) + if err != nil { + return err + } + + ectx := EncodeContext{Registry: r.Registry, MinSize: desc.minSize} + err = encoder.EncodeValue(ectx, vw2, rv) + if err != nil { + return err + } + } + + if sd.inlineMap >= 0 { + rv := val.Field(sd.inlineMap) + collisionFn := func(key string) bool { + _, exists := sd.fm[key] + return exists + } + + return defaultValueEncoders.mapEncodeValue(r, dw, rv, collisionFn) + } + + return dw.WriteDocumentEnd() +} + +// DecodeValue implements the Codec interface. 
+// By default, map types in val will not be cleared. If a map has existing key/value pairs, it will be extended with the new ones from vr. +// For slices, the decoder will set the length of the slice to zero and append all elements. The underlying array will not be cleared. +func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Kind() != reflect.Struct { + return ValueDecoderError{Name: "StructCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val} + } + + switch vr.Type() { + case bsontype.Type(0), bsontype.EmbeddedDocument: + default: + return fmt.Errorf("cannot decode %v into a %s", vr.Type(), val.Type()) + } + + sd, err := sc.describeStruct(r.Registry, val.Type()) + if err != nil { + return err + } + + var decoder ValueDecoder + var inlineMap reflect.Value + if sd.inlineMap >= 0 { + inlineMap = val.Field(sd.inlineMap) + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + decoder, err = r.LookupDecoder(inlineMap.Type().Elem()) + if err != nil { + return err + } + } + + dr, err := vr.ReadDocument() + if err != nil { + return err + } + + for { + name, vr, err := dr.ReadElement() + if err == bsonrw.ErrEOD { + break + } + if err != nil { + return err + } + + fd, exists := sd.fm[name] + if !exists { + if sd.inlineMap < 0 { + // The encoding/json package requires a flag to return on error for non-existent fields. + // This functionality seems appropriate for the struct codec. + err = vr.Skip() + if err != nil { + return err + } + continue + } + + elem := reflect.New(inlineMap.Type().Elem()).Elem() + err = decoder.DecodeValue(r, vr, elem) + if err != nil { + return err + } + inlineMap.SetMapIndex(reflect.ValueOf(name), elem) + continue + } + + var field reflect.Value + if fd.inline == nil { + field = val.Field(fd.idx) + } else { + field = val.FieldByIndex(fd.inline) + } + + if !field.CanSet() { // Being settable is a super set of being addressable. 
+ return fmt.Errorf("cannot decode element '%s' into field %v; it is not settable", name, field) + } + if field.Kind() == reflect.Ptr && field.IsNil() { + field.Set(reflect.New(field.Type().Elem())) + } + field = field.Addr() + + dctx := DecodeContext{Registry: r.Registry, Truncate: fd.truncate} + if fd.decoder == nil { + return ErrNoDecoder{Type: field.Elem().Type()} + } + + if decoder, ok := fd.decoder.(ValueDecoder); ok { + err = decoder.DecodeValue(dctx, vr, field.Elem()) + if err != nil { + return err + } + continue + } + err = fd.decoder.DecodeValue(dctx, vr, field) + if err != nil { + return err + } + } + + return nil +} + +func (sc *StructCodec) isZero(i interface{}) bool { + v := reflect.ValueOf(i) + + // check the value validity + if !v.IsValid() { + return true + } + + if z, ok := v.Interface().(Zeroer); ok && (v.Kind() != reflect.Ptr || !v.IsNil()) { + return z.IsZero() + } + + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + + return false +} + +type structDescription struct { + fm map[string]fieldDescription + fl []fieldDescription + inlineMap int +} + +type fieldDescription struct { + name string + idx int + omitEmpty bool + minSize bool + truncate bool + inline []int + encoder ValueEncoder + decoder ValueDecoder +} + +func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescription, error) { + // We need to analyze the struct, including getting the tags, collecting + // information about inlining, and create a map of the field name to the field. 
+ sc.l.RLock() + ds, exists := sc.cache[t] + sc.l.RUnlock() + if exists { + return ds, nil + } + + numFields := t.NumField() + sd := &structDescription{ + fm: make(map[string]fieldDescription, numFields), + fl: make([]fieldDescription, 0, numFields), + inlineMap: -1, + } + + for i := 0; i < numFields; i++ { + sf := t.Field(i) + if sf.PkgPath != "" { + // unexported, ignore + continue + } + + encoder, err := r.LookupEncoder(sf.Type) + if err != nil { + encoder = nil + } + decoder, err := r.LookupDecoder(sf.Type) + if err != nil { + decoder = nil + } + + description := fieldDescription{idx: i, encoder: encoder, decoder: decoder} + + stags, err := sc.parser.ParseStructTags(sf) + if err != nil { + return nil, err + } + if stags.Skip { + continue + } + description.name = stags.Name + description.omitEmpty = stags.OmitEmpty + description.minSize = stags.MinSize + description.truncate = stags.Truncate + + if stags.Inline { + switch sf.Type.Kind() { + case reflect.Map: + if sd.inlineMap >= 0 { + return nil, errors.New("(struct " + t.String() + ") multiple inline maps") + } + if sf.Type.Key() != tString { + return nil, errors.New("(struct " + t.String() + ") inline map must have a string keys") + } + sd.inlineMap = description.idx + case reflect.Struct: + inlinesf, err := sc.describeStruct(r, sf.Type) + if err != nil { + return nil, err + } + for _, fd := range inlinesf.fl { + if _, exists := sd.fm[fd.name]; exists { + return nil, fmt.Errorf("(struct %s) duplicated key %s", t.String(), fd.name) + } + if fd.inline == nil { + fd.inline = []int{i, fd.idx} + } else { + fd.inline = append([]int{i}, fd.inline...) 
+ } + sd.fm[fd.name] = fd + sd.fl = append(sd.fl, fd) + } + default: + return nil, fmt.Errorf("(struct %s) inline fields must be either a struct or a map", t.String()) + } + continue + } + + if _, exists := sd.fm[description.name]; exists { + return nil, fmt.Errorf("struct %s) duplicated key %s", t.String(), description.name) + } + + sd.fm[description.name] = description + sd.fl = append(sd.fl, description) + } + + sc.l.Lock() + sc.cache[t] = sd + sc.l.Unlock() + + return sd, nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec_test.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec_test.go new file mode 100644 index 0000000..cf49541 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec_test.go @@ -0,0 +1,47 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestZeoerInterfaceUsedByDecoder(t *testing.T) { + enc := &StructCodec{} + + // cases that are zero, because they are known types or pointers + var st *nonZeroer + assert.True(t, enc.isZero(st)) + assert.True(t, enc.isZero(0)) + assert.True(t, enc.isZero(false)) + + // cases that shouldn't be zero + st = &nonZeroer{value: false} + assert.False(t, enc.isZero(struct{ val bool }{val: true})) + assert.False(t, enc.isZero(struct{ val bool }{val: false})) + assert.False(t, enc.isZero(st)) + st.value = true + assert.False(t, enc.isZero(st)) + + // a test to see if the interface impacts the outcome + z := zeroTest{} + assert.False(t, enc.isZero(z)) + + z.reportZero = true + assert.True(t, enc.isZero(z)) + + // *time.Time with nil should be zero + var tp *time.Time + assert.True(t, enc.isZero(tp)) + + // actually all 
zeroer if nil should also be zero + var zp *zeroTest + assert.True(t, enc.isZero(zp)) +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go new file mode 100644 index 0000000..69d0ae4 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go @@ -0,0 +1,119 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "reflect" + "strings" +) + +// StructTagParser returns the struct tags for a given struct field. +type StructTagParser interface { + ParseStructTags(reflect.StructField) (StructTags, error) +} + +// StructTagParserFunc is an adapter that allows a generic function to be used +// as a StructTagParser. +type StructTagParserFunc func(reflect.StructField) (StructTags, error) + +// ParseStructTags implements the StructTagParser interface. +func (stpf StructTagParserFunc) ParseStructTags(sf reflect.StructField) (StructTags, error) { + return stpf(sf) +} + +// StructTags represents the struct tag fields that the StructCodec uses during +// the encoding and decoding process. +// +// In the case of a struct, the lowercased field name is used as the key for each exported +// field but this behavior may be changed using a struct tag. The tag may also contain flags to +// adjust the marshalling behavior for the field. +// +// The properties are defined below: +// +// OmitEmpty Only include the field if it's not set to the zero value for the type or to +// empty slices or maps. +// +// MinSize Marshal an integer of a type larger than 32 bits value as an int32, if that's +// feasible while preserving the numeric value. 
+// +// Truncate When unmarshaling a BSON double, it is permitted to lose precision to fit within +// a float32. +// +// Inline Inline the field, which must be a struct or a map, causing all of its fields +// or keys to be processed as if they were part of the outer struct. For maps, +// keys must not conflict with the bson keys of other struct fields. +// +// Skip This struct field should be skipped. This is usually denoted by parsing a "-" +// for the name. +// +// TODO(skriptble): Add tags for undefined as nil and for null as nil. +type StructTags struct { + Name string + OmitEmpty bool + MinSize bool + Truncate bool + Inline bool + Skip bool +} + +// DefaultStructTagParser is the StructTagParser used by the StructCodec by default. +// It will handle the bson struct tag. See the documentation for StructTags to see +// what each of the returned fields means. +// +// If there is no name in the struct tag fields, the struct field name is lowercased. +// The tag formats accepted are: +// +// "[][,[,]]" +// +// `(...) bson:"[][,[,]]" (...)` +// +// An example: +// +// type T struct { +// A bool +// B int "myb" +// C string "myc,omitempty" +// D string `bson:",omitempty" json:"jsonkey"` +// E int64 ",minsize" +// F int64 "myf,omitempty,minsize" +// } +// +// A struct tag either consisting entirely of '-' or with a bson key with a +// value consisting entirely of '-' will return a StructTags with Skip true and +// the remaining fields will be their default values. 
+var DefaultStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) { + key := strings.ToLower(sf.Name) + tag, ok := sf.Tag.Lookup("bson") + if !ok && !strings.Contains(string(sf.Tag), ":") && len(sf.Tag) > 0 { + tag = string(sf.Tag) + } + var st StructTags + if tag == "-" { + st.Skip = true + return st, nil + } + + for idx, str := range strings.Split(tag, ",") { + if idx == 0 && str != "" { + key = str + } + switch str { + case "omitempty": + st.OmitEmpty = true + case "minsize": + st.MinSize = true + case "truncate": + st.Truncate = true + case "inline": + st.Inline = true + } + } + + st.Name = key + + return st, nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser_test.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser_test.go new file mode 100644 index 0000000..f91e8b6 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser_test.go @@ -0,0 +1,73 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "reflect" + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestDefaultStructTagParser(t *testing.T) { + testCases := []struct { + name string + sf reflect.StructField + want StructTags + }{ + { + "no bson tag", + reflect.StructField{Name: "foo", Tag: reflect.StructTag("bar")}, + StructTags{Name: "bar"}, + }, + { + "empty", + reflect.StructField{Name: "foo", Tag: reflect.StructTag("")}, + StructTags{Name: "foo"}, + }, + { + "tag only dash", + reflect.StructField{Name: "foo", Tag: reflect.StructTag("-")}, + StructTags{Skip: true}, + }, + { + "bson tag only dash", + reflect.StructField{Name: "foo", Tag: reflect.StructTag(`bson:"-"`)}, + StructTags{Skip: true}, + }, + { + "all options", + reflect.StructField{Name: "foo", Tag: reflect.StructTag(`bar,omitempty,minsize,truncate,inline`)}, + StructTags{Name: "bar", OmitEmpty: true, MinSize: true, Truncate: true, Inline: true}, + }, + { + "all options default name", + reflect.StructField{Name: "foo", Tag: reflect.StructTag(`,omitempty,minsize,truncate,inline`)}, + StructTags{Name: "foo", OmitEmpty: true, MinSize: true, Truncate: true, Inline: true}, + }, + { + "bson tag all options", + reflect.StructField{Name: "foo", Tag: reflect.StructTag(`bson:"bar,omitempty,minsize,truncate,inline"`)}, + StructTags{Name: "bar", OmitEmpty: true, MinSize: true, Truncate: true, Inline: true}, + }, + { + "bson tag all options default name", + reflect.StructField{Name: "foo", Tag: reflect.StructTag(`bson:",omitempty,minsize,truncate,inline"`)}, + StructTags{Name: "foo", OmitEmpty: true, MinSize: true, Truncate: true, Inline: true}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got, err := DefaultStructTagParser(tc.sf) + noerr(t, err) + if !cmp.Equal(got, tc.want) { + t.Errorf("Returned struct tags do not match. 
got %#v; want %#v", got, tc.want) + } + }) + } +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go new file mode 100644 index 0000000..7726487 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go @@ -0,0 +1,80 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "encoding/json" + "net/url" + "reflect" + "time" + + "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" +) + +var ptBool = reflect.TypeOf((*bool)(nil)) +var ptInt8 = reflect.TypeOf((*int8)(nil)) +var ptInt16 = reflect.TypeOf((*int16)(nil)) +var ptInt32 = reflect.TypeOf((*int32)(nil)) +var ptInt64 = reflect.TypeOf((*int64)(nil)) +var ptInt = reflect.TypeOf((*int)(nil)) +var ptUint8 = reflect.TypeOf((*uint8)(nil)) +var ptUint16 = reflect.TypeOf((*uint16)(nil)) +var ptUint32 = reflect.TypeOf((*uint32)(nil)) +var ptUint64 = reflect.TypeOf((*uint64)(nil)) +var ptUint = reflect.TypeOf((*uint)(nil)) +var ptFloat32 = reflect.TypeOf((*float32)(nil)) +var ptFloat64 = reflect.TypeOf((*float64)(nil)) +var ptString = reflect.TypeOf((*string)(nil)) + +var tBool = reflect.TypeOf(false) +var tFloat32 = reflect.TypeOf(float32(0)) +var tFloat64 = reflect.TypeOf(float64(0)) +var tInt = reflect.TypeOf(int(0)) +var tInt8 = reflect.TypeOf(int8(0)) +var tInt16 = reflect.TypeOf(int16(0)) +var tInt32 = reflect.TypeOf(int32(0)) +var tInt64 = reflect.TypeOf(int64(0)) +var tString = reflect.TypeOf("") +var tTime = reflect.TypeOf(time.Time{}) +var tUint = reflect.TypeOf(uint(0)) +var tUint8 = reflect.TypeOf(uint8(0)) +var tUint16 = reflect.TypeOf(uint16(0)) +var tUint32 = reflect.TypeOf(uint32(0)) +var tUint64 = 
reflect.TypeOf(uint64(0)) + +var tEmpty = reflect.TypeOf((*interface{})(nil)).Elem() +var tByteSlice = reflect.TypeOf([]byte(nil)) +var tByte = reflect.TypeOf(byte(0x00)) +var tURL = reflect.TypeOf(url.URL{}) +var tJSONNumber = reflect.TypeOf(json.Number("")) + +var tValueMarshaler = reflect.TypeOf((*ValueMarshaler)(nil)).Elem() +var tValueUnmarshaler = reflect.TypeOf((*ValueUnmarshaler)(nil)).Elem() +var tMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem() +var tUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem() +var tProxy = reflect.TypeOf((*Proxy)(nil)).Elem() + +var tBinary = reflect.TypeOf(primitive.Binary{}) +var tUndefined = reflect.TypeOf(primitive.Undefined{}) +var tOID = reflect.TypeOf(primitive.ObjectID{}) +var tDateTime = reflect.TypeOf(primitive.DateTime(0)) +var tNull = reflect.TypeOf(primitive.Null{}) +var tRegex = reflect.TypeOf(primitive.Regex{}) +var tCodeWithScope = reflect.TypeOf(primitive.CodeWithScope{}) +var tDBPointer = reflect.TypeOf(primitive.DBPointer{}) +var tJavaScript = reflect.TypeOf(primitive.JavaScript("")) +var tSymbol = reflect.TypeOf(primitive.Symbol("")) +var tTimestamp = reflect.TypeOf(primitive.Timestamp{}) +var tDecimal = reflect.TypeOf(primitive.Decimal128{}) +var tMinKey = reflect.TypeOf(primitive.MinKey{}) +var tMaxKey = reflect.TypeOf(primitive.MaxKey{}) +var tD = reflect.TypeOf(primitive.D{}) +var tA = reflect.TypeOf(primitive.A{}) +var tE = reflect.TypeOf(primitive.E{}) + +var tCoreDocument = reflect.TypeOf(bsoncore.Document{}) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/bsonrw_test.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/bsonrw_test.go new file mode 100644 index 0000000..4657961 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/bsonrw_test.go @@ -0,0 +1,33 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonrw + +import "testing" + +func compareErrors(err1, err2 error) bool { + if err1 == nil && err2 == nil { + return true + } + + if err1 == nil || err2 == nil { + return false + } + + if err1.Error() != err2.Error() { + return false + } + + return true +} + +func noerr(t *testing.T, err error) { + if err != nil { + t.Helper() + t.Errorf("Unexpected error: (%T)%v", err, err) + t.FailNow() + } +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/bsonrwtest/bsonrwtest.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/bsonrwtest/bsonrwtest.go new file mode 100644 index 0000000..bbb2570 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/bsonrwtest/bsonrwtest.go @@ -0,0 +1,847 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonrwtest // import "go.mongodb.org/mongo-driver/bson/bsonrw/bsonrwtest" + +import ( + "testing" + + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" +) + +var _ bsonrw.ValueReader = (*ValueReaderWriter)(nil) +var _ bsonrw.ValueWriter = (*ValueReaderWriter)(nil) + +// Invoked is a type used to indicate what method was called last. +type Invoked byte + +// These are the different methods that can be invoked. 
+const ( + Nothing Invoked = iota + ReadArray + ReadBinary + ReadBoolean + ReadDocument + ReadCodeWithScope + ReadDBPointer + ReadDateTime + ReadDecimal128 + ReadDouble + ReadInt32 + ReadInt64 + ReadJavascript + ReadMaxKey + ReadMinKey + ReadNull + ReadObjectID + ReadRegex + ReadString + ReadSymbol + ReadTimestamp + ReadUndefined + ReadElement + ReadValue + WriteArray + WriteBinary + WriteBinaryWithSubtype + WriteBoolean + WriteCodeWithScope + WriteDBPointer + WriteDateTime + WriteDecimal128 + WriteDouble + WriteInt32 + WriteInt64 + WriteJavascript + WriteMaxKey + WriteMinKey + WriteNull + WriteObjectID + WriteRegex + WriteString + WriteDocument + WriteSymbol + WriteTimestamp + WriteUndefined + WriteDocumentElement + WriteDocumentEnd + WriteArrayElement + WriteArrayEnd + Skip +) + +func (i Invoked) String() string { + switch i { + case Nothing: + return "Nothing" + case ReadArray: + return "ReadArray" + case ReadBinary: + return "ReadBinary" + case ReadBoolean: + return "ReadBoolean" + case ReadDocument: + return "ReadDocument" + case ReadCodeWithScope: + return "ReadCodeWithScope" + case ReadDBPointer: + return "ReadDBPointer" + case ReadDateTime: + return "ReadDateTime" + case ReadDecimal128: + return "ReadDecimal128" + case ReadDouble: + return "ReadDouble" + case ReadInt32: + return "ReadInt32" + case ReadInt64: + return "ReadInt64" + case ReadJavascript: + return "ReadJavascript" + case ReadMaxKey: + return "ReadMaxKey" + case ReadMinKey: + return "ReadMinKey" + case ReadNull: + return "ReadNull" + case ReadObjectID: + return "ReadObjectID" + case ReadRegex: + return "ReadRegex" + case ReadString: + return "ReadString" + case ReadSymbol: + return "ReadSymbol" + case ReadTimestamp: + return "ReadTimestamp" + case ReadUndefined: + return "ReadUndefined" + case ReadElement: + return "ReadElement" + case ReadValue: + return "ReadValue" + case WriteArray: + return "WriteArray" + case WriteBinary: + return "WriteBinary" + case WriteBinaryWithSubtype: + return 
"WriteBinaryWithSubtype" + case WriteBoolean: + return "WriteBoolean" + case WriteCodeWithScope: + return "WriteCodeWithScope" + case WriteDBPointer: + return "WriteDBPointer" + case WriteDateTime: + return "WriteDateTime" + case WriteDecimal128: + return "WriteDecimal128" + case WriteDouble: + return "WriteDouble" + case WriteInt32: + return "WriteInt32" + case WriteInt64: + return "WriteInt64" + case WriteJavascript: + return "WriteJavascript" + case WriteMaxKey: + return "WriteMaxKey" + case WriteMinKey: + return "WriteMinKey" + case WriteNull: + return "WriteNull" + case WriteObjectID: + return "WriteObjectID" + case WriteRegex: + return "WriteRegex" + case WriteString: + return "WriteString" + case WriteDocument: + return "WriteDocument" + case WriteSymbol: + return "WriteSymbol" + case WriteTimestamp: + return "WriteTimestamp" + case WriteUndefined: + return "WriteUndefined" + case WriteDocumentElement: + return "WriteDocumentElement" + case WriteDocumentEnd: + return "WriteDocumentEnd" + case WriteArrayElement: + return "WriteArrayElement" + case WriteArrayEnd: + return "WriteArrayEnd" + default: + return "" + } +} + +// ValueReaderWriter is a test implementation of a bsonrw.ValueReader and bsonrw.ValueWriter +type ValueReaderWriter struct { + T *testing.T + Invoked Invoked + Return interface{} // Can be a primitive or a bsoncore.Value + BSONType bsontype.Type + Err error + ErrAfter Invoked // error after this method is called + depth uint64 +} + +// prevent infinite recursion. +func (llvrw *ValueReaderWriter) checkdepth() { + llvrw.depth++ + if llvrw.depth > 1000 { + panic("max depth exceeded") + } +} + +// Type implements the bsonrw.ValueReader interface. +func (llvrw *ValueReaderWriter) Type() bsontype.Type { + llvrw.checkdepth() + return llvrw.BSONType +} + +// Skip implements the bsonrw.ValueReader interface. 
+func (llvrw *ValueReaderWriter) Skip() error { + llvrw.checkdepth() + llvrw.Invoked = Skip + if llvrw.ErrAfter == llvrw.Invoked { + return llvrw.Err + } + return nil +} + +// ReadArray implements the bsonrw.ValueReader interface. +func (llvrw *ValueReaderWriter) ReadArray() (bsonrw.ArrayReader, error) { + llvrw.checkdepth() + llvrw.Invoked = ReadArray + if llvrw.ErrAfter == llvrw.Invoked { + return nil, llvrw.Err + } + + return llvrw, nil +} + +// ReadBinary implements the bsonrw.ValueReader interface. +func (llvrw *ValueReaderWriter) ReadBinary() (b []byte, btype byte, err error) { + llvrw.checkdepth() + llvrw.Invoked = ReadBinary + if llvrw.ErrAfter == llvrw.Invoked { + return nil, 0x00, llvrw.Err + } + + switch tt := llvrw.Return.(type) { + case bsoncore.Value: + subtype, data, _, ok := bsoncore.ReadBinary(tt.Data) + if !ok { + llvrw.T.Error("Invalid Value provided for return value of ReadBinary.") + return nil, 0x00, nil + } + return data, subtype, nil + default: + llvrw.T.Errorf("Incorrect type provided for return value of ReadBinary: %T", llvrw.Return) + return nil, 0x00, nil + } +} + +// ReadBoolean implements the bsonrw.ValueReader interface. +func (llvrw *ValueReaderWriter) ReadBoolean() (bool, error) { + llvrw.checkdepth() + llvrw.Invoked = ReadBoolean + if llvrw.ErrAfter == llvrw.Invoked { + return false, llvrw.Err + } + + switch tt := llvrw.Return.(type) { + case bool: + return tt, nil + case bsoncore.Value: + b, _, ok := bsoncore.ReadBoolean(tt.Data) + if !ok { + llvrw.T.Error("Invalid Value provided for return value of ReadBoolean.") + return false, nil + } + return b, nil + default: + llvrw.T.Errorf("Incorrect type provided for return value of ReadBoolean: %T", llvrw.Return) + return false, nil + } +} + +// ReadDocument implements the bsonrw.ValueReader interface. 
+func (llvrw *ValueReaderWriter) ReadDocument() (bsonrw.DocumentReader, error) { + llvrw.checkdepth() + llvrw.Invoked = ReadDocument + if llvrw.ErrAfter == llvrw.Invoked { + return nil, llvrw.Err + } + + return llvrw, nil +} + +// ReadCodeWithScope implements the bsonrw.ValueReader interface. +func (llvrw *ValueReaderWriter) ReadCodeWithScope() (code string, dr bsonrw.DocumentReader, err error) { + llvrw.checkdepth() + llvrw.Invoked = ReadCodeWithScope + if llvrw.ErrAfter == llvrw.Invoked { + return "", nil, llvrw.Err + } + + return "", llvrw, nil +} + +// ReadDBPointer implements the bsonrw.ValueReader interface. +func (llvrw *ValueReaderWriter) ReadDBPointer() (ns string, oid primitive.ObjectID, err error) { + llvrw.checkdepth() + llvrw.Invoked = ReadDBPointer + if llvrw.ErrAfter == llvrw.Invoked { + return "", primitive.ObjectID{}, llvrw.Err + } + + switch tt := llvrw.Return.(type) { + case bsoncore.Value: + ns, oid, _, ok := bsoncore.ReadDBPointer(tt.Data) + if !ok { + llvrw.T.Error("Invalid Value instance provided for return value of ReadDBPointer") + return "", primitive.ObjectID{}, nil + } + return ns, oid, nil + default: + llvrw.T.Errorf("Incorrect type provided for return value of ReadDBPointer: %T", llvrw.Return) + return "", primitive.ObjectID{}, nil + } +} + +// ReadDateTime implements the bsonrw.ValueReader interface. +func (llvrw *ValueReaderWriter) ReadDateTime() (int64, error) { + llvrw.checkdepth() + llvrw.Invoked = ReadDateTime + if llvrw.ErrAfter == llvrw.Invoked { + return 0, llvrw.Err + } + + dt, ok := llvrw.Return.(int64) + if !ok { + llvrw.T.Errorf("Incorrect type provided for return value of ReadDateTime: %T", llvrw.Return) + return 0, nil + } + + return dt, nil +} + +// ReadDecimal128 implements the bsonrw.ValueReader interface. 
+func (llvrw *ValueReaderWriter) ReadDecimal128() (primitive.Decimal128, error) { + llvrw.checkdepth() + llvrw.Invoked = ReadDecimal128 + if llvrw.ErrAfter == llvrw.Invoked { + return primitive.Decimal128{}, llvrw.Err + } + + d128, ok := llvrw.Return.(primitive.Decimal128) + if !ok { + llvrw.T.Errorf("Incorrect type provided for return value of ReadDecimal128: %T", llvrw.Return) + return primitive.Decimal128{}, nil + } + + return d128, nil +} + +// ReadDouble implements the bsonrw.ValueReader interface. +func (llvrw *ValueReaderWriter) ReadDouble() (float64, error) { + llvrw.checkdepth() + llvrw.Invoked = ReadDouble + if llvrw.ErrAfter == llvrw.Invoked { + return 0, llvrw.Err + } + + f64, ok := llvrw.Return.(float64) + if !ok { + llvrw.T.Errorf("Incorrect type provided for return value of ReadDouble: %T", llvrw.Return) + return 0, nil + } + + return f64, nil +} + +// ReadInt32 implements the bsonrw.ValueReader interface. +func (llvrw *ValueReaderWriter) ReadInt32() (int32, error) { + llvrw.checkdepth() + llvrw.Invoked = ReadInt32 + if llvrw.ErrAfter == llvrw.Invoked { + return 0, llvrw.Err + } + + i32, ok := llvrw.Return.(int32) + if !ok { + llvrw.T.Errorf("Incorrect type provided for return value of ReadInt32: %T", llvrw.Return) + return 0, nil + } + + return i32, nil +} + +// ReadInt64 implements the bsonrw.ValueReader interface. +func (llvrw *ValueReaderWriter) ReadInt64() (int64, error) { + llvrw.checkdepth() + llvrw.Invoked = ReadInt64 + if llvrw.ErrAfter == llvrw.Invoked { + return 0, llvrw.Err + } + i64, ok := llvrw.Return.(int64) + if !ok { + llvrw.T.Errorf("Incorrect type provided for return value of ReadInt64: %T", llvrw.Return) + return 0, nil + } + + return i64, nil +} + +// ReadJavascript implements the bsonrw.ValueReader interface. 
+func (llvrw *ValueReaderWriter) ReadJavascript() (code string, err error) { + llvrw.checkdepth() + llvrw.Invoked = ReadJavascript + if llvrw.ErrAfter == llvrw.Invoked { + return "", llvrw.Err + } + js, ok := llvrw.Return.(string) + if !ok { + llvrw.T.Errorf("Incorrect type provided for return value of ReadJavascript: %T", llvrw.Return) + return "", nil + } + + return js, nil +} + +// ReadMaxKey implements the bsonrw.ValueReader interface. +func (llvrw *ValueReaderWriter) ReadMaxKey() error { + llvrw.checkdepth() + llvrw.Invoked = ReadMaxKey + if llvrw.ErrAfter == llvrw.Invoked { + return llvrw.Err + } + + return nil +} + +// ReadMinKey implements the bsonrw.ValueReader interface. +func (llvrw *ValueReaderWriter) ReadMinKey() error { + llvrw.checkdepth() + llvrw.Invoked = ReadMinKey + if llvrw.ErrAfter == llvrw.Invoked { + return llvrw.Err + } + + return nil +} + +// ReadNull implements the bsonrw.ValueReader interface. +func (llvrw *ValueReaderWriter) ReadNull() error { + llvrw.checkdepth() + llvrw.Invoked = ReadNull + if llvrw.ErrAfter == llvrw.Invoked { + return llvrw.Err + } + + return nil +} + +// ReadObjectID implements the bsonrw.ValueReader interface. +func (llvrw *ValueReaderWriter) ReadObjectID() (primitive.ObjectID, error) { + llvrw.checkdepth() + llvrw.Invoked = ReadObjectID + if llvrw.ErrAfter == llvrw.Invoked { + return primitive.ObjectID{}, llvrw.Err + } + oid, ok := llvrw.Return.(primitive.ObjectID) + if !ok { + llvrw.T.Errorf("Incorrect type provided for return value of ReadObjectID: %T", llvrw.Return) + return primitive.ObjectID{}, nil + } + + return oid, nil +} + +// ReadRegex implements the bsonrw.ValueReader interface. 
+func (llvrw *ValueReaderWriter) ReadRegex() (pattern string, options string, err error) { + llvrw.checkdepth() + llvrw.Invoked = ReadRegex + if llvrw.ErrAfter == llvrw.Invoked { + return "", "", llvrw.Err + } + switch tt := llvrw.Return.(type) { + case bsoncore.Value: + pattern, options, _, ok := bsoncore.ReadRegex(tt.Data) + if !ok { + llvrw.T.Error("Invalid Value instance provided for ReadRegex") + return "", "", nil + } + return pattern, options, nil + default: + llvrw.T.Errorf("Incorrect type provided for return value of ReadRegex: %T", llvrw.Return) + return "", "", nil + } +} + +// ReadString implements the bsonrw.ValueReader interface. +func (llvrw *ValueReaderWriter) ReadString() (string, error) { + llvrw.checkdepth() + llvrw.Invoked = ReadString + if llvrw.ErrAfter == llvrw.Invoked { + return "", llvrw.Err + } + str, ok := llvrw.Return.(string) + if !ok { + llvrw.T.Errorf("Incorrect type provided for return value of ReadString: %T", llvrw.Return) + return "", nil + } + + return str, nil +} + +// ReadSymbol implements the bsonrw.ValueReader interface. +func (llvrw *ValueReaderWriter) ReadSymbol() (symbol string, err error) { + llvrw.checkdepth() + llvrw.Invoked = ReadSymbol + if llvrw.ErrAfter == llvrw.Invoked { + return "", llvrw.Err + } + switch tt := llvrw.Return.(type) { + case string: + return tt, nil + case bsoncore.Value: + symbol, _, ok := bsoncore.ReadSymbol(tt.Data) + if !ok { + llvrw.T.Error("Invalid Value instance provided for ReadSymbol") + return "", nil + } + return symbol, nil + default: + llvrw.T.Errorf("Incorrect type provided for return value of ReadSymbol: %T", llvrw.Return) + return "", nil + } +} + +// ReadTimestamp implements the bsonrw.ValueReader interface. 
+func (llvrw *ValueReaderWriter) ReadTimestamp() (t uint32, i uint32, err error) { + llvrw.checkdepth() + llvrw.Invoked = ReadTimestamp + if llvrw.ErrAfter == llvrw.Invoked { + return 0, 0, llvrw.Err + } + switch tt := llvrw.Return.(type) { + case bsoncore.Value: + t, i, _, ok := bsoncore.ReadTimestamp(tt.Data) + if !ok { + llvrw.T.Errorf("Invalid Value instance provided for return value of ReadTimestamp") + return 0, 0, nil + } + return t, i, nil + default: + llvrw.T.Errorf("Incorrect type provided for return value of ReadTimestamp: %T", llvrw.Return) + return 0, 0, nil + } +} + +// ReadUndefined implements the bsonrw.ValueReader interface. +func (llvrw *ValueReaderWriter) ReadUndefined() error { + llvrw.checkdepth() + llvrw.Invoked = ReadUndefined + if llvrw.ErrAfter == llvrw.Invoked { + return llvrw.Err + } + + return nil +} + +// WriteArray implements the bsonrw.ValueWriter interface. +func (llvrw *ValueReaderWriter) WriteArray() (bsonrw.ArrayWriter, error) { + llvrw.checkdepth() + llvrw.Invoked = WriteArray + if llvrw.ErrAfter == llvrw.Invoked { + return nil, llvrw.Err + } + return llvrw, nil +} + +// WriteBinary implements the bsonrw.ValueWriter interface. +func (llvrw *ValueReaderWriter) WriteBinary(b []byte) error { + llvrw.checkdepth() + llvrw.Invoked = WriteBinary + if llvrw.ErrAfter == llvrw.Invoked { + return llvrw.Err + } + return nil +} + +// WriteBinaryWithSubtype implements the bsonrw.ValueWriter interface. +func (llvrw *ValueReaderWriter) WriteBinaryWithSubtype(b []byte, btype byte) error { + llvrw.checkdepth() + llvrw.Invoked = WriteBinaryWithSubtype + if llvrw.ErrAfter == llvrw.Invoked { + return llvrw.Err + } + return nil +} + +// WriteBoolean implements the bsonrw.ValueWriter interface. +func (llvrw *ValueReaderWriter) WriteBoolean(bool) error { + llvrw.checkdepth() + llvrw.Invoked = WriteBoolean + if llvrw.ErrAfter == llvrw.Invoked { + return llvrw.Err + } + return nil +} + +// WriteCodeWithScope implements the bsonrw.ValueWriter interface. 
+func (llvrw *ValueReaderWriter) WriteCodeWithScope(code string) (bsonrw.DocumentWriter, error) { + llvrw.checkdepth() + llvrw.Invoked = WriteCodeWithScope + if llvrw.ErrAfter == llvrw.Invoked { + return nil, llvrw.Err + } + return llvrw, nil +} + +// WriteDBPointer implements the bsonrw.ValueWriter interface. +func (llvrw *ValueReaderWriter) WriteDBPointer(ns string, oid primitive.ObjectID) error { + llvrw.checkdepth() + llvrw.Invoked = WriteDBPointer + if llvrw.ErrAfter == llvrw.Invoked { + return llvrw.Err + } + return nil +} + +// WriteDateTime implements the bsonrw.ValueWriter interface. +func (llvrw *ValueReaderWriter) WriteDateTime(dt int64) error { + llvrw.checkdepth() + llvrw.Invoked = WriteDateTime + if llvrw.ErrAfter == llvrw.Invoked { + return llvrw.Err + } + return nil +} + +// WriteDecimal128 implements the bsonrw.ValueWriter interface. +func (llvrw *ValueReaderWriter) WriteDecimal128(primitive.Decimal128) error { + llvrw.checkdepth() + llvrw.Invoked = WriteDecimal128 + if llvrw.ErrAfter == llvrw.Invoked { + return llvrw.Err + } + return nil +} + +// WriteDouble implements the bsonrw.ValueWriter interface. +func (llvrw *ValueReaderWriter) WriteDouble(float64) error { + llvrw.checkdepth() + llvrw.Invoked = WriteDouble + if llvrw.ErrAfter == llvrw.Invoked { + return llvrw.Err + } + return nil +} + +// WriteInt32 implements the bsonrw.ValueWriter interface. +func (llvrw *ValueReaderWriter) WriteInt32(int32) error { + llvrw.checkdepth() + llvrw.Invoked = WriteInt32 + if llvrw.ErrAfter == llvrw.Invoked { + return llvrw.Err + } + return nil +} + +// WriteInt64 implements the bsonrw.ValueWriter interface. +func (llvrw *ValueReaderWriter) WriteInt64(int64) error { + llvrw.checkdepth() + llvrw.Invoked = WriteInt64 + if llvrw.ErrAfter == llvrw.Invoked { + return llvrw.Err + } + return nil +} + +// WriteJavascript implements the bsonrw.ValueWriter interface. 
+func (llvrw *ValueReaderWriter) WriteJavascript(code string) error { + llvrw.checkdepth() + llvrw.Invoked = WriteJavascript + if llvrw.ErrAfter == llvrw.Invoked { + return llvrw.Err + } + return nil +} + +// WriteMaxKey implements the bsonrw.ValueWriter interface. +func (llvrw *ValueReaderWriter) WriteMaxKey() error { + llvrw.checkdepth() + llvrw.Invoked = WriteMaxKey + if llvrw.ErrAfter == llvrw.Invoked { + return llvrw.Err + } + return nil +} + +// WriteMinKey implements the bsonrw.ValueWriter interface. +func (llvrw *ValueReaderWriter) WriteMinKey() error { + llvrw.checkdepth() + llvrw.Invoked = WriteMinKey + if llvrw.ErrAfter == llvrw.Invoked { + return llvrw.Err + } + return nil +} + +// WriteNull implements the bsonrw.ValueWriter interface. +func (llvrw *ValueReaderWriter) WriteNull() error { + llvrw.checkdepth() + llvrw.Invoked = WriteNull + if llvrw.ErrAfter == llvrw.Invoked { + return llvrw.Err + } + return nil +} + +// WriteObjectID implements the bsonrw.ValueWriter interface. +func (llvrw *ValueReaderWriter) WriteObjectID(primitive.ObjectID) error { + llvrw.checkdepth() + llvrw.Invoked = WriteObjectID + if llvrw.ErrAfter == llvrw.Invoked { + return llvrw.Err + } + return nil +} + +// WriteRegex implements the bsonrw.ValueWriter interface. +func (llvrw *ValueReaderWriter) WriteRegex(pattern string, options string) error { + llvrw.checkdepth() + llvrw.Invoked = WriteRegex + if llvrw.ErrAfter == llvrw.Invoked { + return llvrw.Err + } + return nil +} + +// WriteString implements the bsonrw.ValueWriter interface. +func (llvrw *ValueReaderWriter) WriteString(string) error { + llvrw.checkdepth() + llvrw.Invoked = WriteString + if llvrw.ErrAfter == llvrw.Invoked { + return llvrw.Err + } + return nil +} + +// WriteDocument implements the bsonrw.ValueWriter interface. 
+func (llvrw *ValueReaderWriter) WriteDocument() (bsonrw.DocumentWriter, error) { + llvrw.checkdepth() + llvrw.Invoked = WriteDocument + if llvrw.ErrAfter == llvrw.Invoked { + return nil, llvrw.Err + } + return llvrw, nil +} + +// WriteSymbol implements the bsonrw.ValueWriter interface. +func (llvrw *ValueReaderWriter) WriteSymbol(symbol string) error { + llvrw.checkdepth() + llvrw.Invoked = WriteSymbol + if llvrw.ErrAfter == llvrw.Invoked { + return llvrw.Err + } + return nil +} + +// WriteTimestamp implements the bsonrw.ValueWriter interface. +func (llvrw *ValueReaderWriter) WriteTimestamp(t uint32, i uint32) error { + llvrw.checkdepth() + llvrw.Invoked = WriteTimestamp + if llvrw.ErrAfter == llvrw.Invoked { + return llvrw.Err + } + return nil +} + +// WriteUndefined implements the bsonrw.ValueWriter interface. +func (llvrw *ValueReaderWriter) WriteUndefined() error { + llvrw.checkdepth() + llvrw.Invoked = WriteUndefined + if llvrw.ErrAfter == llvrw.Invoked { + return llvrw.Err + } + return nil +} + +// ReadElement implements the bsonrw.DocumentReader interface. +func (llvrw *ValueReaderWriter) ReadElement() (string, bsonrw.ValueReader, error) { + llvrw.checkdepth() + llvrw.Invoked = ReadElement + if llvrw.ErrAfter == llvrw.Invoked { + return "", nil, llvrw.Err + } + + return "", llvrw, nil +} + +// WriteDocumentElement implements the bsonrw.DocumentWriter interface. +func (llvrw *ValueReaderWriter) WriteDocumentElement(string) (bsonrw.ValueWriter, error) { + llvrw.checkdepth() + llvrw.Invoked = WriteDocumentElement + if llvrw.ErrAfter == llvrw.Invoked { + return nil, llvrw.Err + } + + return llvrw, nil +} + +// WriteDocumentEnd implements the bsonrw.DocumentWriter interface. +func (llvrw *ValueReaderWriter) WriteDocumentEnd() error { + llvrw.checkdepth() + llvrw.Invoked = WriteDocumentEnd + if llvrw.ErrAfter == llvrw.Invoked { + return llvrw.Err + } + + return nil +} + +// ReadValue implements the bsonrw.ArrayReader interface. 
+func (llvrw *ValueReaderWriter) ReadValue() (bsonrw.ValueReader, error) { + llvrw.checkdepth() + llvrw.Invoked = ReadValue + if llvrw.ErrAfter == llvrw.Invoked { + return nil, llvrw.Err + } + + return llvrw, nil +} + +// WriteArrayElement implements the bsonrw.ArrayWriter interface. +func (llvrw *ValueReaderWriter) WriteArrayElement() (bsonrw.ValueWriter, error) { + llvrw.checkdepth() + llvrw.Invoked = WriteArrayElement + if llvrw.ErrAfter == llvrw.Invoked { + return nil, llvrw.Err + } + + return llvrw, nil +} + +// WriteArrayEnd implements the bsonrw.ArrayWriter interface. +func (llvrw *ValueReaderWriter) WriteArrayEnd() error { + llvrw.checkdepth() + llvrw.Invoked = WriteArrayEnd + if llvrw.ErrAfter == llvrw.Invoked { + return llvrw.Err + } + + return nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go new file mode 100644 index 0000000..02e3a7e --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go @@ -0,0 +1,389 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonrw + +import ( + "fmt" + "io" + + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" +) + +// Copier is a type that allows copying between ValueReaders, ValueWriters, and +// []byte values. +type Copier struct{} + +// NewCopier creates a new copier with the given registry. If a nil registry is provided +// a default registry is used. +func NewCopier() Copier { + return Copier{} +} + +// CopyDocument handles copying a document from src to dst. 
+func CopyDocument(dst ValueWriter, src ValueReader) error { + return Copier{}.CopyDocument(dst, src) +} + +// CopyDocument handles copying one document from the src to the dst. +func (c Copier) CopyDocument(dst ValueWriter, src ValueReader) error { + dr, err := src.ReadDocument() + if err != nil { + return err + } + + dw, err := dst.WriteDocument() + if err != nil { + return err + } + + return c.copyDocumentCore(dw, dr) +} + +// CopyDocumentFromBytes copies the values from a BSON document represented as a +// []byte to a ValueWriter. +func (c Copier) CopyDocumentFromBytes(dst ValueWriter, src []byte) error { + dw, err := dst.WriteDocument() + if err != nil { + return err + } + + err = c.CopyBytesToDocumentWriter(dw, src) + if err != nil { + return err + } + + return dw.WriteDocumentEnd() +} + +// CopyBytesToDocumentWriter copies the values from a BSON document represented as a []byte to a +// DocumentWriter. +func (c Copier) CopyBytesToDocumentWriter(dst DocumentWriter, src []byte) error { + // TODO(skriptble): Create errors types here. Anything thats a tag should be a property. + length, rem, ok := bsoncore.ReadLength(src) + if !ok { + return fmt.Errorf("couldn't read length from src, not enough bytes. length=%d", len(src)) + } + if len(src) < int(length) { + return fmt.Errorf("length read exceeds number of bytes available. length=%d bytes=%d", len(src), length) + } + rem = rem[:length-4] + + var t bsontype.Type + var key string + var val bsoncore.Value + for { + t, rem, ok = bsoncore.ReadType(rem) + if !ok { + return io.EOF + } + if t == bsontype.Type(0) { + if len(rem) != 0 { + return fmt.Errorf("document end byte found before end of document. remaining bytes=%v", rem) + } + break + } + + key, rem, ok = bsoncore.ReadKey(rem) + if !ok { + return fmt.Errorf("invalid key found. 
remaining bytes=%v", rem) + } + dvw, err := dst.WriteDocumentElement(key) + if err != nil { + return err + } + val, rem, ok = bsoncore.ReadValue(rem, t) + if !ok { + return fmt.Errorf("not enough bytes available to read type. bytes=%d type=%s", len(rem), t) + } + err = c.CopyValueFromBytes(dvw, t, val.Data) + if err != nil { + return err + } + } + return nil +} + +// CopyDocumentToBytes copies an entire document from the ValueReader and +// returns it as bytes. +func (c Copier) CopyDocumentToBytes(src ValueReader) ([]byte, error) { + return c.AppendDocumentBytes(nil, src) +} + +// AppendDocumentBytes functions the same as CopyDocumentToBytes, but will +// append the result to dst. +func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error) { + if br, ok := src.(BytesReader); ok { + _, dst, err := br.ReadValueBytes(dst) + return dst, err + } + + vw := vwPool.Get().(*valueWriter) + defer vwPool.Put(vw) + + vw.reset(dst) + + err := c.CopyDocument(vw, src) + dst = vw.buf + return dst, err +} + +// CopyValueFromBytes will write the value represtend by t and src to dst. +func (c Copier) CopyValueFromBytes(dst ValueWriter, t bsontype.Type, src []byte) error { + if wvb, ok := dst.(BytesWriter); ok { + return wvb.WriteValueBytes(t, src) + } + + vr := vrPool.Get().(*valueReader) + defer vrPool.Put(vr) + + vr.reset(src) + vr.pushElement(t) + + return c.CopyValue(dst, vr) +} + +// CopyValueToBytes copies a value from src and returns it as a bsontype.Type and a +// []byte. +func (c Copier) CopyValueToBytes(src ValueReader) (bsontype.Type, []byte, error) { + return c.AppendValueBytes(nil, src) +} + +// AppendValueBytes functions the same as CopyValueToBytes, but will append the +// result to dst. 
+func (c Copier) AppendValueBytes(dst []byte, src ValueReader) (bsontype.Type, []byte, error) { + if br, ok := src.(BytesReader); ok { + return br.ReadValueBytes(dst) + } + + vw := vwPool.Get().(*valueWriter) + defer vwPool.Put(vw) + + start := len(dst) + + vw.reset(dst) + vw.push(mElement) + + err := c.CopyValue(vw, src) + if err != nil { + return 0, dst, err + } + + return bsontype.Type(vw.buf[start]), vw.buf[start+2:], nil +} + +// CopyValue will copy a single value from src to dst. +func (c Copier) CopyValue(dst ValueWriter, src ValueReader) error { + var err error + switch src.Type() { + case bsontype.Double: + var f64 float64 + f64, err = src.ReadDouble() + if err != nil { + break + } + err = dst.WriteDouble(f64) + case bsontype.String: + var str string + str, err = src.ReadString() + if err != nil { + return err + } + err = dst.WriteString(str) + case bsontype.EmbeddedDocument: + err = c.CopyDocument(dst, src) + case bsontype.Array: + err = c.copyArray(dst, src) + case bsontype.Binary: + var data []byte + var subtype byte + data, subtype, err = src.ReadBinary() + if err != nil { + break + } + err = dst.WriteBinaryWithSubtype(data, subtype) + case bsontype.Undefined: + err = src.ReadUndefined() + if err != nil { + break + } + err = dst.WriteUndefined() + case bsontype.ObjectID: + var oid primitive.ObjectID + oid, err = src.ReadObjectID() + if err != nil { + break + } + err = dst.WriteObjectID(oid) + case bsontype.Boolean: + var b bool + b, err = src.ReadBoolean() + if err != nil { + break + } + err = dst.WriteBoolean(b) + case bsontype.DateTime: + var dt int64 + dt, err = src.ReadDateTime() + if err != nil { + break + } + err = dst.WriteDateTime(dt) + case bsontype.Null: + err = src.ReadNull() + if err != nil { + break + } + err = dst.WriteNull() + case bsontype.Regex: + var pattern, options string + pattern, options, err = src.ReadRegex() + if err != nil { + break + } + err = dst.WriteRegex(pattern, options) + case bsontype.DBPointer: + var ns string + var 
pointer primitive.ObjectID + ns, pointer, err = src.ReadDBPointer() + if err != nil { + break + } + err = dst.WriteDBPointer(ns, pointer) + case bsontype.JavaScript: + var js string + js, err = src.ReadJavascript() + if err != nil { + break + } + err = dst.WriteJavascript(js) + case bsontype.Symbol: + var symbol string + symbol, err = src.ReadSymbol() + if err != nil { + break + } + err = dst.WriteSymbol(symbol) + case bsontype.CodeWithScope: + var code string + var srcScope DocumentReader + code, srcScope, err = src.ReadCodeWithScope() + if err != nil { + break + } + + var dstScope DocumentWriter + dstScope, err = dst.WriteCodeWithScope(code) + if err != nil { + break + } + err = c.copyDocumentCore(dstScope, srcScope) + case bsontype.Int32: + var i32 int32 + i32, err = src.ReadInt32() + if err != nil { + break + } + err = dst.WriteInt32(i32) + case bsontype.Timestamp: + var t, i uint32 + t, i, err = src.ReadTimestamp() + if err != nil { + break + } + err = dst.WriteTimestamp(t, i) + case bsontype.Int64: + var i64 int64 + i64, err = src.ReadInt64() + if err != nil { + break + } + err = dst.WriteInt64(i64) + case bsontype.Decimal128: + var d128 primitive.Decimal128 + d128, err = src.ReadDecimal128() + if err != nil { + break + } + err = dst.WriteDecimal128(d128) + case bsontype.MinKey: + err = src.ReadMinKey() + if err != nil { + break + } + err = dst.WriteMinKey() + case bsontype.MaxKey: + err = src.ReadMaxKey() + if err != nil { + break + } + err = dst.WriteMaxKey() + default: + err = fmt.Errorf("Cannot copy unknown BSON type %s", src.Type()) + } + + return err +} + +func (c Copier) copyArray(dst ValueWriter, src ValueReader) error { + ar, err := src.ReadArray() + if err != nil { + return err + } + + aw, err := dst.WriteArray() + if err != nil { + return err + } + + for { + vr, err := ar.ReadValue() + if err == ErrEOA { + break + } + if err != nil { + return err + } + + vw, err := aw.WriteArrayElement() + if err != nil { + return err + } + + err = c.CopyValue(vw, 
vr) + if err != nil { + return err + } + } + + return aw.WriteArrayEnd() +} + +func (c Copier) copyDocumentCore(dw DocumentWriter, dr DocumentReader) error { + for { + key, vr, err := dr.ReadElement() + if err == ErrEOD { + break + } + if err != nil { + return err + } + + vw, err := dw.WriteDocumentElement(key) + if err != nil { + return err + } + + err = c.CopyValue(vw, vr) + if err != nil { + return err + } + } + + return dw.WriteDocumentEnd() +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier_test.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier_test.go new file mode 100644 index 0000000..84e0a80 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier_test.go @@ -0,0 +1,529 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonrw + +import ( + "bytes" + "errors" + "fmt" + "testing" + + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" +) + +func TestCopier(t *testing.T) { + t.Run("CopyDocument", func(t *testing.T) { + t.Run("ReadDocument Error", func(t *testing.T) { + want := errors.New("ReadDocumentError") + src := &TestValueReaderWriter{t: t, err: want, errAfter: llvrwReadDocument} + got := Copier{}.CopyDocument(nil, src) + if !compareErrors(got, want) { + t.Errorf("Did not receive correct error. got %v; want %v", got, want) + } + }) + t.Run("WriteDocument Error", func(t *testing.T) { + want := errors.New("WriteDocumentError") + src := &TestValueReaderWriter{} + dst := &TestValueReaderWriter{t: t, err: want, errAfter: llvrwWriteDocument} + got := Copier{}.CopyDocument(dst, src) + if !compareErrors(got, want) { + t.Errorf("Did not receive correct error. 
got %v; want %v", got, want) + } + }) + t.Run("success", func(t *testing.T) { + idx, doc := bsoncore.AppendDocumentStart(nil) + doc = bsoncore.AppendStringElement(doc, "Hello", "world") + doc, err := bsoncore.AppendDocumentEnd(doc, idx) + noerr(t, err) + src := newValueReader(doc) + dst := newValueWriterFromSlice(make([]byte, 0)) + want := doc + err = Copier{}.CopyDocument(dst, src) + noerr(t, err) + got := dst.buf + if !bytes.Equal(got, want) { + t.Errorf("Bytes are not equal. got %v; want %v", got, want) + } + }) + }) + t.Run("copyArray", func(t *testing.T) { + t.Run("ReadArray Error", func(t *testing.T) { + want := errors.New("ReadArrayError") + src := &TestValueReaderWriter{t: t, err: want, errAfter: llvrwReadArray} + got := Copier{}.copyArray(nil, src) + if !compareErrors(got, want) { + t.Errorf("Did not receive correct error. got %v; want %v", got, want) + } + }) + t.Run("WriteArray Error", func(t *testing.T) { + want := errors.New("WriteArrayError") + src := &TestValueReaderWriter{} + dst := &TestValueReaderWriter{t: t, err: want, errAfter: llvrwWriteArray} + got := Copier{}.copyArray(dst, src) + if !compareErrors(got, want) { + t.Errorf("Did not receive correct error. 
got %v; want %v", got, want) + } + }) + t.Run("success", func(t *testing.T) { + idx, doc := bsoncore.AppendDocumentStart(nil) + aidx, doc := bsoncore.AppendArrayElementStart(doc, "foo") + doc = bsoncore.AppendStringElement(doc, "0", "Hello, world!") + doc, err := bsoncore.AppendArrayEnd(doc, aidx) + noerr(t, err) + doc, err = bsoncore.AppendDocumentEnd(doc, idx) + noerr(t, err) + src := newValueReader(doc) + + _, err = src.ReadDocument() + noerr(t, err) + _, _, err = src.ReadElement() + noerr(t, err) + + dst := newValueWriterFromSlice(make([]byte, 0)) + _, err = dst.WriteDocument() + noerr(t, err) + _, err = dst.WriteDocumentElement("foo") + noerr(t, err) + want := doc + + err = Copier{}.copyArray(dst, src) + noerr(t, err) + + err = dst.WriteDocumentEnd() + noerr(t, err) + + got := dst.buf + if !bytes.Equal(got, want) { + t.Errorf("Bytes are not equal. got %v; want %v", got, want) + } + }) + }) + t.Run("CopyValue", func(t *testing.T) { + testCases := []struct { + name string + dst *TestValueReaderWriter + src *TestValueReaderWriter + err error + }{ + { + "Double/src/error", + &TestValueReaderWriter{}, + &TestValueReaderWriter{bsontype: bsontype.Double, err: errors.New("1"), errAfter: llvrwReadDouble}, + errors.New("1"), + }, + { + "Double/dst/error", + &TestValueReaderWriter{bsontype: bsontype.Double, err: errors.New("2"), errAfter: llvrwWriteDouble}, + &TestValueReaderWriter{bsontype: bsontype.Double, readval: float64(3.14159)}, + errors.New("2"), + }, + { + "String/src/error", + &TestValueReaderWriter{}, + &TestValueReaderWriter{bsontype: bsontype.String, err: errors.New("1"), errAfter: llvrwReadString}, + errors.New("1"), + }, + { + "String/dst/error", + &TestValueReaderWriter{bsontype: bsontype.String, err: errors.New("2"), errAfter: llvrwWriteString}, + &TestValueReaderWriter{bsontype: bsontype.String, readval: string("hello, world")}, + errors.New("2"), + }, + { + "Document/src/error", + &TestValueReaderWriter{}, + &TestValueReaderWriter{bsontype: 
bsontype.EmbeddedDocument, err: errors.New("1"), errAfter: llvrwReadDocument}, + errors.New("1"), + }, + { + "Array/dst/error", + &TestValueReaderWriter{}, + &TestValueReaderWriter{bsontype: bsontype.Array, err: errors.New("2"), errAfter: llvrwReadArray}, + errors.New("2"), + }, + { + "Binary/src/error", + &TestValueReaderWriter{}, + &TestValueReaderWriter{bsontype: bsontype.Binary, err: errors.New("1"), errAfter: llvrwReadBinary}, + errors.New("1"), + }, + { + "Binary/dst/error", + &TestValueReaderWriter{bsontype: bsontype.Binary, err: errors.New("2"), errAfter: llvrwWriteBinaryWithSubtype}, + &TestValueReaderWriter{ + bsontype: bsontype.Binary, + readval: bsoncore.Value{ + Type: bsontype.Binary, + Data: []byte{0x03, 0x00, 0x00, 0x00, 0xFF, 0x01, 0x02, 0x03}, + }, + }, + errors.New("2"), + }, + { + "Undefined/src/error", + &TestValueReaderWriter{}, + &TestValueReaderWriter{bsontype: bsontype.Undefined, err: errors.New("1"), errAfter: llvrwReadUndefined}, + errors.New("1"), + }, + { + "Undefined/dst/error", + &TestValueReaderWriter{bsontype: bsontype.Undefined, err: errors.New("2"), errAfter: llvrwWriteUndefined}, + &TestValueReaderWriter{bsontype: bsontype.Undefined}, + errors.New("2"), + }, + { + "ObjectID/src/error", + &TestValueReaderWriter{}, + &TestValueReaderWriter{bsontype: bsontype.ObjectID, err: errors.New("1"), errAfter: llvrwReadObjectID}, + errors.New("1"), + }, + { + "ObjectID/dst/error", + &TestValueReaderWriter{bsontype: bsontype.ObjectID, err: errors.New("2"), errAfter: llvrwWriteObjectID}, + &TestValueReaderWriter{bsontype: bsontype.ObjectID, readval: primitive.ObjectID{0x01, 0x02, 0x03}}, + errors.New("2"), + }, + { + "Boolean/src/error", + &TestValueReaderWriter{}, + &TestValueReaderWriter{bsontype: bsontype.Boolean, err: errors.New("1"), errAfter: llvrwReadBoolean}, + errors.New("1"), + }, + { + "Boolean/dst/error", + &TestValueReaderWriter{bsontype: bsontype.Boolean, err: errors.New("2"), errAfter: llvrwWriteBoolean}, + 
&TestValueReaderWriter{bsontype: bsontype.Boolean, readval: bool(true)}, + errors.New("2"), + }, + { + "DateTime/src/error", + &TestValueReaderWriter{}, + &TestValueReaderWriter{bsontype: bsontype.DateTime, err: errors.New("1"), errAfter: llvrwReadDateTime}, + errors.New("1"), + }, + { + "DateTime/dst/error", + &TestValueReaderWriter{bsontype: bsontype.DateTime, err: errors.New("2"), errAfter: llvrwWriteDateTime}, + &TestValueReaderWriter{bsontype: bsontype.DateTime, readval: int64(1234567890)}, + errors.New("2"), + }, + { + "Null/src/error", + &TestValueReaderWriter{}, + &TestValueReaderWriter{bsontype: bsontype.Null, err: errors.New("1"), errAfter: llvrwReadNull}, + errors.New("1"), + }, + { + "Null/dst/error", + &TestValueReaderWriter{bsontype: bsontype.Null, err: errors.New("2"), errAfter: llvrwWriteNull}, + &TestValueReaderWriter{bsontype: bsontype.Null}, + errors.New("2"), + }, + { + "Regex/src/error", + &TestValueReaderWriter{}, + &TestValueReaderWriter{bsontype: bsontype.Regex, err: errors.New("1"), errAfter: llvrwReadRegex}, + errors.New("1"), + }, + { + "Regex/dst/error", + &TestValueReaderWriter{bsontype: bsontype.Regex, err: errors.New("2"), errAfter: llvrwWriteRegex}, + &TestValueReaderWriter{ + bsontype: bsontype.Regex, + readval: bsoncore.Value{ + Type: bsontype.Regex, + Data: bsoncore.AppendRegex(nil, "hello", "world"), + }, + }, + errors.New("2"), + }, + { + "DBPointer/src/error", + &TestValueReaderWriter{}, + &TestValueReaderWriter{bsontype: bsontype.DBPointer, err: errors.New("1"), errAfter: llvrwReadDBPointer}, + errors.New("1"), + }, + { + "DBPointer/dst/error", + &TestValueReaderWriter{bsontype: bsontype.DBPointer, err: errors.New("2"), errAfter: llvrwWriteDBPointer}, + &TestValueReaderWriter{ + bsontype: bsontype.DBPointer, + readval: bsoncore.Value{ + Type: bsontype.DBPointer, + Data: bsoncore.AppendDBPointer(nil, "foo", primitive.ObjectID{0x01, 0x02, 0x03}), + }, + }, + errors.New("2"), + }, + { + "Javascript/src/error", + 
&TestValueReaderWriter{}, + &TestValueReaderWriter{bsontype: bsontype.JavaScript, err: errors.New("1"), errAfter: llvrwReadJavascript}, + errors.New("1"), + }, + { + "Javascript/dst/error", + &TestValueReaderWriter{bsontype: bsontype.JavaScript, err: errors.New("2"), errAfter: llvrwWriteJavascript}, + &TestValueReaderWriter{bsontype: bsontype.JavaScript, readval: string("hello, world")}, + errors.New("2"), + }, + { + "Symbol/src/error", + &TestValueReaderWriter{}, + &TestValueReaderWriter{bsontype: bsontype.Symbol, err: errors.New("1"), errAfter: llvrwReadSymbol}, + errors.New("1"), + }, + { + "Symbol/dst/error", + &TestValueReaderWriter{bsontype: bsontype.Symbol, err: errors.New("2"), errAfter: llvrwWriteSymbol}, + &TestValueReaderWriter{ + bsontype: bsontype.Symbol, + readval: bsoncore.Value{ + Type: bsontype.Symbol, + Data: bsoncore.AppendSymbol(nil, "hello, world"), + }, + }, + errors.New("2"), + }, + { + "CodeWithScope/src/error", + &TestValueReaderWriter{}, + &TestValueReaderWriter{bsontype: bsontype.CodeWithScope, err: errors.New("1"), errAfter: llvrwReadCodeWithScope}, + errors.New("1"), + }, + { + "CodeWithScope/dst/error", + &TestValueReaderWriter{bsontype: bsontype.CodeWithScope, err: errors.New("2"), errAfter: llvrwWriteCodeWithScope}, + &TestValueReaderWriter{bsontype: bsontype.CodeWithScope}, + errors.New("2"), + }, + { + "CodeWithScope/dst/copyDocumentCore error", + &TestValueReaderWriter{err: errors.New("3"), errAfter: llvrwWriteDocumentElement}, + &TestValueReaderWriter{bsontype: bsontype.CodeWithScope}, + errors.New("3"), + }, + { + "Int32/src/error", + &TestValueReaderWriter{}, + &TestValueReaderWriter{bsontype: bsontype.Int32, err: errors.New("1"), errAfter: llvrwReadInt32}, + errors.New("1"), + }, + { + "Int32/dst/error", + &TestValueReaderWriter{bsontype: bsontype.Int32, err: errors.New("2"), errAfter: llvrwWriteInt32}, + &TestValueReaderWriter{bsontype: bsontype.Int32, readval: int32(12345)}, + errors.New("2"), + }, + { + 
"Timestamp/src/error", + &TestValueReaderWriter{}, + &TestValueReaderWriter{bsontype: bsontype.Timestamp, err: errors.New("1"), errAfter: llvrwReadTimestamp}, + errors.New("1"), + }, + { + "Timestamp/dst/error", + &TestValueReaderWriter{bsontype: bsontype.Timestamp, err: errors.New("2"), errAfter: llvrwWriteTimestamp}, + &TestValueReaderWriter{ + bsontype: bsontype.Timestamp, + readval: bsoncore.Value{ + Type: bsontype.Timestamp, + Data: bsoncore.AppendTimestamp(nil, 12345, 67890), + }, + }, + errors.New("2"), + }, + { + "Int64/src/error", + &TestValueReaderWriter{}, + &TestValueReaderWriter{bsontype: bsontype.Int64, err: errors.New("1"), errAfter: llvrwReadInt64}, + errors.New("1"), + }, + { + "Int64/dst/error", + &TestValueReaderWriter{bsontype: bsontype.Int64, err: errors.New("2"), errAfter: llvrwWriteInt64}, + &TestValueReaderWriter{bsontype: bsontype.Int64, readval: int64(1234567890)}, + errors.New("2"), + }, + { + "Decimal128/src/error", + &TestValueReaderWriter{}, + &TestValueReaderWriter{bsontype: bsontype.Decimal128, err: errors.New("1"), errAfter: llvrwReadDecimal128}, + errors.New("1"), + }, + { + "Decimal128/dst/error", + &TestValueReaderWriter{bsontype: bsontype.Decimal128, err: errors.New("2"), errAfter: llvrwWriteDecimal128}, + &TestValueReaderWriter{bsontype: bsontype.Decimal128, readval: primitive.NewDecimal128(12345, 67890)}, + errors.New("2"), + }, + { + "MinKey/src/error", + &TestValueReaderWriter{}, + &TestValueReaderWriter{bsontype: bsontype.MinKey, err: errors.New("1"), errAfter: llvrwReadMinKey}, + errors.New("1"), + }, + { + "MinKey/dst/error", + &TestValueReaderWriter{bsontype: bsontype.MinKey, err: errors.New("2"), errAfter: llvrwWriteMinKey}, + &TestValueReaderWriter{bsontype: bsontype.MinKey}, + errors.New("2"), + }, + { + "MaxKey/src/error", + &TestValueReaderWriter{}, + &TestValueReaderWriter{bsontype: bsontype.MaxKey, err: errors.New("1"), errAfter: llvrwReadMaxKey}, + errors.New("1"), + }, + { + "MaxKey/dst/error", + 
&TestValueReaderWriter{bsontype: bsontype.MaxKey, err: errors.New("2"), errAfter: llvrwWriteMaxKey}, + &TestValueReaderWriter{bsontype: bsontype.MaxKey}, + errors.New("2"), + }, + { + "Unknown BSON type error", + &TestValueReaderWriter{}, + &TestValueReaderWriter{}, + fmt.Errorf("Cannot copy unknown BSON type %s", bsontype.Type(0)), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + tc.dst.t, tc.src.t = t, t + err := Copier{}.CopyValue(tc.dst, tc.src) + if !compareErrors(err, tc.err) { + t.Errorf("Did not receive expected error. got %v; want %v", err, tc.err) + } + }) + } + }) + t.Run("CopyValueFromBytes", func(t *testing.T) { + t.Run("BytesWriter", func(t *testing.T) { + vw := newValueWriterFromSlice(make([]byte, 0)) + _, err := vw.WriteDocument() + noerr(t, err) + _, err = vw.WriteDocumentElement("foo") + noerr(t, err) + err = Copier{}.CopyValueFromBytes(vw, bsontype.String, bsoncore.AppendString(nil, "bar")) + noerr(t, err) + err = vw.WriteDocumentEnd() + noerr(t, err) + var idx int32 + want, err := bsoncore.AppendDocumentEnd( + bsoncore.AppendStringElement( + bsoncore.AppendDocumentStartInline(nil, &idx), + "foo", "bar", + ), + idx, + ) + noerr(t, err) + got := vw.buf + if !bytes.Equal(got, want) { + t.Errorf("Bytes are not equal. got %v; want %v", got, want) + } + }) + t.Run("Non BytesWriter", func(t *testing.T) { + llvrw := &TestValueReaderWriter{t: t} + err := Copier{}.CopyValueFromBytes(llvrw, bsontype.String, bsoncore.AppendString(nil, "bar")) + noerr(t, err) + got, want := llvrw.invoked, llvrwWriteString + if got != want { + t.Errorf("Incorrect method invoked on llvrw. 
got %v; want %v", got, want) + } + }) + }) + t.Run("CopyValueToBytes", func(t *testing.T) { + t.Run("BytesReader", func(t *testing.T) { + var idx int32 + b, err := bsoncore.AppendDocumentEnd( + bsoncore.AppendStringElement( + bsoncore.AppendDocumentStartInline(nil, &idx), + "hello", "world", + ), + idx, + ) + noerr(t, err) + vr := newValueReader(b) + _, err = vr.ReadDocument() + noerr(t, err) + _, _, err = vr.ReadElement() + noerr(t, err) + btype, got, err := Copier{}.CopyValueToBytes(vr) + noerr(t, err) + want := bsoncore.AppendString(nil, "world") + if btype != bsontype.String { + t.Errorf("Incorrect type returned. got %v; want %v", btype, bsontype.String) + } + if !bytes.Equal(got, want) { + t.Errorf("Bytes do not match. got %v; want %v", got, want) + } + }) + t.Run("Non BytesReader", func(t *testing.T) { + llvrw := &TestValueReaderWriter{t: t, bsontype: bsontype.String, readval: string("Hello, world!")} + btype, got, err := Copier{}.CopyValueToBytes(llvrw) + noerr(t, err) + want := bsoncore.AppendString(nil, "Hello, world!") + if btype != bsontype.String { + t.Errorf("Incorrect type returned. got %v; want %v", btype, bsontype.String) + } + if !bytes.Equal(got, want) { + t.Errorf("Bytes do not match. got %v; want %v", got, want) + } + }) + }) + t.Run("AppendValueBytes", func(t *testing.T) { + t.Run("BytesReader", func(t *testing.T) { + var idx int32 + b, err := bsoncore.AppendDocumentEnd( + bsoncore.AppendStringElement( + bsoncore.AppendDocumentStartInline(nil, &idx), + "hello", "world", + ), + idx, + ) + noerr(t, err) + vr := newValueReader(b) + _, err = vr.ReadDocument() + noerr(t, err) + _, _, err = vr.ReadElement() + noerr(t, err) + btype, got, err := Copier{}.AppendValueBytes(nil, vr) + noerr(t, err) + want := bsoncore.AppendString(nil, "world") + if btype != bsontype.String { + t.Errorf("Incorrect type returned. got %v; want %v", btype, bsontype.String) + } + if !bytes.Equal(got, want) { + t.Errorf("Bytes do not match. 
got %v; want %v", got, want) + } + }) + t.Run("Non BytesReader", func(t *testing.T) { + llvrw := &TestValueReaderWriter{t: t, bsontype: bsontype.String, readval: string("Hello, world!")} + btype, got, err := Copier{}.AppendValueBytes(nil, llvrw) + noerr(t, err) + want := bsoncore.AppendString(nil, "Hello, world!") + if btype != bsontype.String { + t.Errorf("Incorrect type returned. got %v; want %v", btype, bsontype.String) + } + if !bytes.Equal(got, want) { + t.Errorf("Bytes do not match. got %v; want %v", got, want) + } + }) + t.Run("CopyValue error", func(t *testing.T) { + want := errors.New("CopyValue error") + llvrw := &TestValueReaderWriter{t: t, bsontype: bsontype.String, err: want, errAfter: llvrwReadString} + _, _, got := Copier{}.AppendValueBytes(make([]byte, 0), llvrw) + if !compareErrors(got, want) { + t.Errorf("Errors do not match. got %v; want %v", got, want) + } + }) + }) +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go new file mode 100644 index 0000000..750b0d2 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go @@ -0,0 +1,9 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package bsonrw contains abstractions for reading and writing +// BSON and BSON like types from sources. +package bsonrw // import "go.mongodb.org/mongo-driver/bson/bsonrw" diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go new file mode 100644 index 0000000..9e223ed --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go @@ -0,0 +1,731 @@ +// Copyright (C) MongoDB, Inc. 2017-present. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonrw + +import ( + "errors" + "fmt" + "io" + + "go.mongodb.org/mongo-driver/bson/bsontype" +) + +const maxNestingDepth = 200 + +// ErrInvalidJSON indicates the JSON input is invalid +var ErrInvalidJSON = errors.New("invalid JSON input") + +type jsonParseState byte + +const ( + jpsStartState jsonParseState = iota + jpsSawBeginObject + jpsSawEndObject + jpsSawBeginArray + jpsSawEndArray + jpsSawColon + jpsSawComma + jpsSawKey + jpsSawValue + jpsDoneState + jpsInvalidState +) + +type jsonParseMode byte + +const ( + jpmInvalidMode jsonParseMode = iota + jpmObjectMode + jpmArrayMode +) + +type extJSONValue struct { + t bsontype.Type + v interface{} +} + +type extJSONObject struct { + keys []string + values []*extJSONValue +} + +type extJSONParser struct { + js *jsonScanner + s jsonParseState + m []jsonParseMode + k string + v *extJSONValue + + err error + canonical bool + depth int + maxDepth int + + emptyObject bool +} + +// newExtJSONParser returns a new extended JSON parser, ready to to begin +// parsing from the first character of the argued json input. It will not +// perform any read-ahead and will therefore not report any errors about +// malformed JSON at this point. 
+func newExtJSONParser(r io.Reader, canonical bool) *extJSONParser { + return &extJSONParser{ + js: &jsonScanner{r: r}, + s: jpsStartState, + m: []jsonParseMode{}, + canonical: canonical, + maxDepth: maxNestingDepth, + } +} + +// peekType examines the next value and returns its BSON Type +func (ejp *extJSONParser) peekType() (bsontype.Type, error) { + var t bsontype.Type + var err error + + ejp.advanceState() + switch ejp.s { + case jpsSawValue: + t = ejp.v.t + case jpsSawBeginArray: + t = bsontype.Array + case jpsInvalidState: + err = ejp.err + case jpsSawComma: + // in array mode, seeing a comma means we need to progress again to actually observe a type + if ejp.peekMode() == jpmArrayMode { + return ejp.peekType() + } + case jpsSawEndArray: + // this would only be a valid state if we were in array mode, so return end-of-array error + err = ErrEOA + case jpsSawBeginObject: + // peek key to determine type + ejp.advanceState() + switch ejp.s { + case jpsSawEndObject: // empty embedded document + t = bsontype.EmbeddedDocument + ejp.emptyObject = true + case jpsInvalidState: + err = ejp.err + case jpsSawKey: + t = wrapperKeyBSONType(ejp.k) + + if t == bsontype.JavaScript { + // just saw $code, need to check for $scope at same level + _, err := ejp.readValue(bsontype.JavaScript) + + if err != nil { + break + } + + switch ejp.s { + case jpsSawEndObject: // type is TypeJavaScript + case jpsSawComma: + ejp.advanceState() + if ejp.s == jpsSawKey && ejp.k == "$scope" { + t = bsontype.CodeWithScope + } else { + err = fmt.Errorf("invalid extended JSON: unexpected key %s in CodeWithScope object", ejp.k) + } + case jpsInvalidState: + err = ejp.err + default: + err = ErrInvalidJSON + } + } + } + } + + return t, err +} + +// readKey parses the next key and its type and returns them +func (ejp *extJSONParser) readKey() (string, bsontype.Type, error) { + if ejp.emptyObject { + ejp.emptyObject = false + return "", 0, ErrEOD + } + + // advance to key (or return with error) + switch 
ejp.s { + case jpsStartState: + ejp.advanceState() + if ejp.s == jpsSawBeginObject { + ejp.advanceState() + } + case jpsSawBeginObject: + ejp.advanceState() + case jpsSawValue, jpsSawEndObject, jpsSawEndArray: + ejp.advanceState() + switch ejp.s { + case jpsSawBeginObject, jpsSawComma: + ejp.advanceState() + case jpsSawEndObject: + return "", 0, ErrEOD + case jpsDoneState: + return "", 0, io.EOF + case jpsInvalidState: + return "", 0, ejp.err + default: + return "", 0, ErrInvalidJSON + } + case jpsSawKey: // do nothing (key was peeked before) + default: + return "", 0, invalidRequestError("key") + } + + // read key + var key string + + switch ejp.s { + case jpsSawKey: + key = ejp.k + case jpsSawEndObject: + return "", 0, ErrEOD + case jpsInvalidState: + return "", 0, ejp.err + default: + return "", 0, invalidRequestError("key") + } + + // check for colon + ejp.advanceState() + if err := ensureColon(ejp.s, key); err != nil { + return "", 0, err + } + + // peek at the value to determine type + t, err := ejp.peekType() + if err != nil { + return "", 0, err + } + + return key, t, nil +} + +// readValue returns the value corresponding to the Type returned by peekType +func (ejp *extJSONParser) readValue(t bsontype.Type) (*extJSONValue, error) { + if ejp.s == jpsInvalidState { + return nil, ejp.err + } + + var v *extJSONValue + + switch t { + case bsontype.Null, bsontype.Boolean, bsontype.String: + if ejp.s != jpsSawValue { + return nil, invalidRequestError(t.String()) + } + v = ejp.v + case bsontype.Int32, bsontype.Int64, bsontype.Double: + // relaxed version allows these to be literal number values + if ejp.s == jpsSawValue { + v = ejp.v + break + } + fallthrough + case bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID, bsontype.MinKey, bsontype.MaxKey, bsontype.Undefined: + switch ejp.s { + case jpsSawKey: + // read colon + ejp.advanceState() + if err := ensureColon(ejp.s, ejp.k); err != nil { + return nil, err + } + + // read value + ejp.advanceState() + if 
ejp.s != jpsSawValue || !ejp.ensureExtValueType(t) { + return nil, invalidJSONErrorForType("value", t) + } + + v = ejp.v + + // read end object + ejp.advanceState() + if ejp.s != jpsSawEndObject { + return nil, invalidJSONErrorForType("} after value", t) + } + default: + return nil, invalidRequestError(t.String()) + } + case bsontype.Binary, bsontype.Regex, bsontype.Timestamp, bsontype.DBPointer: + if ejp.s != jpsSawKey { + return nil, invalidRequestError(t.String()) + } + // read colon + ejp.advanceState() + if err := ensureColon(ejp.s, ejp.k); err != nil { + return nil, err + } + + ejp.advanceState() + if t == bsontype.Binary && ejp.s == jpsSawValue { + // convert legacy $binary format + base64 := ejp.v + + ejp.advanceState() + if ejp.s != jpsSawComma { + return nil, invalidJSONErrorForType(",", bsontype.Binary) + } + + ejp.advanceState() + key, t, err := ejp.readKey() + if err != nil { + return nil, err + } + if key != "$type" { + return nil, invalidJSONErrorForType("$type", bsontype.Binary) + } + + subType, err := ejp.readValue(t) + if err != nil { + return nil, err + } + + ejp.advanceState() + if ejp.s != jpsSawEndObject { + return nil, invalidJSONErrorForType("2 key-value pairs and then }", bsontype.Binary) + } + + v = &extJSONValue{ + t: bsontype.EmbeddedDocument, + v: &extJSONObject{ + keys: []string{"base64", "subType"}, + values: []*extJSONValue{base64, subType}, + }, + } + break + } + + // read KV pairs + if ejp.s != jpsSawBeginObject { + return nil, invalidJSONErrorForType("{", t) + } + + keys, vals, err := ejp.readObject(2, true) + if err != nil { + return nil, err + } + + ejp.advanceState() + if ejp.s != jpsSawEndObject { + return nil, invalidJSONErrorForType("2 key-value pairs and then }", t) + } + + v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}} + + case bsontype.DateTime: + switch ejp.s { + case jpsSawValue: + v = ejp.v + case jpsSawKey: + // read colon + ejp.advanceState() + if err := 
ensureColon(ejp.s, ejp.k); err != nil { + return nil, err + } + + ejp.advanceState() + switch ejp.s { + case jpsSawBeginObject: + keys, vals, err := ejp.readObject(1, true) + if err != nil { + return nil, err + } + v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}} + case jpsSawValue: + if ejp.canonical { + return nil, invalidJSONError("{") + } + v = ejp.v + default: + if ejp.canonical { + return nil, invalidJSONErrorForType("object", t) + } + return nil, invalidJSONErrorForType("ISO-8601 Internet Date/Time Format as decribed in RFC-3339", t) + } + + ejp.advanceState() + if ejp.s != jpsSawEndObject { + return nil, invalidJSONErrorForType("value and then }", t) + } + default: + return nil, invalidRequestError(t.String()) + } + case bsontype.JavaScript: + switch ejp.s { + case jpsSawKey: + // read colon + ejp.advanceState() + if err := ensureColon(ejp.s, ejp.k); err != nil { + return nil, err + } + + // read value + ejp.advanceState() + if ejp.s != jpsSawValue { + return nil, invalidJSONErrorForType("value", t) + } + v = ejp.v + + // read end object or comma and just return + ejp.advanceState() + case jpsSawEndObject: + v = ejp.v + default: + return nil, invalidRequestError(t.String()) + } + case bsontype.CodeWithScope: + if ejp.s == jpsSawKey && ejp.k == "$scope" { + v = ejp.v // this is the $code string from earlier + + // read colon + ejp.advanceState() + if err := ensureColon(ejp.s, ejp.k); err != nil { + return nil, err + } + + // read { + ejp.advanceState() + if ejp.s != jpsSawBeginObject { + return nil, invalidJSONError("$scope to be embedded document") + } + } else { + return nil, invalidRequestError(t.String()) + } + case bsontype.EmbeddedDocument, bsontype.Array: + return nil, invalidRequestError(t.String()) + } + + return v, nil +} + +// readObject is a utility method for reading full objects of known (or expected) size +// it is useful for extended JSON types such as binary, datetime, regex, and timestamp +func 
(ejp *extJSONParser) readObject(numKeys int, started bool) ([]string, []*extJSONValue, error) { + keys := make([]string, numKeys) + vals := make([]*extJSONValue, numKeys) + + if !started { + ejp.advanceState() + if ejp.s != jpsSawBeginObject { + return nil, nil, invalidJSONError("{") + } + } + + for i := 0; i < numKeys; i++ { + key, t, err := ejp.readKey() + if err != nil { + return nil, nil, err + } + + switch ejp.s { + case jpsSawKey: + v, err := ejp.readValue(t) + if err != nil { + return nil, nil, err + } + + keys[i] = key + vals[i] = v + case jpsSawValue: + keys[i] = key + vals[i] = ejp.v + default: + return nil, nil, invalidJSONError("value") + } + } + + ejp.advanceState() + if ejp.s != jpsSawEndObject { + return nil, nil, invalidJSONError("}") + } + + return keys, vals, nil +} + +// advanceState reads the next JSON token from the scanner and transitions +// from the current state based on that token's type +func (ejp *extJSONParser) advanceState() { + if ejp.s == jpsDoneState || ejp.s == jpsInvalidState { + return + } + + jt, err := ejp.js.nextToken() + + if err != nil { + ejp.err = err + ejp.s = jpsInvalidState + return + } + + valid := ejp.validateToken(jt.t) + if !valid { + ejp.err = unexpectedTokenError(jt) + ejp.s = jpsInvalidState + return + } + + switch jt.t { + case jttBeginObject: + ejp.s = jpsSawBeginObject + ejp.pushMode(jpmObjectMode) + ejp.depth++ + + if ejp.depth > ejp.maxDepth { + ejp.err = nestingDepthError(jt.p, ejp.depth) + ejp.s = jpsInvalidState + } + case jttEndObject: + ejp.s = jpsSawEndObject + ejp.depth-- + + if ejp.popMode() != jpmObjectMode { + ejp.err = unexpectedTokenError(jt) + ejp.s = jpsInvalidState + } + case jttBeginArray: + ejp.s = jpsSawBeginArray + ejp.pushMode(jpmArrayMode) + case jttEndArray: + ejp.s = jpsSawEndArray + + if ejp.popMode() != jpmArrayMode { + ejp.err = unexpectedTokenError(jt) + ejp.s = jpsInvalidState + } + case jttColon: + ejp.s = jpsSawColon + case jttComma: + ejp.s = jpsSawComma + case jttEOF: + ejp.s 
= jpsDoneState + if len(ejp.m) != 0 { + ejp.err = unexpectedTokenError(jt) + ejp.s = jpsInvalidState + } + case jttString: + switch ejp.s { + case jpsSawComma: + if ejp.peekMode() == jpmArrayMode { + ejp.s = jpsSawValue + ejp.v = extendJSONToken(jt) + return + } + fallthrough + case jpsSawBeginObject: + ejp.s = jpsSawKey + ejp.k = jt.v.(string) + return + } + fallthrough + default: + ejp.s = jpsSawValue + ejp.v = extendJSONToken(jt) + } +} + +var jpsValidTransitionTokens = map[jsonParseState]map[jsonTokenType]bool{ + jpsStartState: { + jttBeginObject: true, + jttBeginArray: true, + jttInt32: true, + jttInt64: true, + jttDouble: true, + jttString: true, + jttBool: true, + jttNull: true, + jttEOF: true, + }, + jpsSawBeginObject: { + jttEndObject: true, + jttString: true, + }, + jpsSawEndObject: { + jttEndObject: true, + jttEndArray: true, + jttComma: true, + jttEOF: true, + }, + jpsSawBeginArray: { + jttBeginObject: true, + jttBeginArray: true, + jttEndArray: true, + jttInt32: true, + jttInt64: true, + jttDouble: true, + jttString: true, + jttBool: true, + jttNull: true, + }, + jpsSawEndArray: { + jttEndObject: true, + jttEndArray: true, + jttComma: true, + jttEOF: true, + }, + jpsSawColon: { + jttBeginObject: true, + jttBeginArray: true, + jttInt32: true, + jttInt64: true, + jttDouble: true, + jttString: true, + jttBool: true, + jttNull: true, + }, + jpsSawComma: { + jttBeginObject: true, + jttBeginArray: true, + jttInt32: true, + jttInt64: true, + jttDouble: true, + jttString: true, + jttBool: true, + jttNull: true, + }, + jpsSawKey: { + jttColon: true, + }, + jpsSawValue: { + jttEndObject: true, + jttEndArray: true, + jttComma: true, + jttEOF: true, + }, + jpsDoneState: {}, + jpsInvalidState: {}, +} + +func (ejp *extJSONParser) validateToken(jtt jsonTokenType) bool { + switch ejp.s { + case jpsSawEndObject: + // if we are at depth zero and the next token is a '{', + // we can consider it valid only if we are not in array mode. 
+ if jtt == jttBeginObject && ejp.depth == 0 { + return ejp.peekMode() != jpmArrayMode + } + case jpsSawComma: + switch ejp.peekMode() { + // the only valid next token after a comma inside a document is a string (a key) + case jpmObjectMode: + return jtt == jttString + case jpmInvalidMode: + return false + } + } + + _, ok := jpsValidTransitionTokens[ejp.s][jtt] + return ok +} + +// ensureExtValueType returns true if the current value has the expected +// value type for single-key extended JSON types. For example, +// {"$numberInt": v} v must be TypeString +func (ejp *extJSONParser) ensureExtValueType(t bsontype.Type) bool { + switch t { + case bsontype.MinKey, bsontype.MaxKey: + return ejp.v.t == bsontype.Int32 + case bsontype.Undefined: + return ejp.v.t == bsontype.Boolean + case bsontype.Int32, bsontype.Int64, bsontype.Double, bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID: + return ejp.v.t == bsontype.String + default: + return false + } +} + +func (ejp *extJSONParser) pushMode(m jsonParseMode) { + ejp.m = append(ejp.m, m) +} + +func (ejp *extJSONParser) popMode() jsonParseMode { + l := len(ejp.m) + if l == 0 { + return jpmInvalidMode + } + + m := ejp.m[l-1] + ejp.m = ejp.m[:l-1] + + return m +} + +func (ejp *extJSONParser) peekMode() jsonParseMode { + l := len(ejp.m) + if l == 0 { + return jpmInvalidMode + } + + return ejp.m[l-1] +} + +func extendJSONToken(jt *jsonToken) *extJSONValue { + var t bsontype.Type + + switch jt.t { + case jttInt32: + t = bsontype.Int32 + case jttInt64: + t = bsontype.Int64 + case jttDouble: + t = bsontype.Double + case jttString: + t = bsontype.String + case jttBool: + t = bsontype.Boolean + case jttNull: + t = bsontype.Null + default: + return nil + } + + return &extJSONValue{t: t, v: jt.v} +} + +func ensureColon(s jsonParseState, key string) error { + if s != jpsSawColon { + return fmt.Errorf("invalid JSON input: missing colon after key \"%s\"", key) + } + + return nil +} + +func invalidRequestError(s string) error { + 
return fmt.Errorf("invalid request to read %s", s) +} + +func invalidJSONError(expected string) error { + return fmt.Errorf("invalid JSON input; expected %s", expected) +} + +func invalidJSONErrorForType(expected string, t bsontype.Type) error { + return fmt.Errorf("invalid JSON input; expected %s for %s", expected, t) +} + +func unexpectedTokenError(jt *jsonToken) error { + switch jt.t { + case jttInt32, jttInt64, jttDouble: + return fmt.Errorf("invalid JSON input; unexpected number (%v) at position %d", jt.v, jt.p) + case jttString: + return fmt.Errorf("invalid JSON input; unexpected string (\"%v\") at position %d", jt.v, jt.p) + case jttBool: + return fmt.Errorf("invalid JSON input; unexpected boolean literal (%v) at position %d", jt.v, jt.p) + case jttNull: + return fmt.Errorf("invalid JSON input; unexpected null literal at position %d", jt.p) + case jttEOF: + return fmt.Errorf("invalid JSON input; unexpected end of input at position %d", jt.p) + default: + return fmt.Errorf("invalid JSON input; unexpected %c at position %d", jt.v.(byte), jt.p) + } +} + +func nestingDepthError(p, depth int) error { + return fmt.Errorf("invalid JSON input; nesting too deep (%d levels) at position %d", depth, p) +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser_test.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser_test.go new file mode 100644 index 0000000..247fea5 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser_test.go @@ -0,0 +1,736 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonrw + +import ( + "io" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "go.mongodb.org/mongo-driver/bson/bsontype" +) + +var ( + keyDiff = specificDiff("key") + typDiff = specificDiff("type") + valDiff = specificDiff("value") + + expectErrEOF = expectSpecificError(io.EOF) + expectErrEOD = expectSpecificError(ErrEOD) + expectErrEOA = expectSpecificError(ErrEOA) +) + +type expectedErrorFunc func(t *testing.T, err error, desc string) + +type peekTypeTestCase struct { + desc string + input string + typs []bsontype.Type + errFs []expectedErrorFunc +} + +type readKeyValueTestCase struct { + desc string + input string + keys []string + typs []bsontype.Type + vals []*extJSONValue + + keyEFs []expectedErrorFunc + valEFs []expectedErrorFunc +} + +func expectSpecificError(expected error) expectedErrorFunc { + return func(t *testing.T, err error, desc string) { + if err != expected { + t.Helper() + t.Errorf("%s: Expected %v but got: %v", desc, expected, err) + t.FailNow() + } + } +} + +func specificDiff(name string) func(t *testing.T, expected, actual interface{}, desc string) { + return func(t *testing.T, expected, actual interface{}, desc string) { + if diff := cmp.Diff(expected, actual); diff != "" { + t.Helper() + t.Errorf("%s: Incorrect JSON %s (-want, +got): %s\n", desc, name, diff) + t.FailNow() + } + } +} + +func expectErrorNOOP(_ *testing.T, _ error, _ string) { +} + +func readKeyDiff(t *testing.T, eKey, aKey string, eTyp, aTyp bsontype.Type, err error, errF expectedErrorFunc, desc string) { + keyDiff(t, eKey, aKey, desc) + typDiff(t, eTyp, aTyp, desc) + errF(t, err, desc) +} + +func readValueDiff(t *testing.T, eVal, aVal *extJSONValue, err error, errF expectedErrorFunc, desc string) { + if aVal != nil { + typDiff(t, eVal.t, aVal.t, desc) + valDiff(t, eVal.v, aVal.v, desc) + } else { + valDiff(t, eVal, aVal, desc) + } + + errF(t, err, desc) +} + +func 
TestExtJSONParserPeekType(t *testing.T) { + makeValidPeekTypeTestCase := func(input string, typ bsontype.Type, desc string) peekTypeTestCase { + return peekTypeTestCase{ + desc: desc, input: input, + typs: []bsontype.Type{typ}, + errFs: []expectedErrorFunc{expectNoError}, + } + } + + makeInvalidPeekTypeTestCase := func(desc, input string, lastEF expectedErrorFunc) peekTypeTestCase { + return peekTypeTestCase{ + desc: desc, input: input, + typs: []bsontype.Type{bsontype.Array, bsontype.String, bsontype.Type(0)}, + errFs: []expectedErrorFunc{expectNoError, expectNoError, lastEF}, + } + } + + cases := []peekTypeTestCase{ + makeValidPeekTypeTestCase(`null`, bsontype.Null, "Null"), + makeValidPeekTypeTestCase(`"string"`, bsontype.String, "String"), + makeValidPeekTypeTestCase(`true`, bsontype.Boolean, "Boolean--true"), + makeValidPeekTypeTestCase(`false`, bsontype.Boolean, "Boolean--false"), + makeValidPeekTypeTestCase(`{"$minKey": 1}`, bsontype.MinKey, "MinKey"), + makeValidPeekTypeTestCase(`{"$maxKey": 1}`, bsontype.MaxKey, "MaxKey"), + makeValidPeekTypeTestCase(`{"$numberInt": "42"}`, bsontype.Int32, "Int32"), + makeValidPeekTypeTestCase(`{"$numberLong": "42"}`, bsontype.Int64, "Int64"), + makeValidPeekTypeTestCase(`{"$symbol": "symbol"}`, bsontype.Symbol, "Symbol"), + makeValidPeekTypeTestCase(`{"$numberDouble": "42.42"}`, bsontype.Double, "Double"), + makeValidPeekTypeTestCase(`{"$undefined": true}`, bsontype.Undefined, "Undefined"), + makeValidPeekTypeTestCase(`{"$numberDouble": "NaN"}`, bsontype.Double, "Double--NaN"), + makeValidPeekTypeTestCase(`{"$numberDecimal": "1234"}`, bsontype.Decimal128, "Decimal"), + makeValidPeekTypeTestCase(`{"foo": "bar"}`, bsontype.EmbeddedDocument, "Toplevel document"), + makeValidPeekTypeTestCase(`{"$date": {"$numberLong": "0"}}`, bsontype.DateTime, "Datetime"), + makeValidPeekTypeTestCase(`{"$code": "function() {}"}`, bsontype.JavaScript, "Code no scope"), + makeValidPeekTypeTestCase(`[{"$numberInt": "1"},{"$numberInt": "2"}]`, 
bsontype.Array, "Array"), + makeValidPeekTypeTestCase(`{"$timestamp": {"t": 42, "i": 1}}`, bsontype.Timestamp, "Timestamp"), + makeValidPeekTypeTestCase(`{"$oid": "57e193d7a9cc81b4027498b5"}`, bsontype.ObjectID, "Object ID"), + makeValidPeekTypeTestCase(`{"$binary": {"base64": "AQIDBAU=", "subType": "80"}}`, bsontype.Binary, "Binary"), + makeValidPeekTypeTestCase(`{"$code": "function() {}", "$scope": {}}`, bsontype.CodeWithScope, "Code With Scope"), + makeValidPeekTypeTestCase(`{"$binary": {"base64": "o0w498Or7cijeBSpkquNtg==", "subType": "03"}}`, bsontype.Binary, "Binary"), + makeValidPeekTypeTestCase(`{"$binary": "o0w498Or7cijeBSpkquNtg==", "$type": "03"}`, bsontype.Binary, "Binary"), + makeValidPeekTypeTestCase(`{"$regularExpression": {"pattern": "foo*", "options": "ix"}}`, bsontype.Regex, "Regular expression"), + makeValidPeekTypeTestCase(`{"$dbPointer": {"$ref": "db.collection", "$id": {"$oid": "57e193d7a9cc81b4027498b1"}}}`, bsontype.DBPointer, "DBPointer"), + makeValidPeekTypeTestCase(`{"$ref": "collection", "$id": {"$oid": "57fd71e96e32ab4225b723fb"}, "$db": "database"}`, bsontype.EmbeddedDocument, "DBRef"), + makeInvalidPeekTypeTestCase("invalid array--missing ]", `["a"`, expectError), + makeInvalidPeekTypeTestCase("invalid array--colon in array", `["a":`, expectError), + makeInvalidPeekTypeTestCase("invalid array--extra comma", `["a",,`, expectError), + makeInvalidPeekTypeTestCase("invalid array--trailing comma", `["a",]`, expectError), + makeInvalidPeekTypeTestCase("peekType after end of array", `["a"]`, expectErrEOA), + { + desc: "invalid array--leading comma", + input: `[,`, + typs: []bsontype.Type{bsontype.Array, bsontype.Type(0)}, + errFs: []expectedErrorFunc{expectNoError, expectError}, + }, + } + + for _, tc := range cases { + ejp := newExtJSONParser(strings.NewReader(tc.input), true) + + for i, eTyp := range tc.typs { + errF := tc.errFs[i] + + typ, err := ejp.peekType() + typDiff(t, eTyp, typ, tc.desc) + errF(t, err, tc.desc) + } + } +} + +func 
TestExtJSONParserReadKeyReadValue(t *testing.T) { + // several test cases will use the same keys, types, and values, and only differ on input structure + + keys := []string{"_id", "Symbol", "String", "Int32", "Int64", "Int", "MinKey"} + types := []bsontype.Type{bsontype.ObjectID, bsontype.Symbol, bsontype.String, bsontype.Int32, bsontype.Int64, bsontype.Int32, bsontype.MinKey} + values := []*extJSONValue{ + {t: bsontype.String, v: "57e193d7a9cc81b4027498b5"}, + {t: bsontype.String, v: "symbol"}, + {t: bsontype.String, v: "string"}, + {t: bsontype.String, v: "42"}, + {t: bsontype.String, v: "42"}, + {t: bsontype.Int32, v: int32(42)}, + {t: bsontype.Int32, v: int32(1)}, + } + + errFuncs := make([]expectedErrorFunc, 7) + for i := 0; i < 7; i++ { + errFuncs[i] = expectNoError + } + + firstKeyError := func(desc, input string) readKeyValueTestCase { + return readKeyValueTestCase{ + desc: desc, + input: input, + keys: []string{""}, + typs: []bsontype.Type{bsontype.Type(0)}, + vals: []*extJSONValue{nil}, + keyEFs: []expectedErrorFunc{expectError}, + valEFs: []expectedErrorFunc{expectErrorNOOP}, + } + } + + secondKeyError := func(desc, input, firstKey string, firstType bsontype.Type, firstValue *extJSONValue) readKeyValueTestCase { + return readKeyValueTestCase{ + desc: desc, + input: input, + keys: []string{firstKey, ""}, + typs: []bsontype.Type{firstType, bsontype.Type(0)}, + vals: []*extJSONValue{firstValue, nil}, + keyEFs: []expectedErrorFunc{expectNoError, expectError}, + valEFs: []expectedErrorFunc{expectNoError, expectErrorNOOP}, + } + } + + cases := []readKeyValueTestCase{ + { + desc: "normal spacing", + input: `{ + "_id": { "$oid": "57e193d7a9cc81b4027498b5" }, + "Symbol": { "$symbol": "symbol" }, + "String": "string", + "Int32": { "$numberInt": "42" }, + "Int64": { "$numberLong": "42" }, + "Int": 42, + "MinKey": { "$minKey": 1 } + }`, + keys: keys, typs: types, vals: values, + keyEFs: errFuncs, valEFs: errFuncs, + }, + { + desc: "new line before comma", + input: 
`{ "_id": { "$oid": "57e193d7a9cc81b4027498b5" } + , "Symbol": { "$symbol": "symbol" } + , "String": "string" + , "Int32": { "$numberInt": "42" } + , "Int64": { "$numberLong": "42" } + , "Int": 42 + , "MinKey": { "$minKey": 1 } + }`, + keys: keys, typs: types, vals: values, + keyEFs: errFuncs, valEFs: errFuncs, + }, + { + desc: "tabs around colons", + input: `{ + "_id": { "$oid" : "57e193d7a9cc81b4027498b5" }, + "Symbol": { "$symbol" : "symbol" }, + "String": "string", + "Int32": { "$numberInt" : "42" }, + "Int64": { "$numberLong": "42" }, + "Int": 42, + "MinKey": { "$minKey": 1 } + }`, + keys: keys, typs: types, vals: values, + keyEFs: errFuncs, valEFs: errFuncs, + }, + { + desc: "no whitespace", + input: `{"_id":{"$oid":"57e193d7a9cc81b4027498b5"},"Symbol":{"$symbol":"symbol"},"String":"string","Int32":{"$numberInt":"42"},"Int64":{"$numberLong":"42"},"Int":42,"MinKey":{"$minKey":1}}`, + keys: keys, typs: types, vals: values, + keyEFs: errFuncs, valEFs: errFuncs, + }, + { + desc: "mixed whitespace", + input: ` { + "_id" : { "$oid": "57e193d7a9cc81b4027498b5" }, + "Symbol" : { "$symbol": "symbol" } , + "String" : "string", + "Int32" : { "$numberInt": "42" } , + "Int64" : {"$numberLong" : "42"}, + "Int" : 42, + "MinKey" : { "$minKey": 1 } } `, + keys: keys, typs: types, vals: values, + keyEFs: errFuncs, valEFs: errFuncs, + }, + { + desc: "nested object", + input: `{"k1": 1, "k2": { "k3": { "k4": 4 } }, "k5": 5}`, + keys: []string{"k1", "k2", "k3", "k4", "", "", "k5", ""}, + typs: []bsontype.Type{bsontype.Int32, bsontype.EmbeddedDocument, bsontype.EmbeddedDocument, bsontype.Int32, bsontype.Type(0), bsontype.Type(0), bsontype.Int32, bsontype.Type(0)}, + vals: []*extJSONValue{ + {t: bsontype.Int32, v: int32(1)}, nil, nil, {t: bsontype.Int32, v: int32(4)}, nil, nil, {t: bsontype.Int32, v: int32(5)}, nil, + }, + keyEFs: []expectedErrorFunc{ + expectNoError, expectNoError, expectNoError, expectNoError, expectErrEOD, + expectErrEOD, expectNoError, expectErrEOD, + }, + 
valEFs: []expectedErrorFunc{ + expectNoError, expectError, expectError, expectNoError, expectErrorNOOP, + expectErrorNOOP, expectNoError, expectErrorNOOP, + }, + }, + { + desc: "invalid input: invalid values for extended type", + input: `{"a": {"$numberInt": "1", "x"`, + keys: []string{"a"}, + typs: []bsontype.Type{bsontype.Int32}, + vals: []*extJSONValue{nil}, + keyEFs: []expectedErrorFunc{expectNoError}, + valEFs: []expectedErrorFunc{expectError}, + }, + firstKeyError("invalid input: missing key--EOF", "{"), + firstKeyError("invalid input: missing key--colon first", "{:"), + firstKeyError("invalid input: missing value", `{"a":`), + firstKeyError("invalid input: missing colon", `{"a" 1`), + firstKeyError("invalid input: extra colon", `{"a"::`), + secondKeyError("invalid input: missing }", `{"a": 1`, "a", bsontype.Int32, &extJSONValue{t: bsontype.Int32, v: int32(1)}), + secondKeyError("invalid input: missing comma", `{"a": 1 "b"`, "a", bsontype.Int32, &extJSONValue{t: bsontype.Int32, v: int32(1)}), + secondKeyError("invalid input: extra comma", `{"a": 1,, "b"`, "a", bsontype.Int32, &extJSONValue{t: bsontype.Int32, v: int32(1)}), + secondKeyError("invalid input: trailing comma in object", `{"a": 1,}`, "a", bsontype.Int32, &extJSONValue{t: bsontype.Int32, v: int32(1)}), + } + + for _, tc := range cases { + ejp := newExtJSONParser(strings.NewReader(tc.input), true) + + for i, eKey := range tc.keys { + eTyp := tc.typs[i] + eVal := tc.vals[i] + + keyErrF := tc.keyEFs[i] + valErrF := tc.valEFs[i] + + k, typ, err := ejp.readKey() + readKeyDiff(t, eKey, k, eTyp, typ, err, keyErrF, tc.desc) + + v, err := ejp.readValue(typ) + readValueDiff(t, eVal, v, err, valErrF, tc.desc) + } + } +} + +type ejpExpectationTest func(t *testing.T, p *extJSONParser, expectedKey string, expectedType bsontype.Type, expectedValue interface{}) + +type ejpTestCase struct { + f ejpExpectationTest + p *extJSONParser + k string + t bsontype.Type + v interface{} +} + +// expectSingleValue is used for 
simple JSON types (strings, numbers, literals) and for extended JSON types that +// have single key-value pairs (i.e. { "$minKey": 1 }, { "$numberLong": "42.42" }) +func expectSingleValue(t *testing.T, p *extJSONParser, expectedKey string, expectedType bsontype.Type, expectedValue interface{}) { + eVal := expectedValue.(*extJSONValue) + + k, typ, err := p.readKey() + readKeyDiff(t, expectedKey, k, expectedType, typ, err, expectNoError, expectedKey) + + v, err := p.readValue(typ) + readValueDiff(t, eVal, v, err, expectNoError, expectedKey) +} + +// expectMultipleValues is used for values that are subdocuments of known size and with known keys (such as extended +// JSON types { "$timestamp": {"t": 1, "i": 1} } and { "$regularExpression": {"pattern": "", options: ""} }) +func expectMultipleValues(t *testing.T, p *extJSONParser, expectedKey string, expectedType bsontype.Type, expectedValue interface{}) { + k, typ, err := p.readKey() + readKeyDiff(t, expectedKey, k, expectedType, typ, err, expectNoError, expectedKey) + + v, err := p.readValue(typ) + expectNoError(t, err, "") + typDiff(t, bsontype.EmbeddedDocument, v.t, expectedKey) + + actObj := v.v.(*extJSONObject) + expObj := expectedValue.(*extJSONObject) + + for i, actKey := range actObj.keys { + expKey := expObj.keys[i] + actVal := actObj.values[i] + expVal := expObj.values[i] + + keyDiff(t, expKey, actKey, expectedKey) + typDiff(t, expVal.t, actVal.t, expectedKey) + valDiff(t, expVal.v, actVal.v, expectedKey) + } +} + +type ejpKeyTypValTriple struct { + key string + typ bsontype.Type + val *extJSONValue +} + +type ejpSubDocumentTestValue struct { + code string // code is only used for TypeCodeWithScope (and is ignored for TypeEmbeddedDocument + ktvs []ejpKeyTypValTriple // list of (key, type, value) triples; this is "scope" for TypeCodeWithScope +} + +// expectSubDocument is used for embedded documents and code with scope types; it reads all the keys and values +// in the embedded document (or scope for 
codeWithScope) and compares them to the expectedValue's list of (key, type, +// value) triples +func expectSubDocument(t *testing.T, p *extJSONParser, expectedKey string, expectedType bsontype.Type, expectedValue interface{}) { + subdoc := expectedValue.(ejpSubDocumentTestValue) + + k, typ, err := p.readKey() + readKeyDiff(t, expectedKey, k, expectedType, typ, err, expectNoError, expectedKey) + + if expectedType == bsontype.CodeWithScope { + v, err := p.readValue(typ) + readValueDiff(t, &extJSONValue{t: bsontype.String, v: subdoc.code}, v, err, expectNoError, expectedKey) + } + + for _, ktv := range subdoc.ktvs { + eKey := ktv.key + eTyp := ktv.typ + eVal := ktv.val + + k, typ, err = p.readKey() + readKeyDiff(t, eKey, k, eTyp, typ, err, expectNoError, expectedKey) + + v, err := p.readValue(typ) + readValueDiff(t, eVal, v, err, expectNoError, expectedKey) + } + + if expectedType == bsontype.CodeWithScope { + // expect scope doc to close + k, typ, err = p.readKey() + readKeyDiff(t, "", k, bsontype.Type(0), typ, err, expectErrEOD, expectedKey) + } + + // expect subdoc to close + k, typ, err = p.readKey() + readKeyDiff(t, "", k, bsontype.Type(0), typ, err, expectErrEOD, expectedKey) +} + +// expectArray takes the expectedKey, ignores the expectedType, and uses the expectedValue +// as a slice of (type Type, value *extJSONValue) pairs +func expectArray(t *testing.T, p *extJSONParser, expectedKey string, _ bsontype.Type, expectedValue interface{}) { + ktvs := expectedValue.([]ejpKeyTypValTriple) + + k, typ, err := p.readKey() + readKeyDiff(t, expectedKey, k, bsontype.Array, typ, err, expectNoError, expectedKey) + + for _, ktv := range ktvs { + eTyp := ktv.typ + eVal := ktv.val + + typ, err = p.peekType() + typDiff(t, eTyp, typ, expectedKey) + expectNoError(t, err, expectedKey) + + v, err := p.readValue(typ) + readValueDiff(t, eVal, v, err, expectNoError, expectedKey) + } + + // expect array to end + typ, err = p.peekType() + typDiff(t, bsontype.Type(0), typ, expectedKey) 
+ expectErrEOA(t, err, expectedKey) +} + +func TestExtJSONParserAllTypes(t *testing.T) { + in := ` { "_id" : { "$oid": "57e193d7a9cc81b4027498b5"} + , "Symbol" : { "$symbol": "symbol"} + , "String" : "string" + , "Int32" : { "$numberInt": "42"} + , "Int64" : { "$numberLong": "42"} + , "Double" : { "$numberDouble": "42.42"} + , "SpecialFloat" : { "$numberDouble": "NaN" } + , "Decimal" : { "$numberDecimal": "1234" } + , "Binary" : { "$binary": { "base64": "o0w498Or7cijeBSpkquNtg==", "subType": "03" } } + , "BinaryLegacy" : { "$binary": "o0w498Or7cijeBSpkquNtg==", "$type": "03" } + , "BinaryUserDefined" : { "$binary": { "base64": "AQIDBAU=", "subType": "80" } } + , "Code" : { "$code": "function() {}" } + , "CodeWithEmptyScope" : { "$code": "function() {}", "$scope": {} } + , "CodeWithScope" : { "$code": "function() {}", "$scope": { "x": 1 } } + , "EmptySubdocument" : {} + , "Subdocument" : { "foo": "bar", "baz": { "$numberInt": "42" } } + , "Array" : [{"$numberInt": "1"}, {"$numberLong": "2"}, {"$numberDouble": "3"}, 4, "string", 5.0] + , "Timestamp" : { "$timestamp": { "t": 42, "i": 1 } } + , "RegularExpression" : { "$regularExpression": { "pattern": "foo*", "options": "ix" } } + , "DatetimeEpoch" : { "$date": { "$numberLong": "0" } } + , "DatetimePositive" : { "$date": { "$numberLong": "9223372036854775807" } } + , "DatetimeNegative" : { "$date": { "$numberLong": "-9223372036854775808" } } + , "True" : true + , "False" : false + , "DBPointer" : { "$dbPointer": { "$ref": "db.collection", "$id": { "$oid": "57e193d7a9cc81b4027498b1" } } } + , "DBRef" : { "$ref": "collection", "$id": { "$oid": "57fd71e96e32ab4225b723fb" }, "$db": "database" } + , "DBRefNoDB" : { "$ref": "collection", "$id": { "$oid": "57fd71e96e32ab4225b723fb" } } + , "MinKey" : { "$minKey": 1 } + , "MaxKey" : { "$maxKey": 1 } + , "Null" : null + , "Undefined" : { "$undefined": true } + }` + + ejp := newExtJSONParser(strings.NewReader(in), true) + + cases := []ejpTestCase{ + { + f: expectSingleValue, p: 
ejp, + k: "_id", t: bsontype.ObjectID, v: &extJSONValue{t: bsontype.String, v: "57e193d7a9cc81b4027498b5"}, + }, + { + f: expectSingleValue, p: ejp, + k: "Symbol", t: bsontype.Symbol, v: &extJSONValue{t: bsontype.String, v: "symbol"}, + }, + { + f: expectSingleValue, p: ejp, + k: "String", t: bsontype.String, v: &extJSONValue{t: bsontype.String, v: "string"}, + }, + { + f: expectSingleValue, p: ejp, + k: "Int32", t: bsontype.Int32, v: &extJSONValue{t: bsontype.String, v: "42"}, + }, + { + f: expectSingleValue, p: ejp, + k: "Int64", t: bsontype.Int64, v: &extJSONValue{t: bsontype.String, v: "42"}, + }, + { + f: expectSingleValue, p: ejp, + k: "Double", t: bsontype.Double, v: &extJSONValue{t: bsontype.String, v: "42.42"}, + }, + { + f: expectSingleValue, p: ejp, + k: "SpecialFloat", t: bsontype.Double, v: &extJSONValue{t: bsontype.String, v: "NaN"}, + }, + { + f: expectSingleValue, p: ejp, + k: "Decimal", t: bsontype.Decimal128, v: &extJSONValue{t: bsontype.String, v: "1234"}, + }, + { + f: expectMultipleValues, p: ejp, + k: "Binary", t: bsontype.Binary, + v: &extJSONObject{ + keys: []string{"base64", "subType"}, + values: []*extJSONValue{ + {t: bsontype.String, v: "o0w498Or7cijeBSpkquNtg=="}, + {t: bsontype.String, v: "03"}, + }, + }, + }, + { + f: expectMultipleValues, p: ejp, + k: "BinaryLegacy", t: bsontype.Binary, + v: &extJSONObject{ + keys: []string{"base64", "subType"}, + values: []*extJSONValue{ + {t: bsontype.String, v: "o0w498Or7cijeBSpkquNtg=="}, + {t: bsontype.String, v: "03"}, + }, + }, + }, + { + f: expectMultipleValues, p: ejp, + k: "BinaryUserDefined", t: bsontype.Binary, + v: &extJSONObject{ + keys: []string{"base64", "subType"}, + values: []*extJSONValue{ + {t: bsontype.String, v: "AQIDBAU="}, + {t: bsontype.String, v: "80"}, + }, + }, + }, + { + f: expectSingleValue, p: ejp, + k: "Code", t: bsontype.JavaScript, v: &extJSONValue{t: bsontype.String, v: "function() {}"}, + }, + { + f: expectSubDocument, p: ejp, + k: "CodeWithEmptyScope", t: 
bsontype.CodeWithScope, + v: ejpSubDocumentTestValue{ + code: "function() {}", + ktvs: []ejpKeyTypValTriple{}, + }, + }, + { + f: expectSubDocument, p: ejp, + k: "CodeWithScope", t: bsontype.CodeWithScope, + v: ejpSubDocumentTestValue{ + code: "function() {}", + ktvs: []ejpKeyTypValTriple{ + {"x", bsontype.Int32, &extJSONValue{t: bsontype.Int32, v: int32(1)}}, + }, + }, + }, + { + f: expectSubDocument, p: ejp, + k: "EmptySubdocument", t: bsontype.EmbeddedDocument, + v: ejpSubDocumentTestValue{ + ktvs: []ejpKeyTypValTriple{}, + }, + }, + { + f: expectSubDocument, p: ejp, + k: "Subdocument", t: bsontype.EmbeddedDocument, + v: ejpSubDocumentTestValue{ + ktvs: []ejpKeyTypValTriple{ + {"foo", bsontype.String, &extJSONValue{t: bsontype.String, v: "bar"}}, + {"baz", bsontype.Int32, &extJSONValue{t: bsontype.String, v: "42"}}, + }, + }, + }, + { + f: expectArray, p: ejp, + k: "Array", t: bsontype.Array, + v: []ejpKeyTypValTriple{ + {typ: bsontype.Int32, val: &extJSONValue{t: bsontype.String, v: "1"}}, + {typ: bsontype.Int64, val: &extJSONValue{t: bsontype.String, v: "2"}}, + {typ: bsontype.Double, val: &extJSONValue{t: bsontype.String, v: "3"}}, + {typ: bsontype.Int32, val: &extJSONValue{t: bsontype.Int32, v: int32(4)}}, + {typ: bsontype.String, val: &extJSONValue{t: bsontype.String, v: "string"}}, + {typ: bsontype.Double, val: &extJSONValue{t: bsontype.Double, v: 5.0}}, + }, + }, + { + f: expectMultipleValues, p: ejp, + k: "Timestamp", t: bsontype.Timestamp, + v: &extJSONObject{ + keys: []string{"t", "i"}, + values: []*extJSONValue{ + {t: bsontype.Int32, v: int32(42)}, + {t: bsontype.Int32, v: int32(1)}, + }, + }, + }, + { + f: expectMultipleValues, p: ejp, + k: "RegularExpression", t: bsontype.Regex, + v: &extJSONObject{ + keys: []string{"pattern", "options"}, + values: []*extJSONValue{ + {t: bsontype.String, v: "foo*"}, + {t: bsontype.String, v: "ix"}, + }, + }, + }, + { + f: expectMultipleValues, p: ejp, + k: "DatetimeEpoch", t: bsontype.DateTime, + v: &extJSONObject{ 
+ keys: []string{"$numberLong"}, + values: []*extJSONValue{ + {t: bsontype.String, v: "0"}, + }, + }, + }, + { + f: expectMultipleValues, p: ejp, + k: "DatetimePositive", t: bsontype.DateTime, + v: &extJSONObject{ + keys: []string{"$numberLong"}, + values: []*extJSONValue{ + {t: bsontype.String, v: "9223372036854775807"}, + }, + }, + }, + { + f: expectMultipleValues, p: ejp, + k: "DatetimeNegative", t: bsontype.DateTime, + v: &extJSONObject{ + keys: []string{"$numberLong"}, + values: []*extJSONValue{ + {t: bsontype.String, v: "-9223372036854775808"}, + }, + }, + }, + { + f: expectSingleValue, p: ejp, + k: "True", t: bsontype.Boolean, v: &extJSONValue{t: bsontype.Boolean, v: true}, + }, + { + f: expectSingleValue, p: ejp, + k: "False", t: bsontype.Boolean, v: &extJSONValue{t: bsontype.Boolean, v: false}, + }, + { + f: expectMultipleValues, p: ejp, + k: "DBPointer", t: bsontype.DBPointer, + v: &extJSONObject{ + keys: []string{"$ref", "$id"}, + values: []*extJSONValue{ + {t: bsontype.String, v: "db.collection"}, + {t: bsontype.String, v: "57e193d7a9cc81b4027498b1"}, + }, + }, + }, + { + f: expectSubDocument, p: ejp, + k: "DBRef", t: bsontype.EmbeddedDocument, + v: ejpSubDocumentTestValue{ + ktvs: []ejpKeyTypValTriple{ + {"$ref", bsontype.String, &extJSONValue{t: bsontype.String, v: "collection"}}, + {"$id", bsontype.ObjectID, &extJSONValue{t: bsontype.String, v: "57fd71e96e32ab4225b723fb"}}, + {"$db", bsontype.String, &extJSONValue{t: bsontype.String, v: "database"}}, + }, + }, + }, + { + f: expectSubDocument, p: ejp, + k: "DBRefNoDB", t: bsontype.EmbeddedDocument, + v: ejpSubDocumentTestValue{ + ktvs: []ejpKeyTypValTriple{ + {"$ref", bsontype.String, &extJSONValue{t: bsontype.String, v: "collection"}}, + {"$id", bsontype.ObjectID, &extJSONValue{t: bsontype.String, v: "57fd71e96e32ab4225b723fb"}}, + }, + }, + }, + { + f: expectSingleValue, p: ejp, + k: "MinKey", t: bsontype.MinKey, v: &extJSONValue{t: bsontype.Int32, v: int32(1)}, + }, + { + f: expectSingleValue, p: 
ejp, + k: "MaxKey", t: bsontype.MaxKey, v: &extJSONValue{t: bsontype.Int32, v: int32(1)}, + }, + { + f: expectSingleValue, p: ejp, + k: "Null", t: bsontype.Null, v: &extJSONValue{t: bsontype.Null, v: nil}, + }, + { + f: expectSingleValue, p: ejp, + k: "Undefined", t: bsontype.Undefined, v: &extJSONValue{t: bsontype.Boolean, v: true}, + }, + } + + // run the test cases + for _, tc := range cases { + tc.f(t, tc.p, tc.k, tc.t, tc.v) + } + + // expect end of whole document: read final } + k, typ, err := ejp.readKey() + readKeyDiff(t, "", k, bsontype.Type(0), typ, err, expectErrEOD, "") + + // expect end of whole document: read EOF + k, typ, err = ejp.readKey() + readKeyDiff(t, "", k, bsontype.Type(0), typ, err, expectErrEOF, "") + if diff := cmp.Diff(jpsDoneState, ejp.s); diff != "" { + t.Errorf("expected parser to be in done state but instead is in %v\n", ejp.s) + t.FailNow() + } +} + +func TestExtJSONValue(t *testing.T) { + t.Run("Large Date", func(t *testing.T) { + val := &extJSONValue{ + t: bsontype.String, + v: "3001-01-01T00:00:00Z", + } + + intVal, err := val.parseDateTime() + if err != nil { + t.Fatalf("error parsing date time: %v", err) + } + + if intVal <= 0 { + t.Fatalf("expected value above 0, got %v", intVal) + } + }) +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go new file mode 100644 index 0000000..dd560c9 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go @@ -0,0 +1,659 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonrw + +import ( + "fmt" + "io" + "sync" + + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" +) + +// ExtJSONValueReaderPool is a pool for ValueReaders that read ExtJSON. +type ExtJSONValueReaderPool struct { + pool sync.Pool +} + +// NewExtJSONValueReaderPool instantiates a new ExtJSONValueReaderPool. +func NewExtJSONValueReaderPool() *ExtJSONValueReaderPool { + return &ExtJSONValueReaderPool{ + pool: sync.Pool{ + New: func() interface{} { + return new(extJSONValueReader) + }, + }, + } +} + +// Get retrieves a ValueReader from the pool and uses src as the underlying ExtJSON. +func (bvrp *ExtJSONValueReaderPool) Get(r io.Reader, canonical bool) (ValueReader, error) { + vr := bvrp.pool.Get().(*extJSONValueReader) + return vr.reset(r, canonical) +} + +// Put inserts a ValueReader into the pool. If the ValueReader is not a ExtJSON ValueReader nothing +// is inserted into the pool and ok will be false. +func (bvrp *ExtJSONValueReaderPool) Put(vr ValueReader) (ok bool) { + bvr, ok := vr.(*extJSONValueReader) + if !ok { + return false + } + + bvr, _ = bvr.reset(nil, false) + bvrp.pool.Put(bvr) + return true +} + +type ejvrState struct { + mode mode + vType bsontype.Type + depth int +} + +// extJSONValueReader is for reading extended JSON. 
+type extJSONValueReader struct { + p *extJSONParser + + stack []ejvrState + frame int +} + +// NewExtJSONValueReader creates a new ValueReader from a given io.Reader +// It will interpret the JSON of r as canonical or relaxed according to the +// given canonical flag +func NewExtJSONValueReader(r io.Reader, canonical bool) (ValueReader, error) { + return newExtJSONValueReader(r, canonical) +} + +func newExtJSONValueReader(r io.Reader, canonical bool) (*extJSONValueReader, error) { + ejvr := new(extJSONValueReader) + return ejvr.reset(r, canonical) +} + +func (ejvr *extJSONValueReader) reset(r io.Reader, canonical bool) (*extJSONValueReader, error) { + p := newExtJSONParser(r, canonical) + typ, err := p.peekType() + + if err != nil { + return nil, ErrInvalidJSON + } + + var m mode + switch typ { + case bsontype.EmbeddedDocument: + m = mTopLevel + case bsontype.Array: + m = mArray + default: + m = mValue + } + + stack := make([]ejvrState, 1, 5) + stack[0] = ejvrState{ + mode: m, + vType: typ, + } + return &extJSONValueReader{ + p: p, + stack: stack, + }, nil +} + +func (ejvr *extJSONValueReader) advanceFrame() { + if ejvr.frame+1 >= len(ejvr.stack) { // We need to grow the stack + length := len(ejvr.stack) + if length+1 >= cap(ejvr.stack) { + // double it + buf := make([]ejvrState, 2*cap(ejvr.stack)+1) + copy(buf, ejvr.stack) + ejvr.stack = buf + } + ejvr.stack = ejvr.stack[:length+1] + } + ejvr.frame++ + + // Clean the stack + ejvr.stack[ejvr.frame].mode = 0 + ejvr.stack[ejvr.frame].vType = 0 + ejvr.stack[ejvr.frame].depth = 0 +} + +func (ejvr *extJSONValueReader) pushDocument() { + ejvr.advanceFrame() + + ejvr.stack[ejvr.frame].mode = mDocument + ejvr.stack[ejvr.frame].depth = ejvr.p.depth +} + +func (ejvr *extJSONValueReader) pushCodeWithScope() { + ejvr.advanceFrame() + + ejvr.stack[ejvr.frame].mode = mCodeWithScope +} + +func (ejvr *extJSONValueReader) pushArray() { + ejvr.advanceFrame() + + ejvr.stack[ejvr.frame].mode = mArray +} + +func (ejvr 
*extJSONValueReader) push(m mode, t bsontype.Type) { + ejvr.advanceFrame() + + ejvr.stack[ejvr.frame].mode = m + ejvr.stack[ejvr.frame].vType = t +} + +func (ejvr *extJSONValueReader) pop() { + switch ejvr.stack[ejvr.frame].mode { + case mElement, mValue: + ejvr.frame-- + case mDocument, mArray, mCodeWithScope: + ejvr.frame -= 2 // we pop twice to jump over the vrElement: vrDocument -> vrElement -> vrDocument/TopLevel/etc... + } +} + +func (ejvr *extJSONValueReader) skipDocument() error { + // read entire document until ErrEOD (using readKey and readValue) + _, typ, err := ejvr.p.readKey() + for err == nil { + _, err = ejvr.p.readValue(typ) + if err != nil { + break + } + + _, typ, err = ejvr.p.readKey() + } + + return err +} + +func (ejvr *extJSONValueReader) skipArray() error { + // read entire array until ErrEOA (using peekType) + _, err := ejvr.p.peekType() + for err == nil { + _, err = ejvr.p.peekType() + } + + return err +} + +func (ejvr *extJSONValueReader) invalidTransitionErr(destination mode, name string, modes []mode) error { + te := TransitionError{ + name: name, + current: ejvr.stack[ejvr.frame].mode, + destination: destination, + modes: modes, + action: "read", + } + if ejvr.frame != 0 { + te.parent = ejvr.stack[ejvr.frame-1].mode + } + return te +} + +func (ejvr *extJSONValueReader) typeError(t bsontype.Type) error { + return fmt.Errorf("positioned on %s, but attempted to read %s", ejvr.stack[ejvr.frame].vType, t) +} + +func (ejvr *extJSONValueReader) ensureElementValue(t bsontype.Type, destination mode, callerName string, addModes ...mode) error { + switch ejvr.stack[ejvr.frame].mode { + case mElement, mValue: + if ejvr.stack[ejvr.frame].vType != t { + return ejvr.typeError(t) + } + default: + modes := []mode{mElement, mValue} + if addModes != nil { + modes = append(modes, addModes...) 
+ } + return ejvr.invalidTransitionErr(destination, callerName, modes) + } + + return nil +} + +func (ejvr *extJSONValueReader) Type() bsontype.Type { + return ejvr.stack[ejvr.frame].vType +} + +func (ejvr *extJSONValueReader) Skip() error { + switch ejvr.stack[ejvr.frame].mode { + case mElement, mValue: + default: + return ejvr.invalidTransitionErr(0, "Skip", []mode{mElement, mValue}) + } + + defer ejvr.pop() + + t := ejvr.stack[ejvr.frame].vType + switch t { + case bsontype.Array: + // read entire array until ErrEOA + err := ejvr.skipArray() + if err != ErrEOA { + return err + } + case bsontype.EmbeddedDocument: + // read entire doc until ErrEOD + err := ejvr.skipDocument() + if err != ErrEOD { + return err + } + case bsontype.CodeWithScope: + // read the code portion and set up parser in document mode + _, err := ejvr.p.readValue(t) + if err != nil { + return err + } + + // read until ErrEOD + err = ejvr.skipDocument() + if err != ErrEOD { + return err + } + default: + _, err := ejvr.p.readValue(t) + if err != nil { + return err + } + } + + return nil +} + +func (ejvr *extJSONValueReader) ReadArray() (ArrayReader, error) { + switch ejvr.stack[ejvr.frame].mode { + case mTopLevel: // allow reading array from top level + case mArray: + return ejvr, nil + default: + if err := ejvr.ensureElementValue(bsontype.Array, mArray, "ReadArray", mTopLevel, mArray); err != nil { + return nil, err + } + } + + ejvr.pushArray() + + return ejvr, nil +} + +func (ejvr *extJSONValueReader) ReadBinary() (b []byte, btype byte, err error) { + if err := ejvr.ensureElementValue(bsontype.Binary, 0, "ReadBinary"); err != nil { + return nil, 0, err + } + + v, err := ejvr.p.readValue(bsontype.Binary) + if err != nil { + return nil, 0, err + } + + b, btype, err = v.parseBinary() + + ejvr.pop() + return b, btype, err +} + +func (ejvr *extJSONValueReader) ReadBoolean() (bool, error) { + if err := ejvr.ensureElementValue(bsontype.Boolean, 0, "ReadBoolean"); err != nil { + return false, err + } + 
+ v, err := ejvr.p.readValue(bsontype.Boolean) + if err != nil { + return false, err + } + + if v.t != bsontype.Boolean { + return false, fmt.Errorf("expected type bool, but got type %s", v.t) + } + + ejvr.pop() + return v.v.(bool), nil +} + +func (ejvr *extJSONValueReader) ReadDocument() (DocumentReader, error) { + switch ejvr.stack[ejvr.frame].mode { + case mTopLevel: + return ejvr, nil + case mElement, mValue: + if ejvr.stack[ejvr.frame].vType != bsontype.EmbeddedDocument { + return nil, ejvr.typeError(bsontype.EmbeddedDocument) + } + + ejvr.pushDocument() + return ejvr, nil + default: + return nil, ejvr.invalidTransitionErr(mDocument, "ReadDocument", []mode{mTopLevel, mElement, mValue}) + } +} + +func (ejvr *extJSONValueReader) ReadCodeWithScope() (code string, dr DocumentReader, err error) { + if err = ejvr.ensureElementValue(bsontype.CodeWithScope, 0, "ReadCodeWithScope"); err != nil { + return "", nil, err + } + + v, err := ejvr.p.readValue(bsontype.CodeWithScope) + if err != nil { + return "", nil, err + } + + code, err = v.parseJavascript() + + ejvr.pushCodeWithScope() + return code, ejvr, err +} + +func (ejvr *extJSONValueReader) ReadDBPointer() (ns string, oid primitive.ObjectID, err error) { + if err = ejvr.ensureElementValue(bsontype.DBPointer, 0, "ReadDBPointer"); err != nil { + return "", primitive.NilObjectID, err + } + + v, err := ejvr.p.readValue(bsontype.DBPointer) + if err != nil { + return "", primitive.NilObjectID, err + } + + ns, oid, err = v.parseDBPointer() + + ejvr.pop() + return ns, oid, err +} + +func (ejvr *extJSONValueReader) ReadDateTime() (int64, error) { + if err := ejvr.ensureElementValue(bsontype.DateTime, 0, "ReadDateTime"); err != nil { + return 0, err + } + + v, err := ejvr.p.readValue(bsontype.DateTime) + if err != nil { + return 0, err + } + + d, err := v.parseDateTime() + + ejvr.pop() + return d, err +} + +func (ejvr *extJSONValueReader) ReadDecimal128() (primitive.Decimal128, error) { + if err := 
ejvr.ensureElementValue(bsontype.Decimal128, 0, "ReadDecimal128"); err != nil { + return primitive.Decimal128{}, err + } + + v, err := ejvr.p.readValue(bsontype.Decimal128) + if err != nil { + return primitive.Decimal128{}, err + } + + d, err := v.parseDecimal128() + + ejvr.pop() + return d, err +} + +func (ejvr *extJSONValueReader) ReadDouble() (float64, error) { + if err := ejvr.ensureElementValue(bsontype.Double, 0, "ReadDouble"); err != nil { + return 0, err + } + + v, err := ejvr.p.readValue(bsontype.Double) + if err != nil { + return 0, err + } + + d, err := v.parseDouble() + + ejvr.pop() + return d, err +} + +func (ejvr *extJSONValueReader) ReadInt32() (int32, error) { + if err := ejvr.ensureElementValue(bsontype.Int32, 0, "ReadInt32"); err != nil { + return 0, err + } + + v, err := ejvr.p.readValue(bsontype.Int32) + if err != nil { + return 0, err + } + + i, err := v.parseInt32() + + ejvr.pop() + return i, err +} + +func (ejvr *extJSONValueReader) ReadInt64() (int64, error) { + if err := ejvr.ensureElementValue(bsontype.Int64, 0, "ReadInt64"); err != nil { + return 0, err + } + + v, err := ejvr.p.readValue(bsontype.Int64) + if err != nil { + return 0, err + } + + i, err := v.parseInt64() + + ejvr.pop() + return i, err +} + +func (ejvr *extJSONValueReader) ReadJavascript() (code string, err error) { + if err = ejvr.ensureElementValue(bsontype.JavaScript, 0, "ReadJavascript"); err != nil { + return "", err + } + + v, err := ejvr.p.readValue(bsontype.JavaScript) + if err != nil { + return "", err + } + + code, err = v.parseJavascript() + + ejvr.pop() + return code, err +} + +func (ejvr *extJSONValueReader) ReadMaxKey() error { + if err := ejvr.ensureElementValue(bsontype.MaxKey, 0, "ReadMaxKey"); err != nil { + return err + } + + v, err := ejvr.p.readValue(bsontype.MaxKey) + if err != nil { + return err + } + + err = v.parseMinMaxKey("max") + + ejvr.pop() + return err +} + +func (ejvr *extJSONValueReader) ReadMinKey() error { + if err := 
ejvr.ensureElementValue(bsontype.MinKey, 0, "ReadMinKey"); err != nil { + return err + } + + v, err := ejvr.p.readValue(bsontype.MinKey) + if err != nil { + return err + } + + err = v.parseMinMaxKey("min") + + ejvr.pop() + return err +} + +func (ejvr *extJSONValueReader) ReadNull() error { + if err := ejvr.ensureElementValue(bsontype.Null, 0, "ReadNull"); err != nil { + return err + } + + v, err := ejvr.p.readValue(bsontype.Null) + if err != nil { + return err + } + + if v.t != bsontype.Null { + return fmt.Errorf("expected type null but got type %s", v.t) + } + + ejvr.pop() + return nil +} + +func (ejvr *extJSONValueReader) ReadObjectID() (primitive.ObjectID, error) { + if err := ejvr.ensureElementValue(bsontype.ObjectID, 0, "ReadObjectID"); err != nil { + return primitive.ObjectID{}, err + } + + v, err := ejvr.p.readValue(bsontype.ObjectID) + if err != nil { + return primitive.ObjectID{}, err + } + + oid, err := v.parseObjectID() + + ejvr.pop() + return oid, err +} + +func (ejvr *extJSONValueReader) ReadRegex() (pattern string, options string, err error) { + if err = ejvr.ensureElementValue(bsontype.Regex, 0, "ReadRegex"); err != nil { + return "", "", err + } + + v, err := ejvr.p.readValue(bsontype.Regex) + if err != nil { + return "", "", err + } + + pattern, options, err = v.parseRegex() + + ejvr.pop() + return pattern, options, err +} + +func (ejvr *extJSONValueReader) ReadString() (string, error) { + if err := ejvr.ensureElementValue(bsontype.String, 0, "ReadString"); err != nil { + return "", err + } + + v, err := ejvr.p.readValue(bsontype.String) + if err != nil { + return "", err + } + + if v.t != bsontype.String { + return "", fmt.Errorf("expected type string but got type %s", v.t) + } + + ejvr.pop() + return v.v.(string), nil +} + +func (ejvr *extJSONValueReader) ReadSymbol() (symbol string, err error) { + if err = ejvr.ensureElementValue(bsontype.Symbol, 0, "ReadSymbol"); err != nil { + return "", err + } + + v, err := ejvr.p.readValue(bsontype.Symbol) 
+ if err != nil { + return "", err + } + + symbol, err = v.parseSymbol() + + ejvr.pop() + return symbol, err +} + +func (ejvr *extJSONValueReader) ReadTimestamp() (t uint32, i uint32, err error) { + if err = ejvr.ensureElementValue(bsontype.Timestamp, 0, "ReadTimestamp"); err != nil { + return 0, 0, err + } + + v, err := ejvr.p.readValue(bsontype.Timestamp) + if err != nil { + return 0, 0, err + } + + t, i, err = v.parseTimestamp() + + ejvr.pop() + return t, i, err +} + +func (ejvr *extJSONValueReader) ReadUndefined() error { + if err := ejvr.ensureElementValue(bsontype.Undefined, 0, "ReadUndefined"); err != nil { + return err + } + + v, err := ejvr.p.readValue(bsontype.Undefined) + if err != nil { + return err + } + + err = v.parseUndefined() + + ejvr.pop() + return err +} + +func (ejvr *extJSONValueReader) ReadElement() (string, ValueReader, error) { + switch ejvr.stack[ejvr.frame].mode { + case mTopLevel, mDocument, mCodeWithScope: + default: + return "", nil, ejvr.invalidTransitionErr(mElement, "ReadElement", []mode{mTopLevel, mDocument, mCodeWithScope}) + } + + name, t, err := ejvr.p.readKey() + + if err != nil { + if err == ErrEOD { + if ejvr.stack[ejvr.frame].mode == mCodeWithScope { + _, err := ejvr.p.peekType() + if err != nil { + return "", nil, err + } + } + + ejvr.pop() + } + + return "", nil, err + } + + ejvr.push(mElement, t) + return name, ejvr, nil +} + +func (ejvr *extJSONValueReader) ReadValue() (ValueReader, error) { + switch ejvr.stack[ejvr.frame].mode { + case mArray: + default: + return nil, ejvr.invalidTransitionErr(mValue, "ReadValue", []mode{mArray}) + } + + t, err := ejvr.p.peekType() + if err != nil { + if err == ErrEOA { + ejvr.pop() + } + + return nil, err + } + + ejvr.push(mValue, t) + return ejvr, nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader_test.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader_test.go new file mode 100644 index 0000000..8a9f0cc --- /dev/null +++ 
b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader_test.go @@ -0,0 +1,168 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonrw + +import ( + "fmt" + "io" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "go.mongodb.org/mongo-driver/bson/bsontype" +) + +func TestExtJSONReader(t *testing.T) { + t.Run("ReadDocument", func(t *testing.T) { + t.Run("EmbeddedDocument", func(t *testing.T) { + ejvr := &extJSONValueReader{ + stack: []ejvrState{ + {mode: mTopLevel}, + {mode: mElement, vType: bsontype.Boolean}, + }, + frame: 1, + } + + ejvr.stack[1].mode = mArray + wanterr := ejvr.invalidTransitionErr(mDocument, "ReadDocument", []mode{mTopLevel, mElement, mValue}) + _, err := ejvr.ReadDocument() + if err == nil || err.Error() != wanterr.Error() { + t.Errorf("Incorrect returned error. got %v; want %v", err, wanterr) + } + + }) + }) + + t.Run("invalid transition", func(t *testing.T) { + t.Run("Skip", func(t *testing.T) { + ejvr := &extJSONValueReader{stack: []ejvrState{{mode: mTopLevel}}} + wanterr := (&extJSONValueReader{stack: []ejvrState{{mode: mTopLevel}}}).invalidTransitionErr(0, "Skip", []mode{mElement, mValue}) + goterr := ejvr.Skip() + if !cmp.Equal(goterr, wanterr, cmp.Comparer(compareErrors)) { + t.Errorf("Expected correct invalid transition error. 
got %v; want %v", goterr, wanterr) + } + }) + }) +} + +func TestReadMultipleTopLevelDocuments(t *testing.T) { + testCases := []struct { + name string + input string + expected [][]byte + }{ + { + "single top-level document", + "{\"foo\":1}", + [][]byte{ + {0x0E, 0x00, 0x00, 0x00, 0x10, 'f', 'o', 'o', 0x00, 0x01, 0x00, 0x00, 0x00, 0x00}, + }, + }, + { + "single top-level document with leading and trailing whitespace", + "\n\n {\"foo\":1} \n", + [][]byte{ + {0x0E, 0x00, 0x00, 0x00, 0x10, 'f', 'o', 'o', 0x00, 0x01, 0x00, 0x00, 0x00, 0x00}, + }, + }, + { + "two top-level documents", + "{\"foo\":1}{\"foo\":2}", + [][]byte{ + {0x0E, 0x00, 0x00, 0x00, 0x10, 'f', 'o', 'o', 0x00, 0x01, 0x00, 0x00, 0x00, 0x00}, + {0x0E, 0x00, 0x00, 0x00, 0x10, 'f', 'o', 'o', 0x00, 0x02, 0x00, 0x00, 0x00, 0x00}, + }, + }, + { + "two top-level documents with leading and trailing whitespace and whitespace separation ", + "\n\n {\"foo\":1}\n{\"foo\":2}\n ", + [][]byte{ + {0x0E, 0x00, 0x00, 0x00, 0x10, 'f', 'o', 'o', 0x00, 0x01, 0x00, 0x00, 0x00, 0x00}, + {0x0E, 0x00, 0x00, 0x00, 0x10, 'f', 'o', 'o', 0x00, 0x02, 0x00, 0x00, 0x00, 0x00}, + }, + }, + { + "top-level array with single document", + "[{\"foo\":1}]", + [][]byte{ + {0x0E, 0x00, 0x00, 0x00, 0x10, 'f', 'o', 'o', 0x00, 0x01, 0x00, 0x00, 0x00, 0x00}, + }, + }, + { + "top-level array with 2 documents", + "[{\"foo\":1},{\"foo\":2}]", + [][]byte{ + {0x0E, 0x00, 0x00, 0x00, 0x10, 'f', 'o', 'o', 0x00, 0x01, 0x00, 0x00, 0x00, 0x00}, + {0x0E, 0x00, 0x00, 0x00, 0x10, 'f', 'o', 'o', 0x00, 0x02, 0x00, 0x00, 0x00, 0x00}, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + r := strings.NewReader(tc.input) + vr, err := NewExtJSONValueReader(r, false) + if err != nil { + t.Fatalf("expected no error, but got %v", err) + } + + actual, err := readAllDocuments(vr) + if err != nil { + t.Fatalf("expected no error, but got %v", err) + } + + if diff := cmp.Diff(tc.expected, actual); diff != "" { + t.Fatalf("expected does not 
match actual: %v", diff) + } + }) + } +} + +func readAllDocuments(vr ValueReader) ([][]byte, error) { + c := NewCopier() + var actual [][]byte + + switch vr.Type() { + case bsontype.EmbeddedDocument: + for { + result, err := c.CopyDocumentToBytes(vr) + if err != nil { + if err == io.EOF { + break + } + return nil, err + } + + actual = append(actual, result) + } + case bsontype.Array: + ar, err := vr.ReadArray() + if err != nil { + return nil, err + } + for { + evr, err := ar.ReadValue() + if err != nil { + if err == ErrEOA { + break + } + return nil, err + } + + result, err := c.CopyDocumentToBytes(evr) + if err != nil { + return nil, err + } + + actual = append(actual, result) + } + default: + return nil, fmt.Errorf("expected an array or a document, but got %s", vr.Type()) + } + + return actual, nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go new file mode 100644 index 0000000..ba39c96 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go @@ -0,0 +1,223 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// +// Based on github.com/golang/go by The Go Authors +// See THIRD-PARTY-NOTICES for original license terms. + +package bsonrw + +import "unicode/utf8" + +// safeSet holds the value true if the ASCII character with the given array +// position can be represented inside a JSON string without any further +// escaping. +// +// All values are true except for the ASCII control characters (0-31), the +// double quote ("), and the backslash character ("\"). 
+var safeSet = [utf8.RuneSelf]bool{ + ' ': true, + '!': true, + '"': false, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '(': true, + ')': true, + '*': true, + '+': true, + ',': true, + '-': true, + '.': true, + '/': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + ':': true, + ';': true, + '<': true, + '=': true, + '>': true, + '?': true, + '@': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'V': true, + 'W': true, + 'X': true, + 'Y': true, + 'Z': true, + '[': true, + '\\': false, + ']': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '{': true, + '|': true, + '}': true, + '~': true, + '\u007f': true, +} + +// htmlSafeSet holds the value true if the ASCII character with the given +// array position can be safely represented inside a JSON string, embedded +// inside of HTML