diff --git a/.travis.yml b/.travis.yml index d00aa36..d895cc9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,14 +3,10 @@ os: - linux - osx go: - - "go1.8" - - "go1.9" - - "go1.10" + - 1.9.x + - 1.10.x + - 1.11.x go_import_path: github.com/kataras/go-sessions -# we disable test caching via GOCACHE=off -env: - global: - - GOCACHE=off install: - go get -t ./... script: diff --git a/README.md b/README.md index c41d3e9..3de1166 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ Build Status License - Releases + Releases Read me docs Build Status Built with GoLang @@ -477,7 +477,7 @@ If you'd like to discuss this package, or ask questions about it, feel free to Versioning ------------ -Current: **v3.0.0** +Current: **v3.1.0** Read more about Semantic Versioning 2.0.0 @@ -506,7 +506,7 @@ License can be found [here](LICENSE). [Travis]: http://travis-ci.org/kataras/go-sessions [License Widget]: https://img.shields.io/badge/license-MIT%20%20License%20-E91E63.svg?style=flat-square [License]: https://github.com/kataras/go-sessions/blob/master/LICENSE -[Release Widget]: https://img.shields.io/badge/release-v3.0.0-blue.svg?style=flat-square +[Release Widget]: https://img.shields.io/badge/release-v3.1.0-blue.svg?style=flat-square [Release]: https://github.com/kataras/go-sessions/releases [Chat Widget]: https://img.shields.io/badge/community-chat-00BCD4.svg?style=flat-square [Chat]: https://kataras.rocket.chat/channel/go-sessions diff --git a/config.go b/config.go index 9d4239d..f4dce55 100644 --- a/config.go +++ b/config.go @@ -3,7 +3,7 @@ package sessions import ( "time" - "github.com/satori/go.uuid" + "github.com/iris-contrib/go.uuid" ) const ( diff --git a/database.go b/database.go index 567a675..171723a 100644 --- a/database.go +++ b/database.go @@ -1,10 +1,15 @@ package sessions import ( + "errors" "sync" "time" ) +// ErrNotImplemented is returned when a particular feature is not implemented yet. +// It can be matched directly, i.e: `isNotImplementedError := err == sessions.ErrNotImplemented`. +var ErrNotImplemented = errors.New("not implemented yet") + // Database is the interface which all session databases should implement // By design it doesn't support any type of cookie session like other frameworks. // I want to protect you, believe me. @@ -18,6 +23,15 @@ type Database interface { // Acquire receives a session's lifetime from the database, // if the return value is LifeTime{} then the session manager sets the life time based on the expiration duration lives in configuration. Acquire(sid string, expires time.Duration) LifeTime + // OnUpdateExpiration should re-set the expiration (TTL) of the session entry inside the database, + // it is fired on `ShiftExpiration` and `UpdateExpiration`. + // If the database does not support changing the TTL then the session entry will be cloned to a new one + // and the old one will be removed; this depends on the chosen database storage. + // + // Checking the returned error is required; if an error is returned then the rest of the session's keys are not processed. + // + // If a database does not support this feature then an `ErrNotImplemented` error will be returned instead. + OnUpdateExpiration(sid string, newExpires time.Duration) error // Set sets a key value of a specific session. // The "immutable" input argument depends on the store, it may not implement it at all.
Set(sid string, lifetime LifeTime, key string, value interface{}, immutable bool) @@ -52,6 +66,9 @@ func (s *mem) Acquire(sid string, expires time.Duration) LifeTime { return LifeTime{} } +// Do nothing, the `LifeTime` of the Session will be managed by the callers automatically on memory-based storage. +func (s *mem) OnUpdateExpiration(string, time.Duration) error { return nil } + // immutable depends on the store, it may not implement it at all. func (s *mem) Set(sid string, lifetime LifeTime, key string, value interface{}, immutable bool) { s.mu.RLock() @@ -60,7 +77,11 @@ func (s *mem) Set(sid string, lifetime LifeTime, key string, value interface{}, } func (s *mem) Get(sid string, key string) interface{} { - return s.values[sid].Get(key) + s.mu.RLock() + v := s.values[sid].Get(key) + s.mu.RUnlock() + + return v } func (s *mem) Visit(sid string, cb func(key string, value interface{})) { @@ -68,7 +89,11 @@ func (s *mem) Visit(sid string, cb func(key string, value interface{})) { } func (s *mem) Len(sid string) int { - return s.values[sid].Len() + s.mu.RLock() + n := s.values[sid].Len() + s.mu.RUnlock() + + return n } func (s *mem) Delete(sid string, key string) (deleted bool) { @@ -79,9 +104,9 @@ func (s *mem) Delete(sid string, key string) (deleted bool) { } func (s *mem) Clear(sid string) { - s.mu.RLock() + s.mu.Lock() s.values[sid].Reset() - s.mu.RUnlock() + s.mu.Unlock() } func (s *mem) Release(sid string) { diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..46adeac --- /dev/null +++ b/go.mod @@ -0,0 +1,31 @@ +module github.com/kataras/go-sessions + +require ( + github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7 // indirect + github.com/ajg/form v0.0.0-20160822230020-523a5da1a92f // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dgraph-io/badger v1.5.4 + github.com/dgryski/go-farm v0.0.0-20180109070241-2de33835d102 // indirect + github.com/etcd-io/bbolt v1.3.0 + github.com/fatih/structs v1.1.0 // indirect + github.com/gavv/monotime v0.0.0-20171021193802-6f8212e8d10d // indirect + github.com/golang/protobuf v1.2.0 // indirect + github.com/gomodule/redigo v2.0.0+incompatible + github.com/google/go-querystring v1.0.0 // indirect + github.com/gorilla/securecookie v1.1.1 + github.com/imkira/go-interpol v1.1.0 // indirect + github.com/iris-contrib/go.uuid v2.0.0+incompatible + github.com/iris-contrib/httpexpect v0.0.0-20180314041918-ebe99fcebbce + github.com/moul/http2curl v0.0.0-20170919181001-9ac6cf4d929b // indirect + github.com/pkg/errors v0.8.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/sergi/go-diff v1.0.0 // indirect + github.com/stretchr/testify v1.2.2 // indirect + github.com/valyala/fasthttp v1.0.0 + github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + github.com/xeipuuv/gojsonschema v0.0.0-20181016150526-f3a9dae5b194 // indirect + github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0 // indirect + github.com/yudai/gojsondiff v1.0.0 // indirect + github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..3e7fde8 --- /dev/null +++ b/go.sum @@ -0,0 +1,63 @@ +github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7 h1:PqzgE6kAMi81xWQA2QIVxjWkFHptGgC547vchpUbtFo= +github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= 
+github.com/ajg/form v0.0.0-20160822230020-523a5da1a92f h1:zvClvFQwU++UpIUBGC8YmDlfhUrweEy1R1Fj1gu5iIM= +github.com/ajg/form v0.0.0-20160822230020-523a5da1a92f/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgraph-io/badger v1.5.4 h1:gVTrpUTbbr/T24uvoCaqY2KSHfNLVGm0w+hbee2HMeg= +github.com/dgraph-io/badger v1.5.4/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= +github.com/dgryski/go-farm v0.0.0-20180109070241-2de33835d102 h1:afESQBXJEnj3fu+34X//E8Wg3nEbMJxJkwSc0tPePK0= +github.com/dgryski/go-farm v0.0.0-20180109070241-2de33835d102/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/etcd-io/bbolt v1.3.0 h1:ec0U3x11Mk69A8YwQyZEhNaUqHkQSv2gDR3Bioz5DfU= +github.com/etcd-io/bbolt v1.3.0/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= +github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/gavv/monotime v0.0.0-20171021193802-6f8212e8d10d h1:oYXrtNhqNKL1dVtKdv8XUq5zqdGVFNQ0/4tvccXZOLM= +github.com/gavv/monotime v0.0.0-20171021193802-6f8212e8d10d/go.mod h1:vmp8DIyckQMXOPl0AQVHt+7n5h7Gb7hS6CUydiV8QeA= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0= +github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= +github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/imkira/go-interpol v1.1.0 h1:KIiKr0VSG2CUW1hl1jpiyuzuJeKUUpC8iM1AIE7N1Vk= +github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= +github.com/iris-contrib/go.uuid v2.0.0+incompatible h1:XZubAYg61/JwnJNbZilGjf3b3pB80+OQg2qf6c8BfWE= +github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0= +github.com/iris-contrib/httpexpect v0.0.0-20180314041918-ebe99fcebbce h1:q8Ka/exfHNgK7izJE+aUOZd7KZXJ7oQbnJWiZakEiMo= +github.com/iris-contrib/httpexpect v0.0.0-20180314041918-ebe99fcebbce/go.mod h1:VER17o2JZqquOx41avolD/wMGQSFEFBKWmhag9/RQRY= +github.com/klauspost/compress v1.4.0 h1:8nsMz3tWa9SWWPL60G1V6CUsf4lLjWLTNEtibhe8gh8= +github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e h1:+lIPJOWl+jSiJOc70QXJ07+2eg2Jy2EC7Mi11BWujeM= +github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/moul/http2curl v0.0.0-20170919181001-9ac6cf4d929b h1:Pip12xNtMvEFUBF4f8/b5yRXj94LLrNdLWELfOr2KcY= +github.com/moul/http2curl v0.0.0-20170919181001-9ac6cf4d929b/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= +github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.0.0 h1:BwIoZQbBsTo3v2F5lz5Oy3TlTq4wLKTLV260EVTEWco= +github.com/valyala/fasthttp v1.0.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20181016150526-f3a9dae5b194 h1:va8F6ctiwxAm980W2zwP4vfSFaKeYlqPo24rRvYjmdc= +github.com/xeipuuv/gojsonschema v0.0.0-20181016150526-f3a9dae5b194/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0 h1:6fRhSjgLCkTD3JnJxvaJ4Sj+TYblw757bqYgZaOq5ZY= +github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= +github.com/yudai/gojsondiff v1.0.0 h1:27cbfqXLVEJ1o8I6v3y9lg8Ydm53EKqHXAOMxEGlCOA= +github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= +github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 h1:BHyfKlQyqbsFN5p3IfnEUduWvb9is428/nNb5L3U01M= +github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= +golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3 h1:czFLhve3vsQetD6JOJ8NZZvGQIXlnN3/yXxbT6/awxI= +golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= diff --git a/provider.go b/provider.go index 7d503ec..b53b481 100644 --- a/provider.go +++ b/provider.go @@ -1,6 +1,7 @@ package sessions import ( + "errors" "sync" "time" ) @@ -76,23 +77,32 @@ func (p *provider) Init(sid string, expires time.Duration) *Session { return newSession } -// UpdateExpiration update expire date of a session. -// if expires > 0 then it updates the destroy task. -// if expires <=0 then it does nothing, to destroy a session call the `Destroy` func instead. -func (p *provider) UpdateExpiration(sid string, expires time.Duration) bool { +// ErrNotFound can be returned when calling `UpdateExpiration` on a non-existing or invalid session entry. +// It can be matched directly, i.e: `isErrNotFound := sessions.ErrNotFound.Equal(err)`. +var ErrNotFound = errors.New("not found") + +// UpdateExpiration resets the expiration of a session. +// if expires > 0 then it will try to update the expiration and destroy task is delayed. 
+// if expires <= 0 then it does nothing and returns nil; to destroy a session call the `Destroy` func instead. +// +// If the session is not found, it returns an `ErrNotFound` error; this can only happen when you restart the server while using the memory-based storage (default), +// because the provider's `UpdateExpiration` is only called when the client has a valid session cookie. +// +// If a backend database is used then it may return an `ErrNotImplemented` error if the underlying database does not support this operation. +func (p *provider) UpdateExpiration(sid string, expires time.Duration) error { if expires <= 0 { - return false + return nil } p.mu.Lock() sess, found := p.sessions[sid] p.mu.Unlock() if !found { - return false + return ErrNotFound } sess.Lifetime.Shift(expires) - return true + return p.db.OnUpdateExpiration(sid, expires) } // Read returns the store which sid parameter belongs diff --git a/sessiondb/badger/database.go b/sessiondb/badger/database.go index d1e8461..8f4a332 100644 --- a/sessiondb/badger/database.go +++ b/sessiondb/badger/database.go @@ -1,13 +1,15 @@ package badger import ( - "fmt" + "bytes" + "errors" + "log" "os" "runtime" + "sync/atomic" "time" "github.com/kataras/go-sessions" - "github.com/kataras/golog" "github.com/dgraph-io/badger" ) @@ -24,6 +26,8 @@ type Database struct { // it's initialized at `New` or `NewFromDB`. // Can be used to get stats. Service *badger.DB + + closed uint32 // 1 if closed. } var _ sessions.Database = (*Database)(nil) @@ -36,7 +40,7 @@ var _ sessions.Database = (*Database)(nil) // It will remove any old session files. func New(directoryPath string) (*Database, error) { if directoryPath == "" { - return nil, fmt.Errorf("directoryPath is missing") + return nil, errors.New("directoryPath is missing") } lindex := directoryPath[len(directoryPath)-1] @@ -55,7 +59,7 @@ func New(directoryPath string) (*Database, error) { service, err := badger.Open(opts) if err != nil { - golog.Errorf("unable to initialize the badger-based session database: %v", err) + log.Printf("unable to initialize the badger-based session database: %v\n", err) return nil, err } @@ -76,7 +80,7 @@ func (db *Database) Acquire(sid string, expires time.Duration) sessions.LifeTime txn := db.Service.NewTransaction(true) defer txn.Commit(nil) - bsid := []byte(sid) + bsid := makePrefix(sid) item, err := txn.Get(bsid) if err == nil { // found, return the expiration. @@ -92,16 +96,26 @@ func (db *Database) Acquire(sid string, expires time.Duration) sessions.LifeTime } if err != nil { - golog.Error(err) + return sessions.LifeTime{Time: sessions.CookieExpireDelete} } return sessions.LifeTime{} // session manager will handle the rest. } -var delim = byte('*') +// OnUpdateExpiration is not implemented here yet. +// Note that this error will not be logged; callers should catch it manually. +func (db *Database) OnUpdateExpiration(sid string, newExpires time.Duration) error { + return sessions.ErrNotImplemented +} + +var delim = byte('_') + +func makePrefix(sid string) []byte { + return append([]byte(sid), delim) +} func makeKey(sid, key string) []byte { - return append([]byte(sid), append([]byte(key), delim)...) + return append(makePrefix(sid), []byte(key)...) } // Set sets a key value of a specific session.
@@ -109,17 +123,13 @@ func makeKey(sid, key string) []byte { func (db *Database) Set(sid string, lifetime sessions.LifeTime, key string, value interface{}, immutable bool) { valueBytes, err := sessions.DefaultTranscoder.Marshal(value) if err != nil { - golog.Error(err) return } - err = db.Service.Update(func(txn *badger.Txn) error { - return txn.SetWithTTL(makeKey(sid, key), valueBytes, lifetime.DurationUntilExpiration()) + db.Service.Update(func(txn *badger.Txn) error { + dur := lifetime.DurationUntilExpiration() + return txn.SetWithTTL(makeKey(sid, key), valueBytes, dur) }) - - if err != nil { - golog.Error(err) - } } // Get retrieves a session value based on the key. @@ -129,7 +139,13 @@ func (db *Database) Get(sid string, key string) (value interface{}) { if err != nil { return err } - // item.ValueCopy + + // return item.Value(func(valueBytes []byte) { + // if err := sessions.DefaultTranscoder.Unmarshal(valueBytes, &value); err != nil { + // golog.Error(err) + // } + // }) + valueBytes, err := item.Value() if err != nil { return err @@ -139,7 +155,6 @@ func (db *Database) Get(sid string, key string) (value interface{}) { }) if err != nil && err != badger.ErrKeyNotFound { - golog.Error(err) return nil } @@ -148,7 +163,7 @@ func (db *Database) Get(sid string, key string) (value interface{}) { // Visit loops through all session keys and values. func (db *Database) Visit(sid string, cb func(key string, value interface{})) { - prefix := append([]byte(sid), delim) + prefix := makePrefix(sid) txn := db.Service.NewTransaction(false) defer txn.Discard() @@ -158,19 +173,29 @@ func (db *Database) Visit(sid string, cb func(key string, value interface{})) { for iter.Rewind(); iter.ValidForPrefix(prefix); iter.Next() { item := iter.Item() + var value interface{} + + // err := item.Value(func(valueBytes []byte) { + // if err := sessions.DefaultTranscoder.Unmarshal(valueBytes, &value); err != nil { + // golog.Error(err) + // } + // }) + + // if err != nil { + // golog.Error(err) + // continue + // } + valueBytes, err := item.Value() if err != nil { - golog.Error(err) continue } - var value interface{} if err = sessions.DefaultTranscoder.Unmarshal(valueBytes, &value); err != nil { - golog.Error(err) continue } - cb(string(item.Key()), value) + cb(string(bytes.TrimPrefix(item.Key(), prefix)), value) } } @@ -183,7 +208,7 @@ var iterOptionsNoValues = badger.IteratorOptions{ // Len returns the length of the session's entries (keys). func (db *Database) Len(sid string) (n int) { - prefix := append([]byte(sid), delim) + prefix := makePrefix(sid) txn := db.Service.NewTransaction(false) iter := txn.NewIterator(iterOptionsNoValues) @@ -202,7 +227,7 @@ func (db *Database) Delete(sid string, key string) (deleted bool) { txn := db.Service.NewTransaction(true) err := txn.Delete(makeKey(sid, key)) if err != nil { - golog.Error(err) + return } txn.Commit(nil) return err == nil @@ -210,7 +235,7 @@ func (db *Database) Delete(sid string, key string) (deleted bool) { // Clear removes all session key values but it keeps the session entry. 
func (db *Database) Clear(sid string) { - prefix := append([]byte(sid), delim) + prefix := makePrefix(sid) txn := db.Service.NewTransaction(true) defer txn.Commit(nil) @@ -240,9 +265,12 @@ func (db *Database) Close() error { } func closeDB(db *Database) error { + if atomic.LoadUint32(&db.closed) > 0 { + return nil + } err := db.Service.Close() - if err != nil { - golog.Warnf("closing the badger connection: %v", err) + if err == nil { + atomic.StoreUint32(&db.closed, 1) } return err } diff --git a/sessiondb/badger/vendor/badger.txt b/sessiondb/badger/vendor/badger.txt new file mode 100644 index 0000000..f890a03 --- /dev/null +++ b/sessiondb/badger/vendor/badger.txt @@ -0,0 +1,5 @@ +02 Oct 2018 +----------- +Keep this vendor because go modules uses the v1.5.4 which is the latest published release(~19 Sep 2018) +while the master repo will be used if host's golang version is lower than 1.11 or inside $GOPATH, which +has a breaking change on the `item.Value` input & output values, see the comments inside this `database.go`. \ No newline at end of file diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger.txt b/sessiondb/badger/vendor/github.com/dgraph-io/badger.txt deleted file mode 100644 index 12081a4..0000000 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger.txt +++ /dev/null @@ -1,3 +0,0 @@ -27 Dec 2017 - -Commit: 0225b784d8dfc250b3be597fa806f0fd677a6628 \ No newline at end of file diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/README.md b/sessiondb/badger/vendor/github.com/dgraph-io/badger/README.md deleted file mode 100644 index ccb6428..0000000 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/README.md +++ /dev/null @@ -1,606 +0,0 @@ -# BadgerDB [![GoDoc](https://godoc.org/github.com/dgraph-io/badger?status.svg)](https://godoc.org/github.com/dgraph-io/badger) [![Go Report Card](https://goreportcard.com/badge/github.com/dgraph-io/badger)](https://goreportcard.com/report/github.com/dgraph-io/badger) [![Build Status](https://travis-ci.org/dgraph-io/badger.svg?branch=master)](https://travis-ci.org/dgraph-io/badger) ![Appveyor](https://ci.appveyor.com/api/projects/status/github/dgraph-io/badger?branch=master&svg=true) [![Coverage Status](https://coveralls.io/repos/github/dgraph-io/badger/badge.svg?branch=master)](https://coveralls.io/github/dgraph-io/badger?branch=master) - -![Badger mascot](images/diggy-shadow.png) - -BadgerDB is an embeddable, persistent, simple and fast key-value (KV) database -written in pure Go. It's meant to be a performant alternative to non-Go-based -key-value stores like [RocksDB](https://github.com/facebook/rocksdb). - -## Project Status -Badger v1.0 was released in Nov 2017. Check the [Changelog] for the full details. - -[Changelog]:https://github.com/dgraph-io/badger/blob/master/CHANGELOG.md - -We introduced transactions in [v0.9.0] which involved a major API change. If you have a Badger -datastore prior to that, please use [v0.8.1], but we strongly urge you to upgrade. Upgrading from -both v0.8 and v0.9 will require you to [take backups](#database-backup) and restore using the new -version. 
- -[v1.0.1]: //github.com/dgraph-io/badger/tree/v1.0.1 -[v0.8.1]: //github.com/dgraph-io/badger/tree/v0.8.1 -[v0.9.0]: //github.com/dgraph-io/badger/tree/v0.9.0 - -## Table of Contents - * [Getting Started](#getting-started) - + [Installing](#installing) - + [Opening a database](#opening-a-database) - + [Transactions](#transactions) - - [Read-only transactions](#read-only-transactions) - - [Read-write transactions](#read-write-transactions) - - [Managing transactions manually](#managing-transactions-manually) - + [Using key/value pairs](#using-keyvalue-pairs) - + [Monotonically increasing integers](#monotonically-increasing-integers) - * [Merge Operations](#merge-operations) - + [Setting Time To Live(TTL) and User Metadata on Keys](#setting-time-to-livettl-and-user-metadata-on-keys) - + [Iterating over keys](#iterating-over-keys) - - [Prefix scans](#prefix-scans) - - [Key-only iteration](#key-only-iteration) - + [Garbage Collection](#garbage-collection) - + [Database backup](#database-backup) - + [Memory usage](#memory-usage) - + [Statistics](#statistics) - * [Resources](#resources) - + [Blog Posts](#blog-posts) - * [Contact](#contact) - * [Design](#design) - + [Comparisons](#comparisons) - + [Benchmarks](#benchmarks) - * [Other Projects Using Badger](#other-projects-using-badger) - * [Frequently Asked Questions](#frequently-asked-questions) - -## Getting Started - -### Installing -To start using Badger, install Go 1.8 or above and run `go get`: - -```sh -$ go get github.com/dgraph-io/badger/... -``` - -This will retrieve the library and install the `badger_info` command line -utility into your `$GOBIN` path. - - -### Opening a database -The top-level object in Badger is a `DB`. It represents multiple files on disk -in specific directories, which contain the data for a single database. - -To open your database, use the `badger.Open()` function, with the appropriate -options. The `Dir` and `ValueDir` options are mandatory and must be -specified by the client. They can be set to the same value to simplify things. - -```go -package main - -import ( - "log" - - "github.com/dgraph-io/badger" -) - -func main() { - // Open the Badger database located in the /tmp/badger directory. - // It will be created if it doesn't exist. - opts := badger.DefaultOptions - opts.Dir = "/tmp/badger" - opts.ValueDir = "/tmp/badger" - db, err := badger.Open(opts) - if err != nil { - log.Fatal(err) - } - defer db.Close() -  // Your code here… -} -``` - -Please note that Badger obtains a lock on the directories so multiple processes -cannot open the same database at the same time. - -### Transactions - -#### Read-only transactions -To start a read-only transaction, you can use the `DB.View()` method: - -```go -err := db.View(func(tx *badger.Txn) error { -  // Your code here… -  return nil -}) -``` - -You cannot perform any writes or deletes within this transaction. Badger -ensures that you get a consistent view of the database within this closure. Any -writes that happen elsewhere after the transaction has started, will not be -seen by calls made within the closure. - -#### Read-write transactions -To start a read-write transaction, you can use the `DB.Update()` method: - -```go -err := db.Update(func(tx *badger.Txn) error { -  // Your code here… -  return nil -}) -``` - -All database operations are allowed inside a read-write transaction. - -Always check the returned error value. If you return an error -within your closure it will be passed through. - -An `ErrConflict` error will be reported in case of a conflict. 
Depending on the state -of your application, you have the option to retry the operation if you receive -this error. - -An `ErrTxnTooBig` will be reported in case the number of pending writes/deletes in -the transaction exceed a certain limit. In that case, it is best to commit the -transaction and start a new transaction immediately. Here is an example (we are -not checking for errors in some places for simplicity): - -```go -updates := make(map[string]string) -txn := db.NewTransaction(true) -for k,v := range updates { - if err := txn.Set(byte[](k),byte[](v)); err == ErrTxnTooBig { - _ = txn.Commit() - txn = db.NewTransaction(..) - _ = txn.Set(k,v) - } -} -_ = txn.Commit() -``` - -#### Managing transactions manually -The `DB.View()` and `DB.Update()` methods are wrappers around the -`DB.NewTransaction()` and `Txn.Commit()` methods (or `Txn.Discard()` in case of -read-only transactions). These helper methods will start the transaction, -execute a function, and then safely discard your transaction if an error is -returned. This is the recommended way to use Badger transactions. - -However, sometimes you may want to manually create and commit your -transactions. You can use the `DB.NewTransaction()` function directly, which -takes in a boolean argument to specify whether a read-write transaction is -required. For read-write transactions, it is necessary to call `Txn.Commit()` -to ensure the transaction is committed. For read-only transactions, calling -`Txn.Discard()` is sufficient. `Txn.Commit()` also calls `Txn.Discard()` -internally to cleanup the transaction, so just calling `Txn.Commit()` is -sufficient for read-write transaction. However, if your code doesn’t call -`Txn.Commit()` for some reason (for e.g it returns prematurely with an error), -then please make sure you call `Txn.Discard()` in a `defer` block. Refer to the -code below. - -```go -// Start a writable transaction. -txn, err := db.NewTransaction(true) -if err != nil { - return err -} -defer txn.Discard() - -// Use the transaction... -err := txn.Set([]byte("answer"), []byte("42")) -if err != nil { - return err -} - -// Commit the transaction and check for error. -if err := txn.Commit(nil); err != nil { - return err -} -``` - -The first argument to `DB.NewTransaction()` is a boolean stating if the transaction -should be writable. - -Badger allows an optional callback to the `Txn.Commit()` method. Normally, the -callback can be set to `nil`, and the method will return after all the writes -have succeeded. However, if this callback is provided, the `Txn.Commit()` -method returns as soon as it has checked for any conflicts. The actual writing -to the disk happens asynchronously, and the callback is invoked once the -writing has finished, or an error has occurred. This can improve the throughput -of the application in some cases. But it also means that a transaction is not -durable until the callback has been invoked with a `nil` error value. - -### Using key/value pairs -To save a key/value pair, use the `Txn.Set()` method: - -```go -err := db.Update(func(txn *badger.Txn) error { - err := txn.Set([]byte("answer"), []byte("42")) - return err -}) -``` - -This will set the value of the `"answer"` key to `"42"`. 
To retrieve this -value, we can use the `Txn.Get()` method: - -```go -err := db.View(func(txn *badger.Txn) error { - item, err := txn.Get([]byte("answer")) - if err != nil { - return err - } - val, err := item.Value() - if err != nil { - return err - } - fmt.Printf("The answer is: %s\n", val) - return nil -}) -``` - -`Txn.Get()` returns `ErrKeyNotFound` if the value is not found. - -Please note that values returned from `Get()` are only valid while the -transaction is open. If you need to use a value outside of the transaction -then you must use `copy()` to copy it to another byte slice. - -Use the `Txn.Delete()` method to delete a key. - -### Monotonically increasing integers - -To get unique monotonically increasing integers with strong durability, you can -use the `DB.GetSequence` method. This method returns a `Sequence` object, which -is thread-safe and can be used concurrently via various goroutines. - -Badger would lease a range of integers to hand out from memory, with the -bandwidth provided to `DB.GetSequence`. The frequency at which disk writes are -done is determined by this lease bandwidth and the frequency of `Next` -invocations. Setting a bandwith too low would do more disk writes, setting it -too high would result in wasted integers if Badger is closed or crashes. -To avoid wasted integers, call `Release` before closing Badger. - -```go -seq, err := db.GetSequence(key, 1000) -defer seq.Release() -for { - num, err := seq.Next() -} -``` - -### Merge Operations -Badger provides support for unordered merge operations. You can define a func -of type `MergeFunc` which takes in an existing value, and a value to be -_merged_ with it. It returns a new value which is the result of the _merge_ -operation. All values are specified in byte arrays. For e.g., here is a merge -function (`add`) which adds a `uint64` value to an existing `uint64` value. - -```Go -uint64ToBytes(i uint64) []byte { - var buf [8]byte - binary.BigEndian.PutUint64(buf[:], i) - return buf[:] -} - -func bytesToUint64(b []byte) uint64 { - return binary.BigEndian.Uint64(b) -} - -// Merge function to add two uint64 numbers -func add(existing, new []byte) []byte { - return uint64ToBytes(bytesToUint64(existing) + bytesToUint64(new)) -} -``` - -This function can then be passed to the `DB.GetMergeOperator()` method, along -with a key, and a duration value. The duration specifies how often the merge -function is run on values that have been added using the `MergeOperator.Add()` -method. - -`MergeOperator.Get()` method can be used to retrieve the cumulative value of the key -associated with the merge operation. - -```Go -key := []byte("merge") -m := db.GetMergeOperator(key, add, 200*time.Millisecond) -defer m.Stop() - -m.Add(uint64ToBytes(1)) -m.Add(uint64ToBytes(2)) -m.Add(uint64ToBytes(3)) - -res, err := m.Get() // res should have value 6 encoded -fmt.Println(bytesToUint64(res)) -``` - -### Setting Time To Live(TTL) and User Metadata on Keys -Badger allows setting an optional Time to Live (TTL) value on keys. Once the TTL has -elapsed, the key will no longer be retrievable and will be eligible for garbage -collection. A TTL can be set as a `time.Duration` value using the `Txn.SetWithTTL()` -API method. - -An optional user metadata value can be set on each key. A user metadata value -is represented by a single byte. It can be used to set certain bits along -with the key to aid in interpreting or decoding the key-value pair. User -metadata can be set using the `Txn.SetWithMeta()` API method. 
- -`Txn.SetEntry()` can be used to set the key, value, user metatadata and TTL, -all at once. - -### Iterating over keys -To iterate over keys, we can use an `Iterator`, which can be obtained using the -`Txn.NewIterator()` method. Iteration happens in byte-wise lexicographical sorting -order. - - -```go -err := db.View(func(txn *badger.Txn) error { - opts := badger.DefaultIteratorOptions - opts.PrefetchSize = 10 - it := txn.NewIterator(opts) - for it.Rewind(); it.Valid(); it.Next() { - item := it.Item() - k := item.Key() - v, err := item.Value() - if err != nil { - return err - } - fmt.Printf("key=%s, value=%s\n", k, v) - } - return nil -}) -``` - -The iterator allows you to move to a specific point in the list of keys and move -forward or backward through the keys one at a time. - -By default, Badger prefetches the values of the next 100 items. You can adjust -that with the `IteratorOptions.PrefetchSize` field. However, setting it to -a value higher than GOMAXPROCS (which we recommend to be 128 or higher) -shouldn’t give any additional benefits. You can also turn off the fetching of -values altogether. See section below on key-only iteration. - -#### Prefix scans -To iterate over a key prefix, you can combine `Seek()` and `ValidForPrefix()`: - -```go -db.View(func(txn *badger.Txn) error { - it := txn.NewIterator(&DefaultIteratorOptions) - prefix := []byte("1234") - for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() { - item := it.Item() - k := item.Key() - v, err := item.Value() - if err != nil { - return err - } - fmt.Printf("key=%s, value=%s\n", k, v) - } - return nil -}) -``` - -#### Key-only iteration -Badger supports a unique mode of iteration called _key-only_ iteration. It is -several order of magnitudes faster than regular iteration, because it involves -access to the LSM-tree only, which is usually resident entirely in RAM. To -enable key-only iteration, you need to set the `IteratorOptions.PrefetchValues` -field to `false`. This can also be used to do sparse reads for selected keys -during an iteration, by calling `item.Value()` only when required. - -```go -err := db.View(func(txn *badger.Txn) error { - opts := badger.DefaultIteratorOptions - opts.PrefetchValues = false - it := txn.NewIterator(opts) - for it.Rewind(); it.Valid(); it.Next() { - item := it.Item() - k := item.Key() - fmt.Printf("key=%s\n", k) - } - return nil -}) -``` - -### Garbage Collection -Badger values need to be garbage collected, because of two reasons: - -* Badger keeps values separately from the LSM tree. This means that the compaction operations -that clean up the LSM tree do not touch the values at all. Values need to be cleaned up -separately. - -* Concurrent read/write transactions could leave behind multiple values for a single key, because they -are stored with different versions. These could accumulate, and take up unneeded space beyond the -time these older versions are needed. - -Badger relies on the client to perform garbage collection at a time of their choosing. It provides -the following methods, which can be invoked at an appropriate time: - -* `DB.PurgeOlderVersions()`: This method iterates over the database, and cleans up all but the latest -versions of the key-value pairs. It marks the older versions as deleted, which makes them eligible for -garbage collection. -* `DB.PurgeVersionsBelow(key, ts)`: This method is useful to do a more targeted clean up of older versions -of key-value pairs. You can specify a key, and a timestamp. 
All versions of the key older than the timestamp -are marked as deleted, making them eligible for garbage collection. -* `DB.RunValueLogGC()`: This method is designed to do garbage collection while - Badger is online. Please ensure that you call the `DB.Purge…()` methods first - before invoking this method. It uses any statistics generated by the - `DB.Purge(…)` methods to pick files that are likely to lead to maximum space - reclamation. It loops until it encounters a file which does not lead to any - garbage collection. - - It could lead to increased I/O if `DB.RunValueLogGC()` hasn’t been called for - a long time, and many deletes have happened in the meanwhile. So it is recommended - that this method be called regularly. - -### Database backup -There are two public API methods `DB.Backup()` and `DB.Load()` which can be -used to do online backups and restores. Badger v0.9 provides a CLI tool -`badger`, which can do offline backup/restore. Make sure you have `$GOPATH/bin` -in your PATH to use this tool. - -The command below will create a version-agnostic backup of the database, to a -file `badger.bak` in the current working directory - -``` -badger backup --dir -``` - -To restore `badger.bak` in the current working directory to a new database: - -``` -badger restore --dir -``` - -See `badger --help` for more details. - -If you have a Badger database that was created using v0.8 (or below), you can -use the `badger_backup` tool provided in v0.8.1, and then restore it using the -command above to upgrade your database to work with the latest version. - -``` -badger_backup --dir --backup-file badger.bak -``` - -### Memory usage -Badger's memory usage can be managed by tweaking several options available in -the `Options` struct that is passed in when opening the database using -`DB.Open`. - -- `Options.ValueLogLoadingMode` can be set to `options.FileIO` (instead of the - default `options.MemoryMap`) to avoid memory-mapping log files. This can be - useful in environments with low RAM. -- Number of memtables (`Options.NumMemtables`) - - If you modify `Options.NumMemtables`, also adjust `Options.NumLevelZeroTables` and - `Options.NumLevelZeroTablesStall` accordingly. -- Number of concurrent compactions (`Options.NumCompactors`) -- Mode in which LSM tree is loaded (`Options.TableLoadingMode`) -- Size of table (`Options.MaxTableSize`) -- Size of value log file (`Options.ValueLogFileSize`) - -If you want to decrease the memory usage of Badger instance, tweak these -options (ideally one at a time) until you achieve the desired -memory usage. - -### Statistics -Badger records metrics using the [expvar] package, which is included in the Go -standard library. All the metrics are documented in [y/metrics.go][metrics] -file. - -`expvar` package adds a handler in to the default HTTP server (which has to be -started explicitly), and serves up the metrics at the `/debug/vars` endpoint. -These metrics can then be collected by a system like [Prometheus], to get -better visibility into what Badger is doing. - -[expvar]: https://golang.org/pkg/expvar/ -[metrics]: https://github.com/dgraph-io/badger/blob/master/y/metrics.go -[Prometheus]: https://prometheus.io/ - -## Resources - -### Blog Posts -1. [Introducing Badger: A fast key-value store written natively in -Go](https://open.dgraph.io/post/badger/) -2. [Make Badger crash resilient with ALICE](https://blog.dgraph.io/post/alice/) -3. [Badger vs LMDB vs BoltDB: Benchmarking key-value databases in Go](https://blog.dgraph.io/post/badger-lmdb-boltdb/) -4. 
[Concurrent ACID Transactions in Badger](https://blog.dgraph.io/post/badger-txn/) - -## Design -Badger was written with these design goals in mind: - -- Write a key-value database in pure Go. -- Use latest research to build the fastest KV database for data sets spanning terabytes. -- Optimize for SSDs. - -Badger’s design is based on a paper titled _[WiscKey: Separating Keys from -Values in SSD-conscious Storage][wisckey]_. - -[wisckey]: https://www.usenix.org/system/files/conference/fast16/fast16-papers-lu.pdf - -### Comparisons -| Feature | Badger | RocksDB | BoltDB | -| ------- | ------ | ------- | ------ | -| Design | LSM tree with value log | LSM tree only | B+ tree | -| High Read throughput | Yes | No | Yes | -| High Write throughput | Yes | Yes | No | -| Designed for SSDs | Yes (with latest research 1) | Not specifically 2 | No | -| Embeddable | Yes | Yes | Yes | -| Sorted KV access | Yes | Yes | Yes | -| Pure Go (no Cgo) | Yes | No | Yes | -| Transactions | Yes, ACID, concurrent with SSI3 | Yes (but non-ACID) | Yes, ACID | -| Snapshots | Yes | Yes | Yes | -| TTL support | Yes | Yes | No | - -1 The [WISCKEY paper][wisckey] (on which Badger is based) saw big -wins with separating values from keys, significantly reducing the write -amplification compared to a typical LSM tree. - -2 RocksDB is an SSD optimized version of LevelDB, which was designed specifically for rotating disks. -As such RocksDB's design isn't aimed at SSDs. - -3 SSI: Serializable Snapshot Isolation. For more details, see the blog post [Concurrent ACID Transactions in Badger](https://blog.dgraph.io/post/badger-txn/) - -### Benchmarks -We have run comprehensive benchmarks against RocksDB, Bolt and LMDB. The -benchmarking code, and the detailed logs for the benchmarks can be found in the -[badger-bench] repo. More explanation, including graphs can be found the blog posts (linked -above). - -[badger-bench]: https://github.com/dgraph-io/badger-bench - -## Other Projects Using Badger -Below is a list of public, open source projects that use Badger: - -* [Dgraph](https://github.com/dgraph-io/dgraph) - Distributed graph database. -* [go-ipfs](https://github.com/ipfs/go-ipfs) - Go client for the InterPlanetary File System (IPFS), a new hypermedia distribution protocol. -* [0-stor](https://github.com/zero-os/0-stor) - Single device object store. -* [Sandglass](https://github.com/celrenheit/sandglass) - distributed, horizontally scalable, persistent, time sorted message queue. - -If you are using Badger in a project please send a pull request to add it to the list. - -## Frequently Asked Questions -- **My writes are getting stuck. Why?** - -This can happen if a long running iteration with `Prefetch` is set to false, but -a `Item::Value` call is made internally in the loop. That causes Badger to -acquire read locks over the value log files to avoid value log GC removing the -file from underneath. As a side effect, this also blocks a new value log GC -file from being created, when the value log file boundary is hit. - -Please see Github issues [#293](https://github.com/dgraph-io/badger/issues/293) -and [#315](https://github.com/dgraph-io/badger/issues/315). - -There are multiple workarounds during iteration: - -1. Use `Item::ValueCopy` instead of `Item::Value` when retrieving value. -1. Set `Prefetch` to true. Badger would then copy over the value and release the - file lock immediately. -1. When `Prefetch` is false, don't call `Item::Value` and do a pure key-only - iteration. 
This might be useful if you just want to delete a lot of keys. -1. Do the writes in a separate transaction after the reads. - -- **My writes are really slow. Why?** - -Are you creating a new transaction for every single key update? This will lead -to very low throughput. To get best write performance, batch up multiple writes -inside a transaction using single `DB.Update()` call. You could also have -multiple such `DB.Update()` calls being made concurrently from multiple -goroutines. - -- **I don't see any disk write. Why?** - -If you're using Badger with `SyncWrites=false`, then your writes might not be written to value log -and won't get synced to disk immediately. Writes to LSM tree are done inmemory first, before they -get compacted to disk. The compaction would only happen once `MaxTableSize` has been reached. So, if -you're doing a few writes and then checking, you might not see anything on disk. Once you `Close` -the database, you'll see these writes on disk. - -- **Which instances should I use for Badger?** - -We recommend using instances which provide local SSD storage, without any limit -on the maximum IOPS. In AWS, these are storage optimized instances like i3. They -provide local SSDs which clock 100K IOPS over 4KB blocks easily. - -- **Are there any Go specific settings that I should use?** - -We *highly* recommend setting a high number for GOMAXPROCS, which allows Go to -observe the full IOPS throughput provided by modern SSDs. In Dgraph, we have set -it to 128. For more details, [see this -thread](https://groups.google.com/d/topic/golang-nuts/jPb_h3TvlKE/discussion). - -## Contact -- Please use [discuss.dgraph.io](https://discuss.dgraph.io) for questions, feature requests and discussions. -- Please use [Github issue tracker](https://github.com/dgraph-io/badger/issues) for filing bugs or feature requests. -- Join [![Slack Status](http://slack.dgraph.io/badge.svg)](http://slack.dgraph.io). -- Follow us on Twitter [@dgraphlabs](https://twitter.com/dgraphlabs). - diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/appveyor.yml b/sessiondb/badger/vendor/github.com/dgraph-io/badger/appveyor.yml deleted file mode 100644 index 79dac33..0000000 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/appveyor.yml +++ /dev/null @@ -1,48 +0,0 @@ -# version format -version: "{build}" - -# Operating system (build VM template) -os: Windows Server 2012 R2 - -# Platform. -platform: x64 - -clone_folder: c:\gopath\src\github.com\dgraph-io\badger - -# Environment variables -environment: - GOVERSION: 1.8.3 - GOPATH: c:\gopath - -# scripts that run after cloning repository -install: - - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - - go version - - go env - - python --version - -# To run your custom scripts instead of automatic MSBuild -build_script: - # We need to disable firewall - https://github.com/appveyor/ci/issues/1579#issuecomment-309830648 - - ps: Disable-NetFirewallRule -DisplayName 'File and Printer Sharing (SMB-Out)' - - cd c:\gopath\src\github.com\dgraph-io\badger - - git branch - - go get -t ./... - -# To run your custom scripts instead of automatic tests -test_script: - # Unit tests - - ps: Add-AppveyorTest "Unit Tests" -Outcome Running - - go test -v github.com/dgraph-io/badger/... - - go test -v -vlog_mmap=false github.com/dgraph-io/badger/... 
- - ps: Update-AppveyorTest "Unit Tests" -Outcome Passed - -notifications: - - provider: Email - to: - - pawan@dgraph.io - on_build_failure: true - on_build_status_changed: true -# to disable deployment -deploy: off - diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/backup.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/backup.go index 375b5a3..4649de2 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/backup.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/backup.go @@ -1,9 +1,26 @@ +/* + * Copyright 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package badger import ( "bufio" "encoding/binary" "io" + "log" "sync" "github.com/dgraph-io/badger/y" @@ -36,6 +53,8 @@ func (db *DB) Backup(w io.Writer, since uint64) (uint64, error) { opts := DefaultIteratorOptions opts.AllVersions = true it := txn.NewIterator(opts) + defer it.Close() + for it.Rewind(); it.Valid(); it.Next() { item := it.Item() if item.Version() < since { @@ -44,7 +63,8 @@ func (db *DB) Backup(w io.Writer, since uint64) (uint64, error) { } val, err := item.Value() if err != nil { - return err + log.Printf("Key [%x]. Error while fetching value [%v]\n", item.Key(), err) + continue } entry := &protos.KVPair{ @@ -68,7 +88,7 @@ func (db *DB) Backup(w io.Writer, since uint64) (uint64, error) { // Load reads a protobuf-encoded list of all entries from a reader and writes // them to the database. This can be used to restore the database from a backup -// made by calling DB.Dump(). +// made by calling DB.Backup(). // // DB.Load() should be called on a database that is not running any other // concurrent transactions while it is running. @@ -124,12 +144,17 @@ func (db *DB) Load(r io.Reader) error { UserMeta: e.UserMeta[0], ExpiresAt: e.ExpiresAt, }) + // Update nextCommit, memtable stores this timestamp in badger head + // when flushed. 
+ if e.Version >= db.orc.commitTs() { + db.orc.nextCommit = e.Version + 1 + } if len(entries) == 1000 { if err := batchSetAsyncIfNoErr(entries); err != nil { return err } - entries = entries[:0] + entries = make([]*Entry, 0, 1000) } } @@ -145,6 +170,7 @@ func (db *DB) Load(r io.Reader) error { case err := <-errChan: return err default: + db.orc.curRead = db.orc.commitTs() - 1 return nil } } diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/compaction.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/compaction.go index 23824ae..00be8f6 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/compaction.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/compaction.go @@ -20,6 +20,7 @@ import ( "bytes" "fmt" "log" + "math" "sync" "golang.org/x/net/trace" @@ -37,7 +38,7 @@ type keyRange struct { var infRange = keyRange{inf: true} func (r keyRange) String() string { - return fmt.Sprintf("[left=%q, right=%q, inf=%v]", r.left, r.right, r.inf) + return fmt.Sprintf("[left=%x, right=%x, inf=%v]", r.left, r.right, r.inf) } func (r keyRange) equals(dst keyRange) bool { @@ -75,7 +76,10 @@ func getKeyRange(tables []*table.Table) keyRange { biggest = tables[i].Biggest() } } - return keyRange{left: smallest, right: biggest} + return keyRange{ + left: y.KeyWithTs(y.ParseKey(smallest), math.MaxUint64), + right: y.KeyWithTs(y.ParseKey(biggest), 0), + } } type levelCompactStatus struct { diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/db.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/db.go index f03a5ca..1bd59e0 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/db.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/db.go @@ -17,8 +17,6 @@ package badger import ( - "bytes" - "container/heap" "encoding/binary" "expvar" "log" @@ -27,6 +25,7 @@ import ( "path/filepath" "strconv" "sync" + "sync/atomic" "time" "github.com/dgraph-io/badger/options" @@ -43,6 +42,7 @@ var ( badgerPrefix = []byte("!badger!") // Prefix for internal keys used by badger. head = []byte("!badger!head") // For storing value offset for replay. txnKey = []byte("!badger!txn") // For indicating end of entries in txn. + badgerMove = []byte("!badger!move") // For key-value pairs which got moved during GC. ) type closers struct { @@ -74,6 +74,8 @@ type DB struct { writeCh chan *request flushChan chan flushTask // For flushing memtables. + blockWrites int32 + orc *oracle } @@ -101,7 +103,7 @@ func replayFunction(out *DB) func(Entry, valuePointer) error { first := true return func(e Entry, vp valuePointer) error { // Function for replaying. if first { - out.elog.Printf("First key=%s\n", e.Key) + out.elog.Printf("First key=%q\n", e.Key) } first = false @@ -168,12 +170,24 @@ func Open(opt Options) (db *DB, err error) { opt.maxBatchSize = (15 * opt.MaxTableSize) / 100 opt.maxBatchCount = opt.maxBatchSize / int64(skl.MaxNodeSize) + if opt.ValueThreshold > math.MaxUint16-16 { + return nil, ErrValueThreshold + } + + if opt.ReadOnly { + // Can't truncate if the DB is read only. 
+ opt.Truncate = false + } + for _, path := range []string{opt.Dir, opt.ValueDir} { dirExists, err := exists(path) if err != nil { return nil, y.Wrapf(err, "Invalid Dir: %q", path) } if !dirExists { + if opt.ReadOnly { + return nil, y.Wrapf(err, "Cannot find Dir for read-only open: %q", path) + } // Try to create the directory err = os.Mkdir(path, 0700) if err != nil { @@ -189,8 +203,8 @@ func Open(opt Options) (db *DB, err error) { if err != nil { return nil, err } - - dirLockGuard, err := acquireDirectoryLock(opt.Dir, lockFile) + var dirLockGuard, valueDirLockGuard *directoryLockGuard + dirLockGuard, err = acquireDirectoryLock(opt.Dir, lockFile, opt.ReadOnly) if err != nil { return nil, err } @@ -199,9 +213,8 @@ func Open(opt Options) (db *DB, err error) { _ = dirLockGuard.release() } }() - var valueDirLockGuard *directoryLockGuard if absValueDir != absDir { - valueDirLockGuard, err = acquireDirectoryLock(opt.ValueDir, lockFile) + valueDirLockGuard, err = acquireDirectoryLock(opt.ValueDir, lockFile, opt.ReadOnly) if err != nil { return nil, err } @@ -218,7 +231,7 @@ func Open(opt Options) (db *DB, err error) { opt.ValueLogLoadingMode == options.MemoryMap) { return nil, ErrInvalidLoadingMode } - manifestFile, manifest, err := openOrCreateManifestFile(opt.Dir) + manifestFile, manifest, err := openOrCreateManifestFile(opt.Dir, opt.ReadOnly) if err != nil { return nil, err } @@ -229,12 +242,12 @@ func Open(opt Options) (db *DB, err error) { }() orc := &oracle{ - isManaged: opt.managedTxns, - nextCommit: 1, - pendingCommits: make(map[uint64]struct{}), - commits: make(map[uint64]uint64), + isManaged: opt.managedTxns, + nextCommit: 1, + commits: make(map[uint64]uint64), + readMark: y.WaterMark{}, } - heap.Init(&orc.commitMark) + orc.readMark.Init() db = &DB{ imm: make([]*skl.Skiplist, 0, opt.NumMemtables), @@ -259,11 +272,13 @@ func Open(opt Options) (db *DB, err error) { return nil, err } - db.closers.compactors = y.NewCloser(1) - db.lc.startCompact(db.closers.compactors) + if !opt.ReadOnly { + db.closers.compactors = y.NewCloser(1) + db.lc.startCompact(db.closers.compactors) - db.closers.memtable = y.NewCloser(1) - go db.flushMemtable(db.closers.memtable) // Need levels controller to be up. + db.closers.memtable = y.NewCloser(1) + go db.flushMemtable(db.closers.memtable) // Need levels controller to be up. + } if err = db.vlog.Open(db, opt); err != nil { return nil, err @@ -319,7 +334,8 @@ func Open(opt Options) (db *DB, err error) { } // Close closes a DB. It's crucial to call it to ensure all the pending updates -// make their way to disk. +// make their way to disk. Calling DB.Close() multiple times is not safe and would +// cause panic. func (db *DB) Close() (err error) { db.elog.Printf("Closing database") // Stop value GC first. @@ -366,11 +382,31 @@ func (db *DB) Close() (err error) { } db.flushChan <- flushTask{nil, valuePointer{}} // Tell flusher to quit. - db.closers.memtable.Wait() - db.elog.Printf("Memtable flushed") + if db.closers.memtable != nil { + db.closers.memtable.Wait() + db.elog.Printf("Memtable flushed") + } + if db.closers.compactors != nil { + db.closers.compactors.SignalAndWait() + db.elog.Printf("Compaction finished") + } - db.closers.compactors.SignalAndWait() - db.elog.Printf("Compaction finished") + // Force Compact L0 + // We don't need to care about cstatus since no parallel compaction is running. 
+ cd := compactDef{ + elog: trace.New("Badger", "Compact"), + thisLevel: db.lc.levels[0], + nextLevel: db.lc.levels[1], + } + cd.elog.SetMaxEvents(100) + defer cd.elog.Finish() + if db.lc.fillTablesL0(&cd) { + if err := db.lc.runCompactDef(0, cd); err != nil { + cd.elog.LazyPrintf("\tLOG Compact FAILED with error: %+v: %+v", err, cd) + } + } else { + cd.elog.LazyPrintf("fillTables failed for level zero. No compaction required") + } if lcErr := db.lc.close(); err == nil { err = errors.Wrap(lcErr, "DB.Close") @@ -380,8 +416,10 @@ func (db *DB) Close() (err error) { db.elog.Finish() - if guardErr := db.dirLockGuard.release(); err == nil { - err = errors.Wrap(guardErr, "DB.Close") + if db.dirLockGuard != nil { + if guardErr := db.dirLockGuard.release(); err == nil { + err = errors.Wrap(guardErr, "DB.Close") + } } if db.valueDirGuard != nil { if guardErr := db.valueDirGuard.release(); err == nil { @@ -451,33 +489,25 @@ func (db *DB) getMemTables() ([]*skl.Skiplist, func()) { // get returns the value in memtable or disk for given key. // Note that value will include meta byte. +// +// IMPORTANT: We should never write an entry with an older timestamp for the same key, We need to +// maintain this invariant to search for the latest value of a key, or else we need to search in all +// tables and find the max version among them. To maintain this invariant, we also need to ensure +// that all versions of a key are always present in the same table from level 1, because compaction +// can push any table down. func (db *DB) get(key []byte) (y.ValueStruct, error) { tables, decr := db.getMemTables() // Lock should be released. defer decr() y.NumGets.Add(1) - version := y.ParseTs(key) - var maxVs y.ValueStruct - // Need to search for values in all tables, with managed db - // latest value needn't be present in the latest table. - // Even without managed db, purging can cause this constraint - // to be violated. - // Search until required version is found or iterate over all - // tables and return max version. for i := 0; i < len(tables); i++ { vs := tables[i].Get(key) y.NumMemtableGets.Add(1) - if vs.Meta == 0 && vs.Value == nil { - continue - } - if vs.Version == version { + if vs.Meta != 0 || vs.Value != nil { return vs, nil } - if maxVs.Version < vs.Version { - maxVs = vs - } } - return db.lc.get(key, maxVs) + return db.lc.get(key) } func (db *DB) updateOffset(ptrs []valuePointer) { @@ -568,8 +598,12 @@ func (db *DB) writeRequests(reqs []*request) error { continue } count += len(b.Entries) - for err := db.ensureRoomForWrite(); err != nil; err = db.ensureRoomForWrite() { - db.elog.Printf("Making room for writes") + var i uint64 + for err := db.ensureRoomForWrite(); err == errNoRoom; err = db.ensureRoomForWrite() { + i++ + if i%100 == 0 { + db.elog.Printf("Making room for writes") + } // We need to poll a bit because both hasRoomForWrite and the flusher need access to s.imm. // When flushChan is full and you are blocked there, and the flusher is trying to update s.imm, // you will get a deadlock. 
@@ -591,6 +625,9 @@ func (db *DB) writeRequests(reqs []*request) error { } func (db *DB) sendToWriteCh(entries []*Entry) (*request, error) { + if atomic.LoadInt32(&db.blockWrites) == 1 { + return nil, ErrBlockedWrites + } var count, size int64 for _, e := range entries { size += int64(e.estimateSize(db.opt.ValueThreshold)) @@ -681,11 +718,7 @@ func (db *DB) batchSet(entries []*Entry) error { return err } - req.Wg.Wait() - req.Entries = nil - err = req.Err - requestPool.Put(req) - return err + return req.Wait() } // batchSetAsync is the asynchronous version of batchSet. It accepts a callback @@ -700,10 +733,7 @@ func (db *DB) batchSetAsync(entries []*Entry, f func(error)) error { return err } go func() { - req.Wg.Wait() - err := req.Err - req.Entries = nil - requestPool.Put(req) + err := req.Wait() // Write is complete. Let's call the callback function now. f(err) }() @@ -748,7 +778,7 @@ func arenaSize(opt Options) int64 { return opt.MaxTableSize + opt.maxBatchSize + opt.maxBatchCount*int64(skl.MaxNodeSize) } -// WriteLevel0Table flushes memtable. It drops deleteValues. +// WriteLevel0Table flushes memtable. func writeLevel0Table(s *skl.Skiplist, f *os.File) error { iter := s.NewIterator() defer iter.Close() @@ -768,6 +798,8 @@ type flushTask struct { vptr valuePointer } +// TODO: Ensure that this function doesn't return, or is handled by another wrapper function. +// Otherwise, we would have no goroutine which can flush memtables. func (db *DB) flushMemtable(lc *y.Closer) error { defer lc.Done() @@ -824,7 +856,12 @@ func (db *DB) flushMemtable(lc *y.Closer) error { // Update s.imm. Need a lock. db.Lock() - y.AssertTrue(ft.mt == db.imm[0]) //For now, single threaded. + // This is a single-threaded operation. ft.mt corresponds to the head of + // db.imm list. Once we flush it, we advance db.imm. The next ft.mt + // which would arrive here would match db.imm[0], because we acquire a + // lock over DB when pushing to flushChan. + // TODO: This logic is dirty AF. Any change and this could easily break. + y.AssertTrue(ft.mt == db.imm[0]) db.imm = db.imm[1:] ft.mt.DecrRef() // Return memory. db.Unlock() @@ -879,7 +916,6 @@ func (db *DB) calculateSize() { _, vlogSize = totalSize(db.opt.ValueDir) } y.VlogSize.Set(db.opt.Dir, newInt(vlogSize)) - } func (db *DB) updateSize(lc *y.Closer) { @@ -898,141 +934,14 @@ func (db *DB) updateSize(lc *y.Closer) { } } -// PurgeVersionsBelow will delete all versions of a key below the specified version -func (db *DB) PurgeVersionsBelow(key []byte, ts uint64) error { - txn := db.NewTransaction(false) - defer txn.Discard() - return db.purgeVersionsBelow(txn, key, ts) -} - -func (db *DB) purgeVersionsBelow(txn *Txn, key []byte, ts uint64) error { - opts := DefaultIteratorOptions - opts.AllVersions = true - opts.PrefetchValues = false - it := txn.NewIterator(opts) - defer it.Close() - - var entries []*Entry - - for it.Seek(key); it.ValidForPrefix(key); it.Next() { - item := it.Item() - if !bytes.Equal(key, item.Key()) || item.Version() >= ts { - continue - } - if isDeletedOrExpired(item.meta, item.ExpiresAt()) { - continue - } - - // Found an older version. Mark for deletion - entries = append(entries, - &Entry{ - Key: y.KeyWithTs(key, item.version), - meta: bitDelete, - }) - db.vlog.updateGCStats(item) - } - return db.batchSet(entries) -} - -// PurgeOlderVersions deletes older versions of all keys. -// -// This function could be called prior to doing garbage collection to clean up -// older versions that are no longer needed. 
The caller must make sure that -// there are no long-running read transactions running before this function is -// called, otherwise they will not work as expected. -func (db *DB) PurgeOlderVersions() error { - return db.View(func(txn *Txn) error { - opts := DefaultIteratorOptions - opts.AllVersions = true - opts.PrefetchValues = false - it := txn.NewIterator(opts) - defer it.Close() - - var entries []*Entry - var lastKey []byte - var count, size int - var wg sync.WaitGroup - errChan := make(chan error, 1) - - // func to check for pending error before sending off a batch for writing - batchSetAsyncIfNoErr := func(entries []*Entry) error { - select { - case err := <-errChan: - return err - default: - wg.Add(1) - return txn.db.batchSetAsync(entries, func(err error) { - defer wg.Done() - if err != nil { - select { - case errChan <- err: - default: - } - } - }) - } - } - - for it.Rewind(); it.Valid(); it.Next() { - item := it.Item() - if !bytes.Equal(lastKey, item.Key()) { - lastKey = y.SafeCopy(lastKey, item.Key()) - continue - } - if isDeletedOrExpired(item.meta, item.ExpiresAt()) { - continue - } - // Found an older version. Mark for deletion - e := &Entry{ - Key: y.KeyWithTs(lastKey, item.version), - meta: bitDelete, - } - db.vlog.updateGCStats(item) - curSize := e.estimateSize(db.opt.ValueThreshold) - - // Batch up min(1000, maxBatchCount) entries at a time and write - // Ensure that total batch size doesn't exceed maxBatchSize - if count == 1000 || count+1 >= int(db.opt.maxBatchCount) || - size+curSize >= int(db.opt.maxBatchSize) { - if err := batchSetAsyncIfNoErr(entries); err != nil { - return err - } - count = 0 - size = 0 - entries = []*Entry{} - } - size += curSize - count++ - entries = append(entries, e) - } - - // Write last batch pending deletes - if count > 0 { - if err := batchSetAsyncIfNoErr(entries); err != nil { - return err - } - } - - wg.Wait() - - select { - case err := <-errChan: - return err - default: - return nil - } - }) -} - // RunValueLogGC triggers a value log garbage collection. // // It picks value log files to perform GC based on statistics that are collected -// duing the session, when DB.PurgeOlderVersions() and DB.PurgeVersions() is -// called. If no such statistics are available, then log files are picked in -// random order. The process stops as soon as the first log file is encountered -// which does not result in garbage collection. +// duing compactions. If no such statistics are available, then log files are +// picked in random order. The process stops as soon as the first log file is +// encountered which does not result in garbage collection. // -// When a log file is picked, it is first sampled If the sample shows that we +// When a log file is picked, it is first sampled. If the sample shows that we // can discard at least discardRatio space of that file, it would be rewritten. 
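Aside: with the purge APIs removed, the discard statistics that drive value log GC now come from compactions, so callers only need to invoke RunValueLogGC periodically. A common sketch of such a loop (db is assumed to be an open *badger.DB; the interval and ratio are arbitrary):

    package gcloop

    import (
        "time"

        "github.com/dgraph-io/badger"
    )

    // runGC periodically asks Badger to rewrite value log files where at least
    // half of the space is discardable. It keeps going while rewrites succeed;
    // ErrNoRewrite (or any other error) means there is nothing worth doing now.
    func runGC(db *badger.DB, stop <-chan struct{}) {
        ticker := time.NewTicker(5 * time.Minute)
        defer ticker.Stop()
        for {
            select {
            case <-stop:
                return
            case <-ticker.C:
                for db.RunValueLogGC(0.5) == nil {
                }
            }
        }
    }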
// // If a call to RunValueLogGC results in no rewrites, then an ErrNoRewrite is @@ -1060,8 +969,7 @@ func (db *DB) RunValueLogGC(discardRatio float64) error { // Find head on disk headKey := y.KeyWithTs(head, math.MaxUint64) // Need to pass with timestamp, lsm get removes the last 8 bytes and compares key - var maxVs y.ValueStruct - val, err := db.lc.get(headKey, maxVs) + val, err := db.lc.get(headKey) if err != nil { return errors.Wrap(err, "Retrieving head from on-disk LSM") } @@ -1179,14 +1087,27 @@ func (db *DB) GetSequence(key []byte, bandwidth uint64) (*Sequence, error) { return seq, err } +func (db *DB) Tables() []TableInfo { + return db.lc.getTableInfo() +} + +// MaxBatchCount returns max possible entries in batch +func (db *DB) MaxBatchCount() int64 { + return db.opt.maxBatchCount +} + +// MaxBatchCount returns max possible batch size +func (db *DB) MaxBatchSize() int64 { + return db.opt.maxBatchSize +} + // MergeOperator represents a Badger merge operator. type MergeOperator struct { sync.RWMutex - f MergeFunc - db *DB - key []byte - skipAtOrBelow uint64 - closer *y.Closer + f MergeFunc + db *DB + key []byte + closer *y.Closer } // MergeFunc accepts two byte slices, one representing an existing value, and @@ -1214,66 +1135,67 @@ func (db *DB) GetMergeOperator(key []byte, return op } -func (op *MergeOperator) iterateAndMerge(txn *Txn) (maxVersion uint64, val []byte, err error) { +var errNoMerge = errors.New("No need for merge") + +func (op *MergeOperator) iterateAndMerge(txn *Txn) (val []byte, err error) { opt := DefaultIteratorOptions opt.AllVersions = true it := txn.NewIterator(opt) - var first bool + defer it.Close() + + var numVersions int for it.Rewind(); it.ValidForPrefix(op.key); it.Next() { item := it.Item() - if item.Version() <= op.skipAtOrBelow { - continue - } - if item.Version() > maxVersion { - maxVersion = item.Version() - } - if !first { - first = true + numVersions++ + if numVersions == 1 { val, err = item.ValueCopy(val) if err != nil { - return 0, nil, err + return nil, err } } else { newVal, err := item.Value() if err != nil { - return 0, nil, err + return nil, err } val = op.f(val, newVal) } + if item.DiscardEarlierVersions() { + break + } } - if !first { - return 0, nil, ErrKeyNotFound + if numVersions == 0 { + return nil, ErrKeyNotFound + } else if numVersions == 1 { + return val, errNoMerge } - return maxVersion, val, nil + return val, nil } func (op *MergeOperator) compact() error { op.Lock() defer op.Unlock() - var maxVersion uint64 err := op.db.Update(func(txn *Txn) error { var ( val []byte err error ) - maxVersion, val, err = op.iterateAndMerge(txn) + val, err = op.iterateAndMerge(txn) if err != nil { return err } // Write value back to db - if maxVersion > op.skipAtOrBelow { - if err := txn.Set(op.key, val); err != nil { - return err - } + if err := txn.SetWithDiscard(op.key, val, 0); err != nil { + return err } return nil }) - if err != nil && err != ErrKeyNotFound { // Ignore ErrKeyNotFound errors during compaction + + if err == ErrKeyNotFound || err == errNoMerge { + // pass. 
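Aside: the merge operator no longer tracks skipAtOrBelow or purges old versions itself; it rewrites the merged value with SetWithDiscard and lets compaction drop the rest. Caller-side usage is unchanged; a small append-style sketch, assuming db is an open *badger.DB and Stop comes from the rest of this file:

    package mergedemo

    import (
        "time"

        "github.com/dgraph-io/badger"
    )

    // concatDemo registers a merge operator that appends new values to the
    // existing one and returns the merged result.
    func concatDemo(db *badger.DB) ([]byte, error) {
        concat := func(existing, newVal []byte) []byte {
            return append(existing, newVal...)
        }
        m := db.GetMergeOperator([]byte("merge-demo"), concat, 200*time.Millisecond)
        defer m.Stop()

        for _, part := range [][]byte{[]byte("A"), []byte("B"), []byte("C")} {
            if err := m.Add(part); err != nil {
                return nil, err
            }
        }
        // "ABC"; returns ErrKeyNotFound if Add was never called.
        return m.Get()
    }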
+ } else if err != nil { return err } - // Update version - op.skipAtOrBelow = maxVersion return nil } @@ -1287,16 +1209,9 @@ func (op *MergeOperator) runCompactions(dur time.Duration) { stop = true case <-ticker.C: // wait for tick } - oldSkipVersion := op.skipAtOrBelow if err := op.compact(); err != nil { log.Printf("Error while running merge operation: %s", err) } - // Purge older versions if version has updated - if op.skipAtOrBelow > oldSkipVersion { - if err := op.db.PurgeVersionsBelow(op.key, op.skipAtOrBelow+1); err != nil { - log.Printf("Error purging merged keys: %s", err) - } - } if stop { ticker.Stop() break @@ -1315,15 +1230,18 @@ func (op *MergeOperator) Add(val []byte) error { // Get returns the latest value for the merge operator, which is derived by // applying the merge function to all the values added so far. // -// If Add has not been called even once, Get will return ErrKeyNotFound +// If Add has not been called even once, Get will return ErrKeyNotFound. func (op *MergeOperator) Get() ([]byte, error) { op.RLock() defer op.RUnlock() var existing []byte err := op.db.View(func(txn *Txn) (err error) { - _, existing, err = op.iterateAndMerge(txn) + existing, err = op.iterateAndMerge(txn) return err }) + if err == errNoMerge { + return existing, nil + } return existing, err } diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/dir_unix.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/dir_unix.go index a058f45..a5e0fa3 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/dir_unix.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/dir_unix.go @@ -35,11 +35,14 @@ type directoryLockGuard struct { f *os.File // The absolute path to our pid file. path string + // Was this a shared lock for a read-only database? + readOnly bool } -// acquireDirectoryLock gets an exclusive lock on the directory (using flock). It writes our pid -// to dirPath/pidFileName for convenience. -func acquireDirectoryLock(dirPath string, pidFileName string) (*directoryLockGuard, error) { +// acquireDirectoryLock gets a lock on the directory (using flock). If +// this is not read-only, it will also write our pid to +// dirPath/pidFileName for convenience. +func acquireDirectoryLock(dirPath string, pidFileName string, readOnly bool) (*directoryLockGuard, error) { // Convert to absolute path so that Release still works even if we do an unbalanced // chdir in the meantime. absPidFilePath, err := filepath.Abs(filepath.Join(dirPath, pidFileName)) @@ -50,7 +53,12 @@ func acquireDirectoryLock(dirPath string, pidFileName string) (*directoryLockGua if err != nil { return nil, errors.Wrapf(err, "cannot open directory %q", dirPath) } - err = unix.Flock(int(f.Fd()), unix.LOCK_EX|unix.LOCK_NB) + opts := unix.LOCK_EX | unix.LOCK_NB + if readOnly { + opts = unix.LOCK_SH | unix.LOCK_NB + } + + err = unix.Flock(int(f.Fd()), opts) if err != nil { f.Close() return nil, errors.Wrapf(err, @@ -58,22 +66,27 @@ func acquireDirectoryLock(dirPath string, pidFileName string) (*directoryLockGua dirPath) } - // Yes, we happily overwrite a pre-existing pid file. We're the only badger process using this - // directory. - err = ioutil.WriteFile(absPidFilePath, []byte(fmt.Sprintf("%d\n", os.Getpid())), 0666) - if err != nil { - f.Close() - return nil, errors.Wrapf(err, - "Cannot write pid file %q", absPidFilePath) + if !readOnly { + // Yes, we happily overwrite a pre-existing pid file. We're the + // only read-write badger process using this directory. 
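Aside: the shared-versus-exclusive lock the unix guard now takes boils down to a single flock(2) call. A stripped-down sketch using golang.org/x/sys/unix, shown only as an illustration of the locking choice (not the vendored function itself):

    package dirlock

    import (
        "os"

        "golang.org/x/sys/unix"
    )

    // lockDir takes an exclusive lock on dir, or a shared one when readOnly is
    // set, and never blocks (LOCK_NB). The caller keeps the *os.File open for
    // as long as it needs the lock.
    func lockDir(dir string, readOnly bool) (*os.File, error) {
        f, err := os.Open(dir)
        if err != nil {
            return nil, err
        }
        how := unix.LOCK_EX | unix.LOCK_NB
        if readOnly {
            how = unix.LOCK_SH | unix.LOCK_NB
        }
        if err := unix.Flock(int(f.Fd()), how); err != nil {
            f.Close()
            return nil, err
        }
        return f, nil
    }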
+ err = ioutil.WriteFile(absPidFilePath, []byte(fmt.Sprintf("%d\n", os.Getpid())), 0666) + if err != nil { + f.Close() + return nil, errors.Wrapf(err, + "Cannot write pid file %q", absPidFilePath) + } } - - return &directoryLockGuard{f, absPidFilePath}, nil + return &directoryLockGuard{f, absPidFilePath, readOnly}, nil } // Release deletes the pid file and releases our lock on the directory. func (guard *directoryLockGuard) release() error { - // It's important that we remove the pid file first. - err := os.Remove(guard.path) + var err error + if !guard.readOnly { + // It's important that we remove the pid file first. + err = os.Remove(guard.path) + } + if closeErr := guard.f.Close(); err == nil { err = closeErr } diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/dir_windows.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/dir_windows.go index 36d599f..80de84e 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/dir_windows.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/dir_windows.go @@ -57,7 +57,11 @@ type directoryLockGuard struct { } // AcquireDirectoryLock acquires exclusive access to a directory. -func acquireDirectoryLock(dirPath string, pidFileName string) (*directoryLockGuard, error) { +func acquireDirectoryLock(dirPath string, pidFileName string, readOnly bool) (*directoryLockGuard, error) { + if readOnly { + return nil, ErrWindowsNotSupported + } + // Convert to absolute path so that Release still works even if we do an unbalanced // chdir in the meantime. absLockFilePath, err := filepath.Abs(filepath.Join(dirPath, pidFileName)) diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/doc.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/doc.go deleted file mode 100644 index 83dc9a2..0000000 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/doc.go +++ /dev/null @@ -1,28 +0,0 @@ -/* -Package badger implements an embeddable, simple and fast key-value database, -written in pure Go. It is designed to be highly performant for both reads and -writes simultaneously. Badger uses Multi-Version Concurrency Control (MVCC), and -supports transactions. It runs transactions concurrently, with serializable -snapshot isolation guarantees. - -Badger uses an LSM tree along with a value log to separate keys from values, -hence reducing both write amplification and the size of the LSM tree. This -allows LSM tree to be served entirely from RAM, while the values are served -from SSD. - - -Usage - -Badger has the following main types: DB, Txn, Item and Iterator. DB contains -keys that are associated with values. It must be opened with the appropriate -options before it can be accessed. - -All operations happen inside a Txn. Txn represents a transaction, which can -be read-only or read-write. Read-only transactions can read values for a -given key (which are returned inside an Item), or iterate over a set of -key-value pairs using an Iterator (which are returned as Item type values as -well). Read-write transactions can also update and delete keys from the DB. - -See the examples for more usage details. -*/ -package badger diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/errors.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/errors.go index b03a24f..1de3582 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/errors.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/errors.go @@ -27,6 +27,10 @@ var ( // range. 
ErrValueLogSize = errors.New("Invalid ValueLogFileSize, must be between 1MB and 2GB") + // ErrValueThreshold is returned when ValueThreshold is set to a value close to or greater than + // uint16. + ErrValueThreshold = errors.New("Invalid ValueThreshold, must be lower than uint16.") + // ErrKeyNotFound is returned when key isn't found on a txn.Get. ErrKeyNotFound = errors.New("Key not found") @@ -81,9 +85,25 @@ var ( // ErrInvalidLoadingMode is returned when opt.ValueLogLoadingMode option is not // within the valid range ErrInvalidLoadingMode = errors.New("Invalid ValueLogLoadingMode, must be FileIO or MemoryMap") + + // ErrReplayNeeded is returned when opt.ReadOnly is set but the + // database requires a value log replay. + ErrReplayNeeded = errors.New("Database was not properly closed, cannot open read-only") + + // ErrWindowsNotSupported is returned when opt.ReadOnly is used on Windows + ErrWindowsNotSupported = errors.New("Read-only mode is not supported on Windows") + + // ErrTruncateNeeded is returned when the value log gets corrupt, and requires truncation of + // corrupt data to allow Badger to run properly. + ErrTruncateNeeded = errors.New("Value log truncate required to run DB. This might result in data loss.") + + // ErrBlockedWrites is returned if the user called DropAll. During the process of dropping all + // data from Badger, we stop accepting new writes, by returning this error. + ErrBlockedWrites = errors.New("Writes are blocked possibly due to DropAll") ) -const maxKeySize = 1 << 16 // Key length can't be more than uint16, as determined by table::header. +// Key length can't be more than uint16, as determined by table::header. +const maxKeySize = 1<<16 - 8 // 8 bytes are for storing timestamp func exceedsMaxKeySizeError(key []byte) error { return errors.Errorf("Key with size %d exceeded %d limit. Key:\n%s", diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/iterator.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/iterator.go index c7ce634..3a6bec9 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/iterator.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/iterator.go @@ -20,6 +20,7 @@ import ( "bytes" "fmt" "sync" + "sync/atomic" "time" "github.com/dgraph-io/badger/options" @@ -53,20 +54,32 @@ type Item struct { txn *Txn } -// ToString returns a string representation of Item -func (item *Item) ToString() string { +// String returns a string representation of Item +func (item *Item) String() string { return fmt.Sprintf("key=%q, version=%d, meta=%x", item.Key(), item.Version(), item.meta) +} +// Deprecated +// ToString returns a string representation of Item +func (item *Item) ToString() string { + return item.String() } // Key returns the key. // // Key is only valid as long as item is valid, or transaction is valid. If you need to use it -// outside its validity, please copy it. +// outside its validity, please use KeyCopy func (item *Item) Key() []byte { return item.key } +// KeyCopy returns a copy of the key of the item, writing it to dst slice. +// If nil is passed, or capacity of dst isn't sufficient, a new slice would be allocated and +// returned. +func (item *Item) KeyCopy(dst []byte) []byte { + return y.SafeCopy(dst, item.key) +} + // Version returns the commit timestamp of the item. func (item *Item) Version() uint64 { return item.version @@ -80,7 +93,8 @@ func (item *Item) Version() uint64 { // reused. 
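Aside: the new KeyCopy (like the existing ValueCopy) is the safe way to keep data beyond an iteration or transaction, since Key and Value only stay valid while the transaction does. A short sketch, assuming db is an open *badger.DB:

    package keysnapshot

    import "github.com/dgraph-io/badger"

    // snapshotKeys returns copies of all keys; the copies remain valid after
    // the read transaction ends, which the raw item.Key() slices would not.
    func snapshotKeys(db *badger.DB) ([][]byte, error) {
        var keys [][]byte
        err := db.View(func(txn *badger.Txn) error {
            opts := badger.DefaultIteratorOptions
            opts.PrefetchValues = false // keys only, skip value log reads
            it := txn.NewIterator(opts)
            defer it.Close()
            for it.Rewind(); it.Valid(); it.Next() {
                keys = append(keys, it.Item().KeyCopy(nil))
            }
            return nil
        })
        return keys, err
    }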
// // If you need to use a value outside a transaction, please use Item.ValueCopy -// instead, or copy it yourself. +// instead, or copy it yourself. Value might change once discard or commit is called. +// Use ValueCopy if you want to do a Set after Get. func (item *Item) Value() ([]byte, error) { item.wg.Wait() if item.status == prefetched { @@ -117,24 +131,70 @@ func (item *Item) hasValue() bool { return true } +// IsDeletedOrExpired returns true if item contains deleted or expired value. +func (item *Item) IsDeletedOrExpired() bool { + return isDeletedOrExpired(item.meta, item.expiresAt) +} + +func (item *Item) DiscardEarlierVersions() bool { + return item.meta&bitDiscardEarlierVersions > 0 +} + func (item *Item) yieldItemValue() ([]byte, func(), error) { - if !item.hasValue() { - return nil, nil, nil - } + key := item.Key() // No need to copy. + for { + if !item.hasValue() { + return nil, nil, nil + } - if item.slice == nil { - item.slice = new(y.Slice) - } + if item.slice == nil { + item.slice = new(y.Slice) + } - if (item.meta & bitValuePointer) == 0 { - val := item.slice.Resize(len(item.vptr)) - copy(val, item.vptr) - return val, nil, nil - } + if (item.meta & bitValuePointer) == 0 { + val := item.slice.Resize(len(item.vptr)) + copy(val, item.vptr) + return val, nil, nil + } - var vp valuePointer - vp.Decode(item.vptr) - return item.db.vlog.Read(vp, item.slice) + var vp valuePointer + vp.Decode(item.vptr) + result, cb, err := item.db.vlog.Read(vp, item.slice) + if err != ErrRetry { + return result, cb, err + } + if bytes.HasPrefix(key, badgerMove) { + // err == ErrRetry + // Error is retry even after checking the move keyspace. So, let's + // just assume that value is not present. + return nil, cb, nil + } + + // The value pointer is pointing to a deleted value log. Look for the + // move key and read that instead. + runCallback(cb) + // Do not put badgerMove on the left in append. It seems to cause some sort of manipulation. + key = append([]byte{}, badgerMove...) + key = append(key, y.KeyWithTs(item.Key(), item.Version())...) + // Note that we can't set item.key to move key, because that would + // change the key user sees before and after this call. Also, this move + // logic is internal logic and should not impact the external behavior + // of the retrieval. + vs, err := item.db.get(key) + if err != nil { + return nil, nil, err + } + if vs.Version != item.Version() { + return nil, nil, nil + } + // Bug fix: Always copy the vs.Value into vptr here. Otherwise, when item is reused this + // slice gets overwritten. + item.vptr = y.SafeCopy(item.vptr, vs.Value) + item.meta &^= bitValuePointer // Clear the value pointer bit. + if vs.Meta&bitValuePointer > 0 { + item.meta |= bitValuePointer // This meta would only be about value pointer. + } + } } func runCallback(cb func()) { @@ -235,6 +295,8 @@ type IteratorOptions struct { PrefetchSize int Reverse bool // Direction of iteration. False is forward, true is backward. AllVersions bool // Fetch all valid versions of the same key. + + internalAccess bool // Used to allow internal access to badger keys. } // DefaultIteratorOptions contains default options when iterating over Badger key-value stores. @@ -264,6 +326,10 @@ type Iterator struct { // Using prefetch is highly recommended if you're doing a long running iteration. // Avoid long running iterations in update transactions. 
func (txn *Txn) NewIterator(opt IteratorOptions) *Iterator { + if atomic.AddInt32(&txn.numIterators, 1) > 1 { + panic("Only one iterator can be active at one time.") + } + tables, decr := txn.db.getMemTables() defer decr() txn.db.vlog.incrIteratorCount() @@ -315,8 +381,22 @@ func (it *Iterator) ValidForPrefix(prefix []byte) bool { // Close would close the iterator. It is important to call this when you're done with iteration. func (it *Iterator) Close() { it.iitr.Close() + + // It is important to wait for the fill goroutines to finish. Otherwise, we might leave zombie + // goroutines behind, which are waiting to acquire file read locks after DB has been closed. + waitFor := func(l list) { + item := l.pop() + for item != nil { + item.wg.Wait() + item = l.pop() + } + } + waitFor(it.waste) + waitFor(it.data) + // TODO: We could handle this error. _ = it.txn.db.vlog.decrIteratorCount() + atomic.AddInt32(&it.txn.numIterators, -1) } // Next would advance the iterator by one. Always check it.Valid() after a Next() @@ -367,7 +447,7 @@ func (it *Iterator) parseItem() bool { } // Skip badger keys. - if bytes.HasPrefix(key, badgerPrefix) { + if !it.opt.internalAccess && bytes.HasPrefix(key, badgerPrefix) { mi.Next() return false } diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/levels.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/levels.go index 55002b4..31b7fe6 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/levels.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/levels.go @@ -18,6 +18,7 @@ package badger import ( "fmt" + "math" "math/rand" "os" "sort" @@ -104,7 +105,11 @@ func newLevelsController(kv *DB, mf *Manifest) (*levelsController, error) { var maxFileID uint64 for fileID, tableManifest := range mf.Tables { fname := table.NewFilename(fileID, kv.opt.Dir) - fd, err := y.OpenExistingSyncedFile(fname, true) + var flags uint32 = y.Sync + if kv.opt.ReadOnly { + flags |= y.ReadOnly + } + fd, err := y.OpenExistingFile(fname, flags) if err != nil { closeAllTables(tables) return nil, errors.Wrapf(err, "Opening file: %q", fname) @@ -165,6 +170,62 @@ func (s *levelsController) cleanupLevels() error { return firstErr } +// This function picks all tables from all levels, creates a manifest changeset, +// applies it, and then decrements the refs of these tables, which would result +// in their deletion. It spares one table from L0, to keep the badgerHead key +// persisted, so we don't lose where we are w.r.t. value log. +// NOTE: This function in itself isn't sufficient to completely delete all the +// data. After this, one would still need to iterate over the KV pairs and mark +// them as deleted. +func (s *levelsController) deleteLSMTree() (int, error) { + var all []*table.Table + var keepOne *table.Table + for _, l := range s.levels { + l.RLock() + if l.level == 0 && len(l.tables) > 1 { + // Skip the last table. We do this to keep the badgerMove key persisted. + lastIdx := len(l.tables) - 1 + keepOne = l.tables[lastIdx] + all = append(all, l.tables[:lastIdx]...) + } else { + all = append(all, l.tables...) + } + l.RUnlock() + } + if len(all) == 0 { + return 0, nil + } + + // Generate the manifest changes. 
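Aside: NewIterator now panics if a second iterator is opened while one is still active on the same transaction, so each iterator must be closed before the next one is created. A sketch of the safe pattern, assuming db is an open *badger.DB:

    package iterdemo

    import "github.com/dgraph-io/badger"

    // countTwice runs two sequential scans inside one read transaction.
    func countTwice(db *badger.DB) (first, second int, err error) {
        err = db.View(func(txn *badger.Txn) error {
            it := txn.NewIterator(badger.DefaultIteratorOptions)
            for it.Rewind(); it.Valid(); it.Next() {
                first++
            }
            // Close before opening another iterator on the same transaction;
            // with this change a second concurrent iterator panics.
            it.Close()

            it2 := txn.NewIterator(badger.DefaultIteratorOptions)
            defer it2.Close()
            for it2.Rewind(); it2.Valid(); it2.Next() {
                second++
            }
            return nil
        })
        return first, second, err
    }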
+ changes := []*protos.ManifestChange{} + for _, table := range all { + changes = append(changes, makeTableDeleteChange(table.ID())) + } + changeSet := protos.ManifestChangeSet{Changes: changes} + if err := s.kv.manifest.addChanges(changeSet.Changes); err != nil { + return 0, err + } + + for _, l := range s.levels { + l.Lock() + l.totalSize = 0 + if l.level == 0 && len(l.tables) > 1 { + l.tables = []*table.Table{keepOne} + l.totalSize += keepOne.Size() + } else { + l.tables = l.tables[:0] + } + l.Unlock() + } + // Now allow deletion of tables. + for _, table := range all { + if err := table.DecrRef(); err != nil { + return 0, err + } + } + return len(all), nil +} + func (s *levelsController) startCompact(lc *y.Closer) { n := s.kv.opt.NumCompactors lc.AddRunning(n - 1) @@ -258,6 +319,35 @@ func (s *levelsController) compactBuildTables( topTables := cd.top botTables := cd.bot + var hasOverlap bool + { + kr := getKeyRange(cd.top) + for i, lh := range s.levels { + if i <= l { // Skip upper levels. + continue + } + lh.RLock() + left, right := lh.overlappingTables(levelHandlerRLocked{}, kr) + lh.RUnlock() + if right-left > 0 { + hasOverlap = true + break + } + } + cd.elog.LazyPrintf("Key range overlaps with lower levels: %v", hasOverlap) + } + + // Try to collect stats so that we can inform value log about GC. That would help us find which + // value log file should be GCed. + discardStats := make(map[uint32]int64) + updateStats := func(vs y.ValueStruct) { + if vs.Meta&bitValuePointer > 0 { + var vp valuePointer + vp.Decode(vs.Value) + discardStats[vp.Fid] += int64(vp.Len) + } + } + // Create iterators across all the tables involved first. var iters []y.Iterator if l == 0 { @@ -274,54 +364,111 @@ func (s *levelsController) compactBuildTables( it.Rewind() + // Pick a discard ts, so we can discard versions below this ts. We should + // never discard any versions starting from above this timestamp, because + // that would affect the snapshot view guarantee provided by transactions. + discardTs := s.kv.orc.discardAtOrBelow() + // Start generating new tables. type newTableResult struct { table *table.Table err error } resultCh := make(chan newTableResult) - var i int - for ; it.Valid(); i++ { + var numBuilds, numVersions int + var lastKey, skipKey []byte + for it.Valid() { timeStart := time.Now() builder := table.NewTableBuilder() + var numKeys, numSkips uint64 for ; it.Valid(); it.Next() { - if builder.ReachedCapacity(s.kv.opt.MaxTableSize) { - break + // See if we need to skip this key. + if len(skipKey) > 0 { + if y.SameKey(it.Key(), skipKey) { + numSkips++ + updateStats(it.Value()) + continue + } else { + skipKey = skipKey[:0] + } + } + + if !y.SameKey(it.Key(), lastKey) { + if builder.ReachedCapacity(s.kv.opt.MaxTableSize) { + // Only break if we are on a different key, and have reached capacity. We want + // to ensure that all versions of the key are stored in the same sstable, and + // not divided across multiple tables at the same level. + break + } + lastKey = y.SafeCopy(lastKey, it.Key()) + numVersions = 0 } + + vs := it.Value() + version := y.ParseTs(it.Key()) + if version <= discardTs { + // Keep track of the number of versions encountered for this key. Only consider the + // versions which are below the minReadTs, otherwise, we might end up discarding the + // only valid version for a running transaction. 
+ numVersions++ + lastValidVersion := vs.Meta&bitDiscardEarlierVersions > 0 + if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) || + numVersions > s.kv.opt.NumVersionsToKeep || + lastValidVersion { + // If this version of the key is deleted or expired, skip all the rest of the + // versions. Ensure that we're only removing versions below readTs. + skipKey = y.SafeCopy(skipKey, it.Key()) + + if lastValidVersion { + // Add this key. We have set skipKey, so the following key versions + // would be skipped. + } else if hasOverlap { + // If this key range has overlap with lower levels, then keep the deletion + // marker with the latest version, discarding the rest. We have set skipKey, + // so the following key versions would be skipped. + } else { + // If no overlap, we can skip all the versions, by continuing here. + numSkips++ + updateStats(vs) + continue // Skip adding this key. + } + } + } + numKeys++ y.Check(builder.Add(it.Key(), it.Value())) } // It was true that it.Valid() at least once in the loop above, which means we // called Add() at least once, and builder is not Empty(). - y.AssertTrue(!builder.Empty()) - - cd.elog.LazyPrintf("LOG Compact. Iteration to generate one table took: %v\n", time.Since(timeStart)) - - fileID := s.reserveFileID() - go func(builder *table.Builder) { - defer builder.Close() - - fd, err := y.CreateSyncedFile(table.NewFilename(fileID, s.kv.opt.Dir), true) - if err != nil { - resultCh <- newTableResult{nil, errors.Wrapf(err, "While opening new table: %d", fileID)} - return - } + cd.elog.LazyPrintf("Added %d keys. Skipped %d keys.", numKeys, numSkips) + cd.elog.LazyPrintf("LOG Compact. Iteration took: %v\n", time.Since(timeStart)) + if !builder.Empty() { + numBuilds++ + fileID := s.reserveFileID() + go func(builder *table.Builder) { + defer builder.Close() + + fd, err := y.CreateSyncedFile(table.NewFilename(fileID, s.kv.opt.Dir), true) + if err != nil { + resultCh <- newTableResult{nil, errors.Wrapf(err, "While opening new table: %d", fileID)} + return + } - if _, err := fd.Write(builder.Finish()); err != nil { - resultCh <- newTableResult{nil, errors.Wrapf(err, "Unable to write to file: %d", fileID)} - return - } + if _, err := fd.Write(builder.Finish()); err != nil { + resultCh <- newTableResult{nil, errors.Wrapf(err, "Unable to write to file: %d", fileID)} + return + } - tbl, err := table.OpenTable(fd, s.kv.opt.TableLoadingMode) - // decrRef is added below. - resultCh <- newTableResult{tbl, errors.Wrapf(err, "Unable to open table: %q", fd.Name())} - }(builder) + tbl, err := table.OpenTable(fd, s.kv.opt.TableLoadingMode) + // decrRef is added below. + resultCh <- newTableResult{tbl, errors.Wrapf(err, "Unable to open table: %q", fd.Name())} + }(builder) + } } newTables := make([]*table.Table, 0, 20) - // Wait for all table builders to finish. var firstErr error - for x := 0; x < i; x++ { + for x := 0; x < numBuilds; x++ { res := <-resultCh newTables = append(newTables, res.table) if firstErr == nil { @@ -339,7 +486,7 @@ func (s *levelsController) compactBuildTables( if firstErr != nil { // An error happened. Delete all the newly created table files (by calling DecrRef // -- we're the only holders of a ref). 
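Aside: the version-skipping above is what makes NumVersionsToKeep effective; once the count of versions below the discard watermark exceeds it, the rest are dropped during compaction. A sketch of opting into more history and reading it back (directory hypothetical):

    package versions

    import "github.com/dgraph-io/badger"

    // openWithHistory keeps up to three versions per key across compactions.
    func openWithHistory(dir string) (*badger.DB, error) {
        opts := badger.DefaultOptions
        opts.Dir, opts.ValueDir = dir, dir
        opts.NumVersionsToKeep = 3
        return badger.Open(opts)
    }

    // allVersions lists every retained version of key, newest first.
    func allVersions(db *badger.DB, key []byte) (versions []uint64, err error) {
        err = db.View(func(txn *badger.Txn) error {
            opts := badger.DefaultIteratorOptions
            opts.AllVersions = true
            it := txn.NewIterator(opts)
            defer it.Close()
            for it.Seek(key); it.ValidForPrefix(key); it.Next() {
                versions = append(versions, it.Item().Version())
            }
            return nil
        })
        return versions, err
    }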
- for j := 0; j < i; j++ { + for j := 0; j < numBuilds; j++ { if newTables[j] != nil { newTables[j].DecrRef() } @@ -351,7 +498,8 @@ func (s *levelsController) compactBuildTables( sort.Slice(newTables, func(i, j int) bool { return y.CompareKeys(newTables[i].Biggest(), newTables[j].Biggest()) < 0 }) - + s.kv.vlog.updateGCStats(discardStats) + cd.elog.LazyPrintf("Discard stats: %v", discardStats) return newTables, func() error { return decrRefs(newTables) }, nil } @@ -442,8 +590,10 @@ func (s *levelsController) fillTables(cd *compactDef) bool { for _, t := range tbls { cd.thisSize = t.Size() cd.thisRange = keyRange{ - left: t.Smallest(), - right: t.Biggest(), + // We pick all the versions of the smallest and the biggest key. + left: y.KeyWithTs(y.ParseKey(t.Smallest()), math.MaxUint64), + // Note that version zero would be the rightmost key. + right: y.KeyWithTs(y.ParseKey(t.Biggest()), 0), } if s.cstatus.overlapsWith(cd.thisLevel.level, cd.thisRange) { continue @@ -482,40 +632,8 @@ func (s *levelsController) runCompactDef(l int, cd compactDef) (err error) { thisLevel := cd.thisLevel nextLevel := cd.nextLevel - if thisLevel.level >= 1 && len(cd.bot) == 0 { - y.AssertTrue(len(cd.top) == 1) - tbl := cd.top[0] - - // We write to the manifest _before_ we delete files (and after we created files). - changes := []*protos.ManifestChange{ - // The order matters here -- you can't temporarily have two copies of the same - // table id when reloading the manifest. - makeTableDeleteChange(tbl.ID()), - makeTableCreateChange(tbl.ID(), nextLevel.level), - } - if err := s.kv.manifest.addChanges(changes); err != nil { - return err - } - - // We have to add to nextLevel before we remove from thisLevel, not after. This way, we - // don't have a bug where reads would see keys missing from both levels. - - // Note: It's critical that we add tables (replace them) in nextLevel before deleting them - // in thisLevel. (We could finagle it atomically somehow.) Also, when reading we must - // read, or at least acquire s.RLock(), in increasing order by level, so that we don't skip - // a compaction. - - if err := nextLevel.replaceTables(cd.top); err != nil { - return err - } - if err := thisLevel.deleteTables(cd.top); err != nil { - return err - } - - cd.elog.LazyPrintf("\tLOG Compact-Move %d->%d smallest:%s biggest:%s took %v\n", - l, l+1, string(tbl.Smallest()), string(tbl.Biggest()), time.Since(timeStart)) - return nil - } + // Table should never be moved directly between levels, always be rewritten to allow discarding + // invalid versions. newTables, decr, err := s.compactBuildTables(l, cd) if err != nil { @@ -557,7 +675,7 @@ func (s *levelsController) doCompact(p compactionPriority) (bool, error) { y.AssertTrue(l+1 < s.kv.opt.MaxLevels) // Sanity check. cd := compactDef{ - elog: trace.New("Badger", "Compact"), + elog: trace.New(fmt.Sprintf("Badger.L%d", l), "Compact"), thisLevel: s.levels[l], nextLevel: s.levels[l+1], } @@ -580,6 +698,7 @@ func (s *levelsController) doCompact(p compactionPriority) (bool, error) { return false, nil } } + defer s.cstatus.delete(cd) // Remove the ranges from compaction status. cd.elog.LazyPrintf("Running for level: %d\n", cd.thisLevel.level) s.cstatus.toLog(cd.elog) @@ -589,8 +708,6 @@ func (s *levelsController) doCompact(p compactionPriority) (bool, error) { return false, err } - // Done with compaction. So, remove the ranges from compaction status. 
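Aside: the widened keyRange below covers every version of the boundary keys because versions live in an 8-byte suffix on each internal key (which is also why maxKeySize shrank by 8 earlier in this diff). Roughly what y.KeyWithTs does, reproduced here only as an illustration and not as the authoritative encoding:

    package tskey

    import (
        "encoding/binary"
        "math"
    )

    // keyWithTs mirrors the internal encoding: the suffix stores
    // math.MaxUint64 - version, so newer versions sort before older ones and
    // version 0 is the right-most ("biggest") variant of a key.
    func keyWithTs(key []byte, version uint64) []byte {
        out := make([]byte, len(key)+8)
        copy(out, key)
        binary.BigEndian.PutUint64(out[len(key):], math.MaxUint64-version)
        return out
    }

    // parseTs recovers the version from an encoded key.
    func parseTs(key []byte) uint64 {
        if len(key) < 8 {
            return 0
        }
        return math.MaxUint64 - binary.BigEndian.Uint64(key[len(key)-8:])
    }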
- s.cstatus.delete(cd) s.cstatus.toLog(cd.elog) cd.elog.LazyPrintf("Compaction for level: %d DONE", cd.thisLevel.level) return true, nil @@ -625,7 +742,7 @@ func (s *levelsController) addLevel0Table(t *table.Table) error { // Before we unstall, we need to make sure that level 0 and 1 are healthy. Otherwise, we // will very quickly fill up level 0 again and if the compaction strategy favors level 0, // then level 1 is going to super full. - for { + for i := 0; ; i++ { // Passing 0 for delSize to compactable means we're treating incomplete compactions as // not having finished -- we wait for them to finish. Also, it's crucial this behavior // replicates pickCompactLevels' behavior in computing compactability in order to @@ -634,6 +751,11 @@ func (s *levelsController) addLevel0Table(t *table.Table) error { break } time.Sleep(10 * time.Millisecond) + if i%100 == 0 { + prios := s.pickCompactLevels() + s.elog.Printf("Waiting to add level 0 table. Compaction priorities: %+v\n", prios) + i = 0 + } } { s.elog.Printf("UNSTALLED UNSTALLED UNSTALLED UNSTALLED UNSTALLED UNSTALLED: %v\n", @@ -651,14 +773,12 @@ func (s *levelsController) close() error { } // get returns the found value if any. If not found, we return nil. -func (s *levelsController) get(key []byte, maxVs y.ValueStruct) (y.ValueStruct, error) { +func (s *levelsController) get(key []byte) (y.ValueStruct, error) { // It's important that we iterate the levels from 0 on upward. The reason is, if we iterated // in opposite order, or in parallel (naively calling all the h.RLock() in some order) we could // read level L's tables post-compaction and level L+1's tables pre-compaction. (If we do // parallelize this, we will need to call the h.RLock() function by increasing order of level // number.) - - version := y.ParseTs(key) for _, h := range s.levels { vs, err := h.get(key) // Calls h.RLock() and h.RUnlock(). if err != nil { @@ -667,14 +787,9 @@ func (s *levelsController) get(key []byte, maxVs y.ValueStruct) (y.ValueStruct, if vs.Value == nil && vs.Meta == 0 { continue } - if vs.Version == version { - return vs, nil - } - if maxVs.Version < vs.Version { - maxVs = vs - } + return vs, nil } - return maxVs, nil + return y.ValueStruct{}, nil } func appendIteratorsReversed(out []y.Iterator, th []*table.Table, reversed bool) []y.Iterator { @@ -696,3 +811,31 @@ func (s *levelsController) appendIterators( } return iters } + +type TableInfo struct { + ID uint64 + Level int + Left []byte + Right []byte +} + +func (s *levelsController) getTableInfo() (result []TableInfo) { + for _, l := range s.levels { + for _, t := range l.tables { + info := TableInfo{ + ID: t.ID(), + Level: l.level, + Left: t.Smallest(), + Right: t.Biggest(), + } + result = append(result, info) + } + } + sort.Slice(result, func(i, j int) bool { + if result[i].Level != result[j].Level { + return result[i].Level < result[j].Level + } + return result[i].ID < result[j].ID + }) + return +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/managed_db.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/managed_db.go index 3b52c1e..c389813 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/managed_db.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/managed_db.go @@ -16,6 +16,16 @@ package badger +import ( + "math" + "sync" + "sync/atomic" + "time" + + "github.com/dgraph-io/badger/y" + "github.com/pkg/errors" +) + // ManagedDB allows end users to manage the transactions themselves. Transaction // start and commit timestamps are set by end-user. 
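Aside: in the managed mode referenced below, the caller owns both the read and the commit timestamps. A minimal sketch, assuming badger.OpenManaged from this vendored version and hypothetical directories:

    package manageddemo

    import "github.com/dgraph-io/badger"

    // writeAt opens a managed DB and commits one key at a caller-chosen timestamp.
    func writeAt(dir string, ts uint64, key, val []byte) error {
        opts := badger.DefaultOptions
        opts.Dir, opts.ValueDir = dir, dir

        db, err := badger.OpenManaged(opts)
        if err != nil {
            return err
        }
        defer db.Close()

        txn := db.NewTransactionAt(ts, true) // read at ts, update=true
        defer txn.Discard()
        if err := txn.Set(key, val); err != nil {
            return err
        }
        // The caller also picks the commit timestamp; a nil callback makes the
        // commit synchronous.
        return txn.CommitAt(ts, nil)
    }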
// @@ -72,15 +82,112 @@ func (txn *Txn) CommitAt(commitTs uint64, callback func(error)) error { return txn.Commit(callback) } -// PurgeVersionsBelow will delete all versions of a key below the specified version -func (db *ManagedDB) PurgeVersionsBelow(key []byte, ts uint64) error { - txn := db.NewTransactionAt(ts, false) - defer txn.Discard() - return db.purgeVersionsBelow(txn, key, ts) -} - // GetSequence is not supported on ManagedDB. Calling this would result // in a panic. func (db *ManagedDB) GetSequence(_ []byte, _ uint64) (*Sequence, error) { panic("Cannot use GetSequence for ManagedDB.") } + +// SetDiscardTs sets a timestamp at or below which, any invalid or deleted +// versions can be discarded from the LSM tree, and thence from the value log to +// reclaim disk space. +func (db *ManagedDB) SetDiscardTs(ts uint64) { + db.orc.setDiscardTs(ts) +} + +var errDone = errors.New("Done deleting keys") + +// DropAll would drop all the data stored in Badger. It does this in the following way. +// - Stop accepting new writes. +// - Pause the compactions. +// - Pick all tables from all levels, create a changeset to delete all these +// tables and apply it to manifest. DO not pick up the latest table from level +// 0, to preserve the (persistent) badgerHead key. +// - Iterate over the KVs in Level 0, and run deletes on them via transactions. +// - The deletions are done at the same timestamp as the latest version of the +// key. Thus, we could write the keys back at the same timestamp as before. +func (db *ManagedDB) DropAll() error { + // Stop accepting new writes. + atomic.StoreInt32(&db.blockWrites, 1) + + // Wait for writeCh to reach size of zero. This is not ideal, but a very + // simple way to allow writeCh to flush out, before we proceed. + tick := time.NewTicker(100 * time.Millisecond) + for range tick.C { + if len(db.writeCh) == 0 { + break + } + } + tick.Stop() + + // Stop the compactions. + if db.closers.compactors != nil { + db.closers.compactors.SignalAndWait() + } + + _, err := db.lc.deleteLSMTree() + // Allow writes so that we can run transactions. Ideally, the user must ensure that they're not + // doing more writes concurrently while this operation is happening. + atomic.StoreInt32(&db.blockWrites, 0) + // Need compactions to happen so deletes below can be flushed out. 
+ if db.closers.compactors != nil { + db.closers.compactors = y.NewCloser(1) + db.lc.startCompact(db.closers.compactors) + } + if err != nil { + return err + } + + type KV struct { + key []byte + version uint64 + } + + var kvs []KV + getKeys := func() error { + txn := db.NewTransactionAt(math.MaxUint64, false) + defer txn.Discard() + + opts := DefaultIteratorOptions + opts.PrefetchValues = false + itr := txn.NewIterator(opts) + defer itr.Close() + + for itr.Rewind(); itr.Valid(); itr.Next() { + item := itr.Item() + kvs = append(kvs, KV{item.KeyCopy(nil), item.Version()}) + } + return nil + } + if err := getKeys(); err != nil { + return err + } + + var wg sync.WaitGroup + errCh := make(chan error, 1) + for _, kv := range kvs { + wg.Add(1) + txn := db.NewTransactionAt(math.MaxUint64, true) + if err := txn.Delete(kv.key); err != nil { + return err + } + if err := txn.CommitAt(kv.version, func(rerr error) { + if rerr != nil { + select { + case errCh <- rerr: + default: + } + } + wg.Done() + }); err != nil { + return err + } + } + wg.Wait() + select { + case err := <-errCh: + return err + default: + return nil + } +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/manifest.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/manifest.go index f2e57d9..e16bbfb 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/manifest.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/manifest.go @@ -32,7 +32,7 @@ import ( "github.com/pkg/errors" ) -// Manifest represnts the contents of the MANIFEST file in a Badger store. +// Manifest represents the contents of the MANIFEST file in a Badger store. // // The MANIFEST file describes the startup state of the db -- all LSM files and what level they're // at. @@ -112,17 +112,24 @@ func (m *Manifest) clone() Manifest { // openOrCreateManifestFile opens a Badger manifest file if it exists, or creates on if // one doesn’t. -func openOrCreateManifestFile(dir string) (ret *manifestFile, result Manifest, err error) { - return helpOpenOrCreateManifestFile(dir, manifestDeletionsRewriteThreshold) +func openOrCreateManifestFile(dir string, readOnly bool) (ret *manifestFile, result Manifest, err error) { + return helpOpenOrCreateManifestFile(dir, readOnly, manifestDeletionsRewriteThreshold) } -func helpOpenOrCreateManifestFile(dir string, deletionsThreshold int) (ret *manifestFile, result Manifest, err error) { +func helpOpenOrCreateManifestFile(dir string, readOnly bool, deletionsThreshold int) (ret *manifestFile, result Manifest, err error) { path := filepath.Join(dir, ManifestFilename) - fp, err := y.OpenExistingSyncedFile(path, false) // We explicitly sync in addChanges, outside the lock. + var flags uint32 + if readOnly { + flags |= y.ReadOnly + } + fp, err := y.OpenExistingFile(path, flags) // We explicitly sync in addChanges, outside the lock. if err != nil { if !os.IsNotExist(err) { return nil, Manifest{}, err } + if readOnly { + return nil, Manifest{}, fmt.Errorf("no manifest found, required for read-only db") + } m := createManifest() fp, netCreations, err := helpRewrite(dir, &m) if err != nil { @@ -144,12 +151,13 @@ func helpOpenOrCreateManifestFile(dir string, deletionsThreshold int) (ret *mani return nil, Manifest{}, err } - // Truncate file so we don't have a half-written entry at the end. - if err := fp.Truncate(truncOffset); err != nil { - _ = fp.Close() - return nil, Manifest{}, err + if !readOnly { + // Truncate file so we don't have a half-written entry at the end. 
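Aside: DropAll and SetDiscardTs are both ManagedDB-only in this version. A hedged sketch of how they might be called; the caller must not issue writes concurrently with DropAll, and the timestamp passed to SetDiscardTs is hypothetical:

    package dropdemo

    import (
        "log"

        "github.com/dgraph-io/badger"
    )

    func reset(db *badger.ManagedDB) {
        // Drop everything: new writes are rejected with ErrBlockedWrites while
        // the LSM tree is torn down and the remaining level-0 keys are deleted.
        if err := db.DropAll(); err != nil {
            log.Fatal(err)
        }

        // Separately, tell compactions that versions at or below this timestamp
        // may be discarded; the application must know nothing still reads them.
        db.SetDiscardTs(42) // hypothetical timestamp
    }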
+ if err := fp.Truncate(truncOffset); err != nil { + _ = fp.Close() + return nil, Manifest{}, err + } } - if _, err = fp.Seek(0, io.SeekEnd); err != nil { _ = fp.Close() return nil, Manifest{}, err @@ -256,7 +264,7 @@ func helpRewrite(dir string, m *Manifest) (*os.File, int, error) { if err := os.Rename(rewritePath, manifestPath); err != nil { return nil, 0, err } - fp, err = y.OpenExistingSyncedFile(manifestPath, false) + fp, err = y.OpenExistingFile(manifestPath, 0) if err != nil { return nil, 0, err } diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/options.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/options.go index 9b8bc6e..21fc2a5 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/options.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/options.go @@ -46,9 +46,12 @@ type Options struct { // How should LSM tree be accessed. TableLoadingMode options.FileLoadingMode - // How should value log be accessed + // How should value log be accessed. ValueLogLoadingMode options.FileLoadingMode + // How many versions to keep per key. + NumVersionsToKeep int + // 3. Flags that user might want to review // ---------------------------------------- // The following affect all levels of LSM tree. @@ -73,6 +76,10 @@ type Options struct { // Size of single value log file. ValueLogFileSize int64 + // Max number of entries a value log file can hold (approximately). A value log file would be + // determined by the smaller of its file size and max entries. + ValueLogMaxEntries uint32 + // Number of compaction workers to run concurrently. NumCompactors int @@ -86,6 +93,15 @@ type Options struct { maxBatchCount int64 // max entries in batch maxBatchSize int64 // max batch size in bytes + + // Open the DB as read-only. With this set, multiple processes can + // open the same Badger DB. Note: if the DB being opened had crashed + // before and has vlog data to be replayed, ReadOnly will cause Open + // to fail with an appropriate message. + ReadOnly bool + + // Truncate value log to delete corrupt data, if any. Would not truncate if ReadOnly is set. + Truncate bool } // DefaultOptions sets a list of recommended options for good performance. @@ -105,8 +121,27 @@ var DefaultOptions = Options{ NumLevelZeroTablesStall: 10, NumMemtables: 5, SyncWrites: true, + NumVersionsToKeep: 1, // Nothing to read/write value log using standard File I/O // MemoryMap to mmap() the value log files - ValueLogFileSize: 1 << 30, - ValueThreshold: 20, + // (2^30 - 1)*2 when mmapping < 2^31 - 1, max int32. + // -1 so 2*ValueLogFileSize won't overflow on 32-bit systems. + ValueLogFileSize: 1<<30 - 1, + + ValueLogMaxEntries: 1000000, + ValueThreshold: 32, + Truncate: false, +} + +// LSMOnlyOptions follows from DefaultOptions, but sets a higher ValueThreshold so values would +// be colocated with the LSM tree, with value log largely acting as a write-ahead log only. These +// options would reduce the disk usage of value log, and make Badger act like a typical LSM tree. +var LSMOnlyOptions = Options{} + +func init() { + LSMOnlyOptions = DefaultOptions + + LSMOnlyOptions.ValueThreshold = 65500 // Max value length which fits in uint16. + LSMOnlyOptions.ValueLogFileSize = 64 << 20 // Allow easy space reclamation. 
+ LSMOnlyOptions.ValueLogLoadingMode = options.FileIO } diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/skl/arena.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/skl/arena.go index 849e5ee..def5507 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/skl/arena.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/skl/arena.go @@ -25,7 +25,12 @@ import ( const ( offsetSize = int(unsafe.Sizeof(uint32(0))) - ptrAlign = int(unsafe.Sizeof(uintptr(0))) - 1 + + // Always align nodes on 64-bit boundaries, even on 32-bit architectures, + // so that the node.value field is 64-bit aligned. This is necessary because + // node.getValueOffset uses atomic.LoadUint64, which expects its input + // pointer to be 64-bit aligned. + nodeAlign = int(unsafe.Sizeof(uint64(0))) - 1 ) // Arena should be lock-free. @@ -61,14 +66,14 @@ func (s *Arena) putNode(height int) uint32 { unusedSize := (maxHeight - height) * offsetSize // Pad the allocation with enough bytes to ensure pointer alignment. - l := uint32(MaxNodeSize - unusedSize + ptrAlign) + l := uint32(MaxNodeSize - unusedSize + nodeAlign) n := atomic.AddUint32(&s.n, l) y.AssertTruef(int(n) <= len(s.buf), "Arena too small, toWrite:%d newTotal:%d limit:%d", l, n, len(s.buf)) // Return the aligned offset. - m := (n - l + uint32(ptrAlign)) & ^uint32(ptrAlign) + m := (n - l + uint32(nodeAlign)) & ^uint32(nodeAlign) return m } diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/skl/skl.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/skl/skl.go index 7361751..b465b09 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/skl/skl.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/skl/skl.go @@ -54,8 +54,7 @@ type node struct { // can be atomically loaded and stored: // value offset: uint32 (bits 0-31) // value size : uint16 (bits 32-47) - // 12 bytes are allocated to ensure 8 byte alignment also on 32bit systems. - value [12]byte + value uint64 // A byte slice is 24 bytes. We are trying to save space here. keyOffset uint32 // Immutable. No need to lock to access key. 
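Aside: LSMOnlyOptions and the new Truncate flag are plain option tweaks; a short sketch of opening a store with them (directory hypothetical):

    package lsmonly

    import "github.com/dgraph-io/badger"

    // openLSMOnly keeps values up to the raised ValueThreshold inside the LSM
    // tree, using the value log mostly as a write-ahead log.
    func openLSMOnly(dir string) (*badger.DB, error) {
        opts := badger.LSMOnlyOptions
        opts.Dir, opts.ValueDir = dir, dir
        opts.Truncate = true // allow truncating a corrupt value log tail on open
        return badger.Open(opts)
    }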
@@ -109,7 +108,7 @@ func newNode(arena *Arena, key []byte, v y.ValueStruct, height int) *node { node.keyOffset = arena.putKey(key) node.keySize = uint16(len(key)) node.height = uint16(height) - *node.value64BitAlignedPtr() = encodeValue(arena.putVal(v), v.EncodedSize()) + node.value = encodeValue(arena.putVal(v), v.EncodedSize()) return node } @@ -135,15 +134,8 @@ func NewSkiplist(arenaSize int64) *Skiplist { } } -func (s *node) value64BitAlignedPtr() *uint64 { - if uintptr(unsafe.Pointer(&s.value))%8 == 0 { - return (*uint64)(unsafe.Pointer(&s.value)) - } - return (*uint64)(unsafe.Pointer(&s.value[4])) -} - func (s *node) getValueOffset() (uint32, uint16) { - value := atomic.LoadUint64(s.value64BitAlignedPtr()) + value := atomic.LoadUint64(&s.value) return decodeValue(value) } @@ -154,7 +146,7 @@ func (s *node) key(arena *Arena) []byte { func (s *node) setValue(arena *Arena, v y.ValueStruct) { valOffset := arena.putVal(v) value := encodeValue(valOffset, v.EncodedSize()) - atomic.StoreUint64(s.value64BitAlignedPtr(), value) + atomic.StoreUint64(&s.value, value) } func (s *node) getNextOffset(h int) uint32 { diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/table/README.md b/sessiondb/badger/vendor/github.com/dgraph-io/badger/table/README.md deleted file mode 100644 index 5d33e96..0000000 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/table/README.md +++ /dev/null @@ -1,51 +0,0 @@ -# BenchmarkRead - -``` -$ go test -bench Read$ -count 3 - -Size of table: 105843444 -BenchmarkRead-8 3 343846914 ns/op -BenchmarkRead-8 3 351790907 ns/op -BenchmarkRead-8 3 351762823 ns/op -``` - -Size of table is 105,843,444 bytes, which is ~101M. - -The rate is ~287M/s which matches our read speed. This is using mmap. - -To read a 64M table, this would take ~0.22s, which is negligible. - -``` -$ go test -bench BenchmarkReadAndBuild -count 3 - -BenchmarkReadAndBuild-8 1 2341034225 ns/op -BenchmarkReadAndBuild-8 1 2346349671 ns/op -BenchmarkReadAndBuild-8 1 2364064576 ns/op -``` - -The rate is ~43M/s. To build a ~64M table, this would take ~1.5s. Note that this -does NOT include the flushing of the table to disk. All we are doing above is -to read one table (mmaped) and write one table in memory. - -The table building takes 1.5-0.22 ~ 1.3s. - -If we are writing out up to 10 tables, this would take 1.5*10 ~ 15s, and ~13s -is spent building the tables. - -When running populate, building one table in memory tends to take ~1.5s to ~2.5s -on my system. Where does this overhead come from? Let's investigate the merging. - -Below, we merge 5 tables. The total size remains unchanged at ~101M. - -``` -$ go test -bench ReadMerged -count 3 -BenchmarkReadMerged-8 1 1321190264 ns/op -BenchmarkReadMerged-8 1 1296958737 ns/op -BenchmarkReadMerged-8 1 1314381178 ns/op -``` - -The rate is ~76M/s. To build a 64M table, this would take ~0.84s. The writing -takes ~1.3s as we saw above. So in total, we expect around 0.84+1.3 ~ 2.1s. -This roughly matches what we observe when running populate. There might be -some additional overhead due to the concurrent writes going on, in flushing the -table to disk. Also, the tables tend to be slightly bigger than 64M/s. 
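Aside: the arena and skiplist changes exist because of the sync/atomic alignment rule: 64-bit atomic loads and stores need 64-bit-aligned operands on 32-bit platforms, and the arena now guarantees this by rounding node offsets up to 8-byte boundaries instead of padding the value field. A tiny illustration of the layout and the align-up step, shown only as a sketch:

    package align

    import "sync/atomic"

    // node keeps its packed value word as a plain uint64 that is accessed
    // atomically; the arena hands out node offsets rounded to 8 bytes so this
    // works on 32-bit platforms without the old 12-byte padding trick.
    type node struct {
        value     uint64 // (value offset : uint32) | (value size : uint16)
        keyOffset uint32
        keySize   uint16
        height    uint16
    }

    func (n *node) loadValueWord() uint64   { return atomic.LoadUint64(&n.value) }
    func (n *node) storeValueWord(v uint64) { atomic.StoreUint64(&n.value, v) }

    // alignUp rounds off up to the next multiple of 8, the role nodeAlign
    // plays in the arena allocator above.
    func alignUp(off uint32) uint32 {
        const mask = 8 - 1
        return (off + mask) &^ mask
    }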
\ No newline at end of file diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/table/builder.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/table/builder.go index 7ce0d61..43e6562 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/table/builder.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/table/builder.go @@ -212,7 +212,7 @@ func (b *Builder) Finish() []byte { } kl := int(binary.BigEndian.Uint16(klen[:])) if cap(key) < kl { - key = make([]byte, 2*kl) + key = make([]byte, 2*int(kl)) // 2 * uint16 will overflow } key = key[:kl] y.Check2(b.keyBuf.Read(key)) diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/table/iterator.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/table/iterator.go index 533fe2a..0eb5ed0 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/table/iterator.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/table/iterator.go @@ -341,6 +341,7 @@ func (itr *Iterator) next() { } itr.bi = block.NewIterator() itr.bi.SeekToFirst() + itr.err = itr.bi.Error() return } @@ -368,6 +369,7 @@ func (itr *Iterator) prev() { } itr.bi = block.NewIterator() itr.bi.SeekToLast() + itr.err = itr.bi.Error() return } diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/table/table.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/table/table.go index 52a17b1..9804fa1 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/table/table.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/table/table.go @@ -22,7 +22,6 @@ import ( "os" "path" "path/filepath" - "sort" "strconv" "strings" "sync" @@ -102,12 +101,6 @@ func (b block) NewIterator() *blockIterator { return &blockIterator{data: b.data} } -type byKey []keyOffset - -func (b byKey) Len() int { return len(b) } -func (b byKey) Swap(i int, j int) { b[i], b[j] = b[j], b[i] } -func (b byKey) Less(i int, j int) bool { return y.CompareKeys(b[i].key, b[j].key) < 0 } - // OpenTable assumes file has only one table and opens it. Takes ownership of fd upon function // entry. Returns a table with one reference count on it (decrementing which may delete the file! // -- consider t.Close() instead). The fd has to writeable because we call Truncate on it before @@ -290,7 +283,6 @@ func (t *Table) readIndex() error { return readError } - sort.Sort(byKey(t.blockIndex)) return nil } diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/transaction.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/transaction.go index ac9d1a2..32a6a96 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/transaction.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/transaction.go @@ -18,8 +18,6 @@ package badger import ( "bytes" - "container/heap" - "fmt" "math" "sort" "strconv" @@ -32,32 +30,20 @@ import ( "github.com/pkg/errors" ) -type uint64Heap []uint64 - -func (u uint64Heap) Len() int { return len(u) } -func (u uint64Heap) Less(i int, j int) bool { return u[i] < u[j] } -func (u uint64Heap) Swap(i int, j int) { u[i], u[j] = u[j], u[i] } -func (u *uint64Heap) Push(x interface{}) { *u = append(*u, x.(uint64)) } -func (u *uint64Heap) Pop() interface{} { - old := *u - n := len(old) - x := old[n-1] - *u = old[0 : n-1] - return x -} - type oracle struct { + // curRead must be at the top for memory alignment. See issue #311. curRead uint64 // Managed by the mutex. refCount int64 isManaged bool // Does not change value, so no locking required. 
sync.Mutex + writeLock sync.Mutex nextCommit uint64 - // These two structures are used to figure out when a commit is done. The minimum done commit is - // used to update curRead. - commitMark uint64Heap - pendingCommits map[uint64]struct{} + // Either of these is used to determine which versions can be permanently + // discarded during compaction. + discardTs uint64 // Used by ManagedDB. + readMark y.WaterMark // Used by DB. // commits stores a key fingerprint and latest commit counter for it. // refCount is used to clear out commits map to avoid a memory blowup. @@ -70,15 +56,14 @@ func (o *oracle) addRef() { func (o *oracle) decrRef() { if count := atomic.AddInt64(&o.refCount, -1); count == 0 { - // Clear out pendingCommits maps to release memory. + // Clear out commits maps to release memory. o.Lock() - // There could be race here, so check again. - // Checking commitMark is safe since it is protected by mutex. - if len(o.commitMark) > 0 { + // Avoids the race where something new is added to commitsMap + // after we check refCount and before we take Lock. + if atomic.LoadInt64(&o.refCount) != 0 { o.Unlock() return } - y.AssertTrue(len(o.pendingCommits) == 0) if len(o.commits) >= 1000 { // If the map is still small, let it slide. o.commits = make(map[uint64]uint64) } @@ -99,6 +84,23 @@ func (o *oracle) commitTs() uint64 { return o.nextCommit } +// Any deleted or invalid versions at or below ts would be discarded during +// compaction to reclaim disk space in LSM tree and thence value log. +func (o *oracle) setDiscardTs(ts uint64) { + o.Lock() + defer o.Unlock() + o.discardTs = ts +} + +func (o *oracle) discardAtOrBelow() uint64 { + if o.isManaged { + o.Lock() + defer o.Unlock() + return o.discardTs + } + return o.readMark.MinReadTs() +} + // hasConflict must be called while having a lock. func (o *oracle) hasConflict(txn *Txn) bool { if len(txn.reads) == 0 { @@ -134,15 +136,6 @@ func (o *oracle) newCommitTs(txn *Txn) uint64 { for _, w := range txn.writes { o.commits[w] = ts // Update the commitTs. } - if o.isManaged { - // No need to update the heap. - return ts - } - heap.Push(&o.commitMark, ts) - if _, has := o.pendingCommits[ts]; has { - panic(fmt.Sprintf("We shouldn't have the commit ts: %d", ts)) - } - o.pendingCommits[ts] = struct{}{} return ts } @@ -151,29 +144,14 @@ func (o *oracle) doneCommit(cts uint64) { // No need to update anything. return } - o.Lock() - defer o.Unlock() - if _, has := o.pendingCommits[cts]; !has { - panic(fmt.Sprintf("We should already have the commit ts: %d", cts)) - } - delete(o.pendingCommits, cts) - - var min uint64 - for len(o.commitMark) > 0 { - ts := o.commitMark[0] - if _, has := o.pendingCommits[ts]; has { - // Still waiting for a txn to commit. - break + for { + curRead := atomic.LoadUint64(&o.curRead) + if cts <= curRead { + return } - min = ts - heap.Pop(&o.commitMark) + atomic.CompareAndSwapUint64(&o.curRead, curRead, cts) } - if min == 0 { - return - } - atomic.StoreUint64(&o.curRead, min) - // nextCommit must never be reset. } // Txn represents a Badger transaction. @@ -191,8 +169,9 @@ type Txn struct { callbacks []func() discarded bool - size int64 - count int64 + size int64 + count int64 + numIterators int32 } type pendingWritesIterator struct { @@ -285,6 +264,9 @@ func (txn *Txn) checkSize(e *Entry) error { // // It will return ErrReadOnlyTxn if update flag was set to false when creating the // transaction. +// +// The current transaction keeps a reference to the key and val byte slice +// arguments. 
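// Minimal sketch of the lock-free "advance only forward" pattern that the new
// doneCommit above uses for curRead: retry with CompareAndSwap until the stored
// value is at least ts. The watermark type is an illustrative stand-in, not the
// vendored oracle.
package main

import (
    "fmt"
    "sync/atomic"
)

type watermark struct{ v uint64 }

// advanceTo raises the mark to ts, never lowering it, without taking a lock.
func (w *watermark) advanceTo(ts uint64) {
    for {
        cur := atomic.LoadUint64(&w.v)
        if ts <= cur {
            return // someone else already advanced past ts
        }
        if atomic.CompareAndSwapUint64(&w.v, cur, ts) {
            return
        }
        // CAS lost a race; reload and retry.
    }
}

func main() {
    var w watermark
    w.advanceTo(5)
    w.advanceTo(3) // no effect: the mark only moves forward
    fmt.Println(atomic.LoadUint64(&w.v)) // 5
}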
Users must not modify key and val until the end of the transaction. func (txn *Txn) Set(key, val []byte) error { e := &Entry{ Key: key, @@ -294,36 +276,66 @@ func (txn *Txn) Set(key, val []byte) error { } // SetWithMeta adds a key-value pair to the database, along with a metadata -// byte. This byte is stored alongside the key, and can be used as an aid to +// byte. +// +// This byte is stored alongside the key, and can be used as an aid to // interpret the value or store other contextual bits corresponding to the // key-value pair. +// +// The current transaction keeps a reference to the key and val byte slice +// arguments. Users must not modify key and val until the end of the transaction. func (txn *Txn) SetWithMeta(key, val []byte, meta byte) error { e := &Entry{Key: key, Value: val, UserMeta: meta} return txn.SetEntry(e) } +// SetWithDiscard acts like SetWithMeta, but adds a marker to discard earlier +// versions of the key. +// +// This method is only useful if you have set a higher limit for +// options.NumVersionsToKeep. The default setting is 1, in which case, this +// function doesn't add any more benefit than just calling the normal +// SetWithMeta (or Set) function. If however, you have a higher setting for +// NumVersionsToKeep (in Dgraph, we set it to infinity), you can use this method +// to indicate that all the older versions can be discarded and removed during +// compactions. +// +// The current transaction keeps a reference to the key and val byte slice +// arguments. Users must not modify key and val until the end of the +// transaction. +func (txn *Txn) SetWithDiscard(key, val []byte, meta byte) error { + e := &Entry{ + Key: key, + Value: val, + UserMeta: meta, + meta: bitDiscardEarlierVersions, + } + return txn.SetEntry(e) +} + // SetWithTTL adds a key-value pair to the database, along with a time-to-live -// (TTL) setting. A key stored with with a TTL would automatically expire after -// the time has elapsed , and be eligible for garbage collection. +// (TTL) setting. A key stored with a TTL would automatically expire after the +// time has elapsed , and be eligible for garbage collection. +// +// The current transaction keeps a reference to the key and val byte slice +// arguments. Users must not modify key and val until the end of the +// transaction. func (txn *Txn) SetWithTTL(key, val []byte, dur time.Duration) error { expire := time.Now().Add(dur).Unix() e := &Entry{Key: key, Value: val, ExpiresAt: uint64(expire)} return txn.SetEntry(e) } -// SetEntry takes an Entry struct and adds the key-value pair in the struct, along -// with other metadata to the database. -func (txn *Txn) SetEntry(e *Entry) error { - switch { - case !txn.update: +func (txn *Txn) modify(e *Entry) error { + if !txn.update { return ErrReadOnlyTxn - case txn.discarded: + } else if txn.discarded { return ErrDiscardedTxn - case len(e.Key) == 0: + } else if len(e.Key) == 0 { return ErrEmptyKey - case len(e.Key) > maxKeySize: + } else if len(e.Key) > maxKeySize { return exceedsMaxKeySizeError(e.Key) - case int64(len(e.Value)) > txn.db.opt.ValueLogFileSize: + } else if int64(len(e.Value)) > txn.db.opt.ValueLogFileSize { return exceedsMaxValueSizeError(e.Value, txn.db.opt.ValueLogFileSize) } if err := txn.checkSize(e); err != nil { @@ -336,33 +348,29 @@ func (txn *Txn) SetEntry(e *Entry) error { return nil } -// Delete deletes a key. This is done by adding a delete marker for the key at commit timestamp. -// Any reads happening before this timestamp would be unaffected. 
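// The doc comments added above stress that Txn.Set/SetWithMeta/SetWithTTL keep a
// reference to the key and value slices until the transaction ends. A minimal
// illustration of why a reused buffer must be copied first; the store type below
// is a stand-in whose Set has the same retain-the-slice behaviour, not badger's Txn.
package main

import "fmt"

type store struct{ pending map[string][]byte }

func (s *store) Set(key, val []byte) { s.pending[string(key)] = val } // retains val

func main() {
    s := &store{pending: map[string][]byte{}}
    buf := make([]byte, 1)
    for i := byte(0); i < 3; i++ {
        buf[0] = i
        // Wrong: s.Set([]byte{'k', i}, buf) would leave every entry aliasing buf.
        val := append([]byte(nil), buf...) // copy, so buf can be reused safely
        s.Set([]byte{'k', i}, val)
    }
    fmt.Println(s.pending) // each entry keeps its own value
}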
Any reads after this commit would -// see the deletion. -func (txn *Txn) Delete(key []byte) error { - if !txn.update { - return ErrReadOnlyTxn - } else if txn.discarded { - return ErrDiscardedTxn - } else if len(key) == 0 { - return ErrEmptyKey - } else if len(key) > maxKeySize { - return exceedsMaxKeySizeError(key) - } +// SetEntry takes an Entry struct and adds the key-value pair in the struct, +// along with other metadata to the database. +// +// The current transaction keeps a reference to the entry passed in argument. +// Users must not modify the entry until the end of the transaction. +func (txn *Txn) SetEntry(e *Entry) error { + return txn.modify(e) +} +// Delete deletes a key. +// +// This is done by adding a delete marker for the key at commit timestamp. Any +// reads happening before this timestamp would be unaffected. Any reads after +// this commit would see the deletion. +// +// The current transaction keeps a reference to the key byte slice argument. +// Users must not modify the key until the end of the transaction. +func (txn *Txn) Delete(key []byte) error { e := &Entry{ Key: key, meta: bitDelete, } - if err := txn.checkSize(e); err != nil { - return err - } - - fp := farm.Fingerprint64(key) // Avoid dealing with byte arrays. - txn.writes = append(txn.writes, fp) - - txn.pendingWrites[string(key)] = e - return nil + return txn.modify(e) } // Get looks for key and returns corresponding Item. @@ -387,6 +395,7 @@ func (txn *Txn) Get(key []byte) (item *Item, rerr error) { item.key = key item.status = prefetched item.version = txn.readTs + item.expiresAt = e.ExpiresAt // We probably don't need to set db on item here. return item, nil } @@ -415,9 +424,17 @@ func (txn *Txn) Get(key []byte) (item *Item, rerr error) { item.db = txn.db item.vptr = vs.Value item.txn = txn + item.expiresAt = vs.ExpiresAt return item, nil } +func (txn *Txn) runCallbacks() { + for _, cb := range txn.callbacks { + cb() + } + txn.callbacks = txn.callbacks[:0] +} + // Discard discards a created transaction. This method is very important and must be called. Commit // method calls this internally, however, calling this multiple times doesn't cause any issues. So, // this can safely be called via a defer right when transaction is created. @@ -427,11 +444,12 @@ func (txn *Txn) Discard() { if txn.discarded { // Avoid a re-run. return } - txn.discarded = true - - for _, cb := range txn.callbacks { - cb() + if atomic.LoadInt32(&txn.numIterators) > 0 { + panic("Unclosed iterator at time of Txn.Discard.") } + txn.discarded = true + txn.db.orc.readMark.Done(txn.readTs) + txn.runCallbacks() if txn.update { txn.db.orc.decrRef() } @@ -468,8 +486,10 @@ func (txn *Txn) Commit(callback func(error)) error { } state := txn.db.orc + state.writeLock.Lock() commitTs := state.newCommitTs(txn) if commitTs == 0 { + state.writeLock.Unlock() return ErrConflict } @@ -488,18 +508,30 @@ func (txn *Txn) Commit(callback func(error)) error { } entries = append(entries, e) + req, err := txn.db.sendToWriteCh(entries) + state.writeLock.Unlock() + if err != nil { + return err + } + + // Need to release all locks or writes can get deadlocked. + txn.runCallbacks() + if callback == nil { // If batchSet failed, LSM would not have been updated. So, no need to rollback anything. // TODO: What if some of the txns successfully make it to value log, but others fail. // Nothing gets updated to LSM, until a restart happens. 
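// Usage sketch for the transaction lifecycle shown above: Discard is safe to
// defer even though Commit discards internally, and in this vendored badger
// version Commit takes a callback (nil makes it block until the write finishes).
// The import path is the upstream module (the copy in this repo lives under
// sessiondb/badger/vendor); directory paths and options are placeholders.
package main

import (
    "log"

    "github.com/dgraph-io/badger"
)

func main() {
    opts := badger.DefaultOptions
    opts.Dir, opts.ValueDir = "/tmp/badger-example", "/tmp/badger-example"
    db, err := badger.Open(opts)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    txn := db.NewTransaction(true) // update transaction
    defer txn.Discard()            // no-op if Commit already discarded it

    if err := txn.Set([]byte("answer"), []byte("42")); err != nil {
        log.Fatal(err)
    }
    if err := txn.Commit(nil); err != nil { // nil callback: wait for the write
        log.Fatal(err)
    }
}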
defer state.doneCommit(commitTs) - return txn.db.batchSet(entries) + return req.Wait() } - return txn.db.batchSetAsync(entries, func(err error) { + go func() { + err := req.Wait() + // Write is complete. Let's call the callback function now. state.doneCommit(commitTs) callback(err) - }) + }() + return nil } // NewTransaction creates a new transaction. Badger supports concurrent execution of transactions, @@ -523,6 +555,11 @@ func (txn *Txn) Commit(callback func(error)) error { // defer txn.Discard() // // Call various APIs. func (db *DB) NewTransaction(update bool) *Txn { + if db.opt.ReadOnly && update { + // DB is read-only, force read-only transaction. + update = false + } + txn := &Txn{ update: update, db: db, @@ -530,6 +567,7 @@ func (db *DB) NewTransaction(update bool) *Txn { count: 1, // One extra entry for BitFin. size: int64(len(txnKey) + 10), // Some buffer for the extra entry. } + db.orc.readMark.Begin(txn.readTs) if update { txn.pendingWrites = make(map[string]*Entry) txn.db.orc.addRef() diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/value.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/value.go index ff78f84..6b8de91 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/value.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/value.go @@ -45,8 +45,9 @@ import ( // Values have their first byte being byteData or byteDelete. This helps us distinguish between // a key that has never been seen and a key that has been explicitly deleted. const ( - bitDelete byte = 1 << 0 // Set if the key has been deleted. - bitValuePointer byte = 1 << 1 // Set if the value is NOT stored directly next to key. + bitDelete byte = 1 << 0 // Set if the key has been deleted. + bitValuePointer byte = 1 << 1 // Set if the value is NOT stored directly next to key. + bitDiscardEarlierVersions byte = 1 << 2 // Set if earlier versions can be discarded. // The MSB 2 bits are for transactions. bitTxn byte = 1 << 6 // Set if the entry is part of a txn. @@ -175,9 +176,75 @@ func (lf *logFile) sync() error { } var errStop = errors.New("Stop iteration") +var errTruncate = errors.New("Do truncate") type logEntry func(e Entry, vp valuePointer) error +type safeRead struct { + k []byte + v []byte + + recordOffset uint32 +} + +func (r *safeRead) Entry(reader *bufio.Reader) (*Entry, error) { + var hbuf [headerBufSize]byte + var err error + + hash := crc32.New(y.CastagnoliCrcTable) + tee := io.TeeReader(reader, hash) + if _, err = io.ReadFull(tee, hbuf[:]); err != nil { + return nil, err + } + + var h header + h.Decode(hbuf[:]) + if h.klen > maxKeySize { + return nil, errTruncate + } + kl := int(h.klen) + if cap(r.k) < kl { + r.k = make([]byte, 2*kl) + } + vl := int(h.vlen) + if cap(r.v) < vl { + r.v = make([]byte, 2*vl) + } + + e := &Entry{} + e.offset = r.recordOffset + e.Key = r.k[:kl] + e.Value = r.v[:vl] + + if _, err = io.ReadFull(tee, e.Key); err != nil { + if err == io.EOF { + err = errTruncate + } + return nil, err + } + if _, err = io.ReadFull(tee, e.Value); err != nil { + if err == io.EOF { + err = errTruncate + } + return nil, err + } + var crcBuf [4]byte + if _, err = io.ReadFull(reader, crcBuf[:]); err != nil { + if err == io.EOF { + err = errTruncate + } + return nil, err + } + crc := binary.BigEndian.Uint32(crcBuf[:]) + if crc != hash.Sum32() { + return nil, errTruncate + } + e.meta = h.meta + e.UserMeta = h.userMeta + e.ExpiresAt = h.expiresAt + return e, nil +} + // iterate iterates over log file. It doesn't not allocate new memory for every kv pair. 
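// Minimal sketch of the read-through-TeeReader checksum pattern used by
// safeRead.Entry above: every byte read for the record also feeds a Castagnoli
// CRC32, which is then compared with the trailing checksum; a mismatch marks the
// truncation point. The record layout here (4-byte length + payload + 4-byte CRC)
// is a simplified assumption, not badger's header format.
package main

import (
    "bufio"
    "bytes"
    "encoding/binary"
    "errors"
    "fmt"
    "hash/crc32"
    "io"
)

var castagnoli = crc32.MakeTable(crc32.Castagnoli)

func readRecord(r *bufio.Reader) ([]byte, error) {
    hash := crc32.New(castagnoli)
    tee := io.TeeReader(r, hash) // everything read below also updates hash

    var lenBuf [4]byte
    if _, err := io.ReadFull(tee, lenBuf[:]); err != nil {
        return nil, err
    }
    payload := make([]byte, binary.BigEndian.Uint32(lenBuf[:]))
    if _, err := io.ReadFull(tee, payload); err != nil {
        return nil, err
    }

    var crcBuf [4]byte // the checksum itself is read directly, not through the tee
    if _, err := io.ReadFull(r, crcBuf[:]); err != nil {
        return nil, err
    }
    if binary.BigEndian.Uint32(crcBuf[:]) != hash.Sum32() {
        return nil, errors.New("checksum mismatch: truncate here")
    }
    return payload, nil
}

func main() {
    var buf bytes.Buffer
    payload := []byte("hello")
    var lenBuf [4]byte
    binary.BigEndian.PutUint32(lenBuf[:], uint32(len(payload)))
    buf.Write(lenBuf[:])
    buf.Write(payload)
    h := crc32.New(castagnoli)
    h.Write(lenBuf[:])
    h.Write(payload)
    var crcBuf [4]byte
    binary.BigEndian.PutUint32(crcBuf[:], h.Sum32())
    buf.Write(crcBuf[:])

    got, err := readRecord(bufio.NewReader(&buf))
    fmt.Println(string(got), err) // hello <nil>
}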
// Therefore, the kv pair is only valid for the duration of fn call. func (vlog *valueLog) iterate(lf *logFile, offset uint32, fn logEntry) error { @@ -187,87 +254,69 @@ func (vlog *valueLog) iterate(lf *logFile, offset uint32, fn logEntry) error { } reader := bufio.NewReader(lf.fd) - var hbuf [headerBufSize]byte - var h header - k := make([]byte, 1<<10) - v := make([]byte, 1<<20) + read := &safeRead{ + k: make([]byte, 10), + v: make([]byte, 10), + recordOffset: offset, + } truncate := false - recordOffset := offset + var lastCommit uint64 + var validEndOffset uint32 for { - hash := crc32.New(y.CastagnoliCrcTable) - tee := io.TeeReader(reader, hash) - - // TODO: Move this entry decode into structs.go - if _, err = io.ReadFull(tee, hbuf[:]); err != nil { - if err == io.EOF { - break - } else if err == io.ErrUnexpectedEOF { - truncate = true - break - } - return err - } - - var e Entry - e.offset = recordOffset - h.Decode(hbuf[:]) - if h.klen > maxKeySize { + e, err := read.Entry(reader) + if err == io.EOF { + break + } else if err == io.ErrUnexpectedEOF || err == errTruncate { truncate = true break - } - vl := int(h.vlen) - if cap(v) < vl { - v = make([]byte, 2*vl) + } else if err != nil { + return err + } else if e == nil { + continue } - kl := int(h.klen) - if cap(k) < kl { - k = make([]byte, 2*kl) - } - e.Key = k[:kl] - e.Value = v[:vl] + var vp valuePointer + vp.Len = uint32(headerBufSize + len(e.Key) + len(e.Value) + 4) // len(crcBuf) + read.recordOffset += vp.Len + + vp.Offset = e.offset + vp.Fid = lf.fid - if _, err = io.ReadFull(tee, e.Key); err != nil { - if err == io.EOF || err == io.ErrUnexpectedEOF { + if e.meta&bitTxn > 0 { + txnTs := y.ParseTs(e.Key) + if lastCommit == 0 { + lastCommit = txnTs + } + if lastCommit != txnTs { truncate = true break } - return err - } - if _, err = io.ReadFull(tee, e.Value); err != nil { - if err == io.EOF || err == io.ErrUnexpectedEOF { + + } else if e.meta&bitFinTxn > 0 { + txnTs, err := strconv.ParseUint(string(e.Value), 10, 64) + if err != nil || lastCommit != txnTs { truncate = true break } - return err - } + // Got the end of txn. Now we can store them. + lastCommit = 0 + validEndOffset = read.recordOffset - var crcBuf [4]byte - if _, err = io.ReadFull(reader, crcBuf[:]); err != nil { - if err == io.EOF || err == io.ErrUnexpectedEOF { + } else { + if lastCommit != 0 { + // This is most likely an entry which was moved as part of GC. + // We shouldn't get this entry in the middle of a transaction. truncate = true break } - return err - } - crc := binary.BigEndian.Uint32(crcBuf[:]) - if crc != hash.Sum32() { - truncate = true - break + validEndOffset = read.recordOffset } - e.meta = h.meta - e.UserMeta = h.userMeta - e.ExpiresAt = h.expiresAt - var vp valuePointer - vp.Len = headerBufSize + h.klen + h.vlen + uint32(len(crcBuf)) - recordOffset += vp.Len - - vp.Offset = e.offset - vp.Fid = lf.fid - - if err := fn(e, vp); err != nil { + if vlog.opt.ReadOnly { + return ErrReplayNeeded + } + if err := fn(*e, vp); err != nil { if err == errStop { break } @@ -275,33 +324,32 @@ func (vlog *valueLog) iterate(lf *logFile, offset uint32, fn logEntry) error { } } - if truncate && len(lf.fmap) == 0 { + if vlog.opt.Truncate && truncate && len(lf.fmap) == 0 { // Only truncate if the file isn't mmaped. Otherwise, Windows would puke. 
- if err := lf.fd.Truncate(int64(recordOffset)); err != nil { + if err := lf.fd.Truncate(int64(validEndOffset)); err != nil { return err } + } else if truncate { + return ErrTruncateNeeded } return nil } -func (vlog *valueLog) rewrite(f *logFile) error { +func (vlog *valueLog) rewrite(f *logFile, tr trace.Trace) error { maxFid := atomic.LoadUint32(&vlog.maxFid) y.AssertTruef(uint32(f.fid) < maxFid, "fid to move: %d. Current max fid: %d", f.fid, maxFid) - - elog := trace.NewEventLog("badger", "vlog-rewrite") - defer elog.Finish() - elog.Printf("Rewriting fid: %d", f.fid) + tr.LazyPrintf("Rewriting fid: %d", f.fid) wb := make([]*Entry, 0, 1000) var size int64 y.AssertTrue(vlog.kv != nil) - var count int + var count, moved int fe := func(e Entry) error { count++ - if count%10000 == 0 { - elog.Printf("Processing entry %d", count) + if count%100000 == 0 { + tr.LazyPrintf("Processing entry %d", count) } vs, err := vlog.kv.get(e.Key) @@ -326,18 +374,28 @@ func (vlog *valueLog) rewrite(f *logFile) error { return nil } if vp.Fid == f.fid && vp.Offset == e.offset { + moved++ // This new entry only contains the key, and a pointer to the value. ne := new(Entry) - ne.meta = 0 // Remove all bits. + ne.meta = 0 // Remove all bits. Different keyspace doesn't need these bits. ne.UserMeta = e.UserMeta - ne.Key = make([]byte, len(e.Key)) - copy(ne.Key, e.Key) - ne.Value = make([]byte, len(e.Value)) - copy(ne.Value, e.Value) + + // Create a new key in a separate keyspace, prefixed by moveKey. We are not + // allowed to rewrite an older version of key in the LSM tree, because then this older + // version would be at the top of the LSM tree. To work correctly, reads expect the + // latest versions to be at the top, and the older versions at the bottom. + if bytes.HasPrefix(e.Key, badgerMove) { + ne.Key = append([]byte{}, e.Key...) + } else { + ne.Key = append([]byte{}, badgerMove...) + ne.Key = append(ne.Key, e.Key...) + } + + ne.Value = append([]byte{}, e.Value...) wb = append(wb, ne) size += int64(e.estimateSize(vlog.opt.ValueThreshold)) if size >= 64*mi { - elog.Printf("request has %d entries, size %d", len(wb), size) + tr.LazyPrintf("request has %d entries, size %d", len(wb), size) if err := vlog.kv.batchSet(wb); err != nil { return err } @@ -357,7 +415,7 @@ func (vlog *valueLog) rewrite(f *logFile) error { return err } - elog.Printf("request has %d entries, size %d", len(wb), size) + tr.LazyPrintf("request has %d entries, size %d", len(wb), size) batchSize := 1024 var loops int for i := 0; i < len(wb); { @@ -374,16 +432,16 @@ func (vlog *valueLog) rewrite(f *logFile) error { if err == ErrTxnTooBig { // Decrease the batch size to half. batchSize = batchSize / 2 - elog.Printf("Dropped batch size to %d", batchSize) + tr.LazyPrintf("Dropped batch size to %d", batchSize) continue } return err } i += batchSize } - elog.Printf("Processed %d entries in %d loops", len(wb), loops) - - elog.Printf("Removing fid: %d", f.fid) + tr.LazyPrintf("Processed %d entries in %d loops", len(wb), loops) + tr.LazyPrintf("Total entries: %d. Moved: %d", count, moved) + tr.LazyPrintf("Removing fid: %d", f.fid) var deleteFileNow bool // Entries written to LSM. Remove the older file now. 
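// Sketch of the replay/truncation policy introduced in iterate above: entries
// that belong to a transaction only extend the "valid" region once the
// end-of-transaction marker with a matching commit ts is seen; anything after
// the last complete transaction is truncated away. The entry type and the way
// records are fed in are assumptions for illustration.
package main

import "fmt"

type entry struct {
    commitTs uint64 // 0 means the entry is not part of a transaction
    finTxn   bool   // true for the end-of-transaction marker
    size     uint32
}

// validEnd returns the offset of the last byte that is safe to keep.
func validEnd(entries []entry) uint32 {
    var offset, validEndOffset uint32
    var lastCommit uint64
    for _, e := range entries {
        offset += e.size
        switch {
        case e.finTxn:
            if e.commitTs != lastCommit {
                return validEndOffset // corrupt txn: stop here
            }
            lastCommit = 0
            validEndOffset = offset // the whole txn made it to disk
        case e.commitTs != 0:
            if lastCommit == 0 {
                lastCommit = e.commitTs
            } else if lastCommit != e.commitTs {
                return validEndOffset // interleaved txn: stop here
            }
        default:
            if lastCommit != 0 {
                return validEndOffset // non-txn entry inside a txn: stop here
            }
            validEndOffset = offset
        }
    }
    return validEndOffset
}

func main() {
    es := []entry{
        {commitTs: 7, size: 10}, {commitTs: 7, size: 10}, {commitTs: 7, finTxn: true, size: 5},
        {commitTs: 9, size: 10}, // txn 9 never finished: gets dropped
    }
    fmt.Println(validEnd(es)) // 25
}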
{ @@ -409,6 +467,63 @@ func (vlog *valueLog) rewrite(f *logFile) error { return nil } +func (vlog *valueLog) deleteMoveKeysFor(fid uint32, tr trace.Trace) { + db := vlog.kv + var result []*Entry + var count, pointers uint64 + tr.LazyPrintf("Iterating over move keys to find invalids for fid: %d", fid) + err := db.View(func(txn *Txn) error { + opt := DefaultIteratorOptions + opt.internalAccess = true + opt.PrefetchValues = false + itr := txn.NewIterator(opt) + defer itr.Close() + + for itr.Seek(badgerMove); itr.ValidForPrefix(badgerMove); itr.Next() { + count++ + item := itr.Item() + if item.meta&bitValuePointer == 0 { + continue + } + pointers++ + var vp valuePointer + vp.Decode(item.vptr) + if vp.Fid == fid { + e := &Entry{Key: item.KeyCopy(nil), meta: bitDelete} + result = append(result, e) + } + } + return nil + }) + if err != nil { + tr.LazyPrintf("Got error while iterating move keys: %v", err) + tr.SetError() + return + } + tr.LazyPrintf("Num total move keys: %d. Num pointers: %d", count, pointers) + tr.LazyPrintf("Number of invalid move keys found: %d", len(result)) + batchSize := 10240 + for i := 0; i < len(result); { + end := i + batchSize + if end > len(result) { + end = len(result) + } + if err := db.batchSet(result[i:end]); err != nil { + if err == ErrTxnTooBig { + batchSize /= 2 + tr.LazyPrintf("Dropped batch size to %d", batchSize) + continue + } + tr.LazyPrintf("Error while doing batchSet: %v", err) + tr.SetError() + return + } + i += batchSize + } + tr.LazyPrintf("Move keys deletion done.") + return +} + func (vlog *valueLog) incrIteratorCount() { atomic.AddInt32(&vlog.numActiveIterators, 1) } @@ -470,6 +585,7 @@ type valueLog struct { kv *DB maxFid uint32 writableLogOffset uint32 + numEntriesWritten uint32 opt Options garbageCh chan struct{} @@ -484,7 +600,7 @@ func (vlog *valueLog) fpath(fid uint32) string { return vlogFilePath(vlog.dirPath, fid) } -func (vlog *valueLog) openOrCreateFiles() error { +func (vlog *valueLog) openOrCreateFiles(readOnly bool) error { files, err := ioutil.ReadDir(vlog.dirPath) if err != nil { return errors.Wrapf(err, "Error while opening value log") @@ -519,12 +635,18 @@ func (vlog *valueLog) openOrCreateFiles() error { vlog.maxFid = uint32(maxFid) // Open all previous log files as read only. Open the last log file - // as read write. + // as read write (unless the DB is read only). 
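// Sketch of the retry loop used by rewrite and deleteMoveKeysFor above: write
// entries in batches and, when the store reports the batch is too big (badger's
// ErrTxnTooBig), halve the batch size and retry the same window. errTooBig and
// the flush function are illustrative stand-ins.
package main

import (
    "errors"
    "fmt"
)

var errTooBig = errors.New("txn too big")

func writeBatches(items []int, flush func([]int) error) error {
    batchSize := 8
    for i := 0; i < len(items); {
        end := i + batchSize
        if end > len(items) {
            end = len(items)
        }
        if err := flush(items[i:end]); err != nil {
            if err == errTooBig && batchSize > 1 {
                batchSize /= 2 // shrink and retry the same slice
                continue
            }
            return err
        }
        i += batchSize
    }
    return nil
}

func main() {
    flush := func(b []int) error {
        if len(b) > 3 {
            return errTooBig
        }
        fmt.Println("flushed", b)
        return nil
    }
    _ = writeBatches([]int{1, 2, 3, 4, 5, 6, 7}, flush)
}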
for fid, lf := range vlog.filesMap { if fid == maxFid { - if lf.fd, err = y.OpenExistingSyncedFile(vlog.fpath(fid), - vlog.opt.SyncWrites); err != nil { - return errors.Wrapf(err, "Unable to open value log file as RDWR") + var flags uint32 + if vlog.opt.SyncWrites { + flags |= y.Sync + } + if readOnly { + flags |= y.ReadOnly + } + if lf.fd, err = y.OpenExistingFile(vlog.fpath(fid), flags); err != nil { + return errors.Wrapf(err, "Unable to open value log file") } } else { if err := lf.openReadOnly(); err != nil { @@ -548,6 +670,7 @@ func (vlog *valueLog) createVlogFile(fid uint32) (*logFile, error) { path := vlog.fpath(fid) lf := &logFile{fid: fid, path: path, loadingMode: vlog.opt.ValueLogLoadingMode} vlog.writableLogOffset = 0 + vlog.numEntriesWritten = 0 var err error if lf.fd, err = y.CreateSyncedFile(path, vlog.opt.SyncWrites); err != nil { @@ -570,7 +693,7 @@ func (vlog *valueLog) Open(kv *DB, opt Options) error { vlog.opt = opt vlog.kv = kv vlog.filesMap = make(map[uint32]*logFile) - if err := vlog.openOrCreateFiles(); err != nil { + if err := vlog.openOrCreateFiles(kv.opt.ReadOnly); err != nil { return errors.Wrapf(err, "Unable to open value log") } @@ -592,7 +715,7 @@ func (vlog *valueLog) Close() error { err = munmapErr } - if id == vlog.maxFid { + if !vlog.opt.ReadOnly && id == vlog.maxFid { // truncate writable log file to correct offset. if truncErr := f.fd.Truncate( int64(vlog.writableLogOffset)); truncErr != nil && err == nil { @@ -631,7 +754,8 @@ func (vlog *valueLog) sortedFids() []uint32 { func (vlog *valueLog) Replay(ptr valuePointer, fn logEntry) error { fid := ptr.Fid offset := ptr.Offset + ptr.Len - vlog.elog.Printf("Seeking at value pointer: %+v\n", ptr) + // @kataras removed: vlog.elog.Printf("Seeking at value pointer: %+v\n", ptr) + // @kataras removed: log.Printf("Replaying from value pointer: %+v\n", ptr) fids := vlog.sortedFids() @@ -644,7 +768,11 @@ func (vlog *valueLog) Replay(ptr valuePointer, fn logEntry) error { of = 0 } f := vlog.filesMap[id] + // @kataras removed: + // log.Printf("Iterating file id: %d", id) + // now := time.Now() err := vlog.iterate(f, of, fn) + // @kataras removed: log.Printf("Iteration took: %s\n", time.Since(now)) if err != nil { return errors.Wrapf(err, "Unable to replay value log: %q", f.path) } @@ -667,6 +795,14 @@ type request struct { Err error } +func (req *request) Wait() error { + req.Wg.Wait() + req.Entries = nil + err := req.Err + requestPool.Put(req) + return err +} + // sync is thread-unsafe and should not be called concurrently with write. 
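// Sketch of the pooled request pattern behind req.Wait() above: the writer
// goroutine calls Done on the WaitGroup and fills Err; the caller Waits, reads
// the error, and returns the request to a sync.Pool for reuse. The fields mirror,
// but are not, the vendored request struct.
package main

import (
    "fmt"
    "sync"
)

type request struct {
    Entries []string
    Wg      sync.WaitGroup
    Err     error
}

var requestPool = sync.Pool{New: func() interface{} { return new(request) }}

func (req *request) Wait() error {
    req.Wg.Wait()
    req.Entries = nil // release references before pooling
    err := req.Err
    requestPool.Put(req)
    return err
}

func send(entries []string) *request {
    req := requestPool.Get().(*request)
    req.Entries, req.Err = entries, nil
    req.Wg.Add(1)
    go func() { // stand-in for the value-log writer goroutine
        // ... write req.Entries to disk here ...
        req.Wg.Done()
    }()
    return req
}

func main() {
    req := send([]string{"a", "b"})
    fmt.Println(req.Wait()) // <nil>
}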
func (vlog *valueLog) sync() error { if vlog.opt.SyncWrites { @@ -718,14 +854,15 @@ func (vlog *valueLog) write(reqs []*request) error { atomic.AddUint32(&vlog.writableLogOffset, uint32(n)) vlog.buf.Reset() - if vlog.writableOffset() > uint32(vlog.opt.ValueLogFileSize) { + if vlog.writableOffset() > uint32(vlog.opt.ValueLogFileSize) || + vlog.numEntriesWritten > vlog.opt.ValueLogMaxEntries { var err error if err = curlf.doneWriting(vlog.writableLogOffset); err != nil { return err } newid := atomic.AddUint32(&vlog.maxFid, 1) - y.AssertTruef(newid <= math.MaxUint32, "newid will overflow uint32: %v", newid) + y.AssertTruef(newid > 0, "newid has overflown uint32: %v", newid) newlf, err := vlog.createVlogFile(newid) if err != nil { return err @@ -756,11 +893,16 @@ func (vlog *valueLog) write(reqs []*request) error { } p.Len = uint32(plen) b.Ptrs = append(b.Ptrs, p) - - if p.Offset > uint32(vlog.opt.ValueLogFileSize) { - if err := toDisk(); err != nil { - return err - } + } + vlog.numEntriesWritten += uint32(len(b.Entries)) + // We write to disk here so that all entries that are part of the same transaction are + // written to the same vlog file. + writeNow := + vlog.writableOffset()+uint32(vlog.buf.Len()) > uint32(vlog.opt.ValueLogFileSize) || + vlog.numEntriesWritten > uint32(vlog.opt.ValueLogMaxEntries) + if writeNow { + if err := toDisk(); err != nil { + return err } } } @@ -807,7 +949,7 @@ func (vlog *valueLog) Read(vp valuePointer, s *y.Slice) ([]byte, func(), error) func (vlog *valueLog) readValueBytes(vp valuePointer, s *y.Slice) ([]byte, func(), error) { lf, err := vlog.getFileRLocked(vp.Fid) if err != nil { - return nil, nil, errors.Wrapf(err, "Unable to read from value log: %+v", vp) + return nil, nil, err } buf, err := lf.read(vp, s) @@ -834,11 +976,15 @@ func valueBytesToEntry(buf []byte) (e Entry) { return } -func (vlog *valueLog) pickLog(head valuePointer) *logFile { +func (vlog *valueLog) pickLog(head valuePointer, tr trace.Trace) (files []*logFile) { vlog.filesLock.RLock() defer vlog.filesLock.RUnlock() fids := vlog.sortedFids() - if len(fids) <= 1 || head.Fid == 0 { + if len(fids) <= 1 { + tr.LazyPrintf("Only one or less value log file.") + return nil + } else if head.Fid == 0 { + tr.LazyPrintf("Head pointer is at zero.") return nil } @@ -860,7 +1006,10 @@ func (vlog *valueLog) pickLog(head valuePointer) *logFile { vlog.lfDiscardStats.Unlock() if candidate.fid != math.MaxUint32 { // Found a candidate - return vlog.filesMap[candidate.fid] + tr.LazyPrintf("Found candidate via discard stats: %v", candidate) + files = append(files, vlog.filesMap[candidate.fid]) + } else { + tr.LazyPrintf("Could not find candidate via discard stats. Randomly picking one.") } // Fallback to randomly picking a log file @@ -872,13 +1021,16 @@ func (vlog *valueLog) pickLog(head valuePointer) *logFile { } } if idxHead == 0 { // Not found or first file + tr.LazyPrintf("Could not find any file.") return nil } idx := rand.Intn(idxHead) // Don’t include head.Fid. We pick a random file before it. if idx > 0 { idx = rand.Intn(idx + 1) // Another level of rand to favor smaller fids. 
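// Sketch of the rotation rule added to write() above: a value-log file is closed
// and a new one started when either the byte offset (including buffered data)
// would exceed ValueLogFileSize or the number of entries written exceeds
// ValueLogMaxEntries. Field and type names are illustrative, not the vendored struct.
package main

import "fmt"

type vlogState struct {
    writableOffset    uint32
    numEntriesWritten uint32
    buffered          uint32 // bytes staged in memory, not yet flushed
}

type vlogOptions struct {
    ValueLogFileSize   uint32
    ValueLogMaxEntries uint32
}

func shouldRotate(s vlogState, opt vlogOptions) bool {
    return s.writableOffset+s.buffered > opt.ValueLogFileSize ||
        s.numEntriesWritten > opt.ValueLogMaxEntries
}

func main() {
    opt := vlogOptions{ValueLogFileSize: 1 << 20, ValueLogMaxEntries: 1000}
    fmt.Println(shouldRotate(vlogState{writableOffset: 1 << 19}, opt))              // false
    fmt.Println(shouldRotate(vlogState{writableOffset: 1 << 20, buffered: 1}, opt)) // true: size limit hit
    fmt.Println(shouldRotate(vlogState{numEntriesWritten: 1001}, opt))              // true: entry limit hit
}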
} - return vlog.filesMap[fids[idx]] + tr.LazyPrintf("Randomly chose fid: %d", fids[idx]) + files = append(files, vlog.filesMap[fids[idx]]) + return files } func discardEntry(e Entry, vs y.ValueStruct) bool { @@ -900,13 +1052,7 @@ func discardEntry(e Entry, vs y.ValueStruct) bool { return false } -func (vlog *valueLog) doRunGC(gcThreshold float64, head valuePointer) (err error) { - // Pick a log file for GC - lf := vlog.pickLog(head) - if lf == nil { - return ErrNoRewrite - } - +func (vlog *valueLog) doRunGC(lf *logFile, discardRatio float64, tr trace.Trace) (err error) { // Update stats before exiting defer func() { if err == nil { @@ -918,39 +1064,57 @@ func (vlog *valueLog) doRunGC(gcThreshold float64, head valuePointer) (err error type reason struct { total float64 - keep float64 discard float64 + count int } - var r reason - var window = 100.0 - count := 0 + fi, err := lf.fd.Stat() + if err != nil { + tr.LazyPrintf("Error while finding file size: %v", err) + tr.SetError() + return err + } + + // Set up the sampling window sizes. + sizeWindow := float64(fi.Size()) * 0.1 // 10% of the file as window. + countWindow := int(float64(vlog.opt.ValueLogMaxEntries) * 0.01) // 1% of num entries. + tr.LazyPrintf("Size window: %5.2f. Count window: %d.", sizeWindow, countWindow) // Pick a random start point for the log. - skipFirstM := float64(rand.Intn(int(vlog.opt.ValueLogFileSize/mi))) - window + skipFirstM := float64(rand.Int63n(fi.Size())) // Pick a random starting location. + skipFirstM -= sizeWindow // Avoid hitting EOF by moving back by window. + skipFirstM /= float64(mi) // Convert to MBs. + tr.LazyPrintf("Skip first %5.2f MB of file of size: %d MB", skipFirstM, fi.Size()/mi) var skipped float64 + var r reason start := time.Now() y.AssertTrue(vlog.kv != nil) s := new(y.Slice) + var numIterations int err = vlog.iterate(lf, 0, func(e Entry, vp valuePointer) error { - esz := float64(vp.Len) / (1 << 20) // in MBs. +4 for the CAS stuff. - skipped += esz + numIterations++ + esz := float64(vp.Len) / (1 << 20) // in MBs. if skipped < skipFirstM { + skipped += esz return nil } - count++ - if count%100 == 0 { - time.Sleep(time.Millisecond) + // Sample until we reach the window sizes or exceed 10 seconds. + if r.count > countWindow { + tr.LazyPrintf("Stopping sampling after %d entries.", countWindow) + return errStop } - r.total += esz - if r.total > window { + if r.total > sizeWindow { + tr.LazyPrintf("Stopping sampling after reaching window size.") return errStop } if time.Since(start) > 10*time.Second { + tr.LazyPrintf("Stopping sampling after 10 seconds.") return errStop } + r.total += esz + r.count++ vs, err := vlog.kv.get(e.Key) if err != nil { @@ -977,7 +1141,6 @@ func (vlog *valueLog) doRunGC(gcThreshold float64, head valuePointer) (err error } if vp.Fid == lf.fid && vp.Offset == e.offset { // This is still the active entry. This would need to be rewritten. - r.keep += esz } else { vlog.elog.Printf("Reason=%+v\n", r) @@ -998,21 +1161,23 @@ func (vlog *valueLog) doRunGC(gcThreshold float64, head valuePointer) (err error }) if err != nil { - vlog.elog.Errorf("Error while iterating for RunGC: %v", err) + tr.LazyPrintf("Error while iterating for RunGC: %v", err) + tr.SetError() return err } - vlog.elog.Printf("Fid: %d Data status=%+v\n", lf.fid, r) + tr.LazyPrintf("Fid: %d. Skipped: %5.2fMB Num iterations: %d. 
Data status=%+v\n", + lf.fid, skipped, numIterations, r) - if r.total < 10.0 || r.discard < gcThreshold*r.total { - vlog.elog.Printf("Skipping GC on fid: %d\n\n", lf.fid) + // If we couldn't sample at least a 1000 KV pairs or at least 75% of the window size, + // and what we can discard is below the threshold, we should skip the rewrite. + if (r.count < countWindow && r.total < sizeWindow*0.75) || r.discard < discardRatio*r.total { + tr.LazyPrintf("Skipping GC on fid: %d", lf.fid) return ErrNoRewrite } - - vlog.elog.Printf("REWRITING VLOG %d\n", lf.fid) - if err = vlog.rewrite(lf); err != nil { + if err = vlog.rewrite(lf, tr); err != nil { return err } - vlog.elog.Printf("Done rewriting.") + tr.LazyPrintf("Done rewriting.") return nil } @@ -1026,24 +1191,34 @@ func (vlog *valueLog) waitOnGC(lc *y.Closer) { vlog.garbageCh <- struct{}{} } -func (vlog *valueLog) runGC(gcThreshold float64, head valuePointer) error { +func (vlog *valueLog) runGC(discardRatio float64, head valuePointer) error { select { case vlog.garbageCh <- struct{}{}: - // Run GC - var ( - err error - count int - ) - for { - err = vlog.doRunGC(gcThreshold, head) - if err != nil { - break - } - count++ + // Pick a log file for GC. + tr := trace.New("Badger.ValueLog", "GC") + tr.SetMaxEvents(100) + defer func() { + tr.Finish() + <-vlog.garbageCh + }() + + var err error + files := vlog.pickLog(head, tr) + if len(files) == 0 { + tr.LazyPrintf("PickLog returned zero results.") + return ErrNoRewrite } - <-vlog.garbageCh - if err == ErrNoRewrite && count > 0 { - return nil + tried := make(map[uint32]bool) + for _, lf := range files { + if _, done := tried[lf.fid]; done { + continue + } + tried[lf.fid] = true + err = vlog.doRunGC(lf, discardRatio, tr) + if err == nil { + vlog.deleteMoveKeysFor(lf.fid, tr) + return nil + } } return err default: @@ -1051,12 +1226,10 @@ func (vlog *valueLog) runGC(gcThreshold float64, head valuePointer) error { } } -func (vlog *valueLog) updateGCStats(item *Item) { - if item.meta&bitValuePointer > 0 { - var vp valuePointer - vp.Decode(item.vptr) - vlog.lfDiscardStats.Lock() - vlog.lfDiscardStats.m[vp.Fid] += int64(vp.Len) - vlog.lfDiscardStats.Unlock() +func (vlog *valueLog) updateGCStats(stats map[uint32]int64) { + vlog.lfDiscardStats.Lock() + for fid, sz := range stats { + vlog.lfDiscardStats.m[fid] += sz } + vlog.lfDiscardStats.Unlock() } diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/LICENSE b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/LICENSE index 1b1b192..0f64693 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/LICENSE +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/LICENSE @@ -1,7 +1,4 @@ -Go support for Protocol Buffers - Google's data interchange format - Copyright 2010 The Go Authors. All rights reserved. 
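// Sketch of the sampling-based GC decision in doRunGC above: sample a window of
// the log (10% of the file size or 1% of the max entry count), measure how much
// of the sampled data is discardable, and only rewrite the file when the discard
// ratio clears the threshold. All numbers and types here are illustrative.
package main

import "fmt"

type sample struct {
    totalMB   float64 // size of sampled entries
    discardMB float64 // size of sampled entries that are stale or deleted
    count     int     // number of sampled entries
}

func shouldRewrite(s sample, sizeWindowMB float64, countWindow int, discardRatio float64) bool {
    // Not enough data sampled to trust the estimate.
    if s.count < countWindow && s.totalMB < sizeWindowMB*0.75 {
        return false
    }
    return s.discardMB >= discardRatio*s.totalMB
}

func main() {
    fileSizeMB := 128.0
    sizeWindow := fileSizeMB * 0.1     // 10% of the file
    countWindow := int(10000.0 * 0.01) // 1% of ValueLogMaxEntries
    s := sample{totalMB: 13, discardMB: 8, count: 150}
    fmt.Println(shouldRewrite(s, sizeWindow, countWindow, 0.5)) // true: 8 >= 0.5*13
}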
-https://github.com/golang/protobuf Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go index 110ae13..a569810 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go @@ -56,6 +56,8 @@ import ( stpb "github.com/golang/protobuf/ptypes/struct" ) +const secondInNanos = int64(time.Second / time.Nanosecond) + // Marshaler is a configurable object for converting between // protocol buffer objects and a JSON representation for them. type Marshaler struct { @@ -118,6 +120,14 @@ type JSONPBUnmarshaler interface { // Marshal marshals a protocol buffer into JSON. func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error { + v := reflect.ValueOf(pb) + if pb == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { + return errors.New("Marshal called with nil") + } + // Check for unset required fields first. + if err := checkRequiredFields(pb); err != nil { + return err + } writer := &errWriter{writer: out} return m.marshalObject(writer, pb, "", "") } @@ -190,13 +200,22 @@ func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeU // Any is a bit more involved. return m.marshalAny(out, v, indent) case "Duration": - // "Generated output always contains 3, 6, or 9 fractional digits, + // "Generated output always contains 0, 3, 6, or 9 fractional digits, // depending on required precision." s, ns := s.Field(0).Int(), s.Field(1).Int() - d := time.Duration(s)*time.Second + time.Duration(ns)*time.Nanosecond - x := fmt.Sprintf("%.9f", d.Seconds()) + if ns <= -secondInNanos || ns >= secondInNanos { + return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) + } + if (s > 0 && ns < 0) || (s < 0 && ns > 0) { + return errors.New("signs of seconds and nanos do not match") + } + if s < 0 { + ns = -ns + } + x := fmt.Sprintf("%d.%09d", s, ns) x = strings.TrimSuffix(x, "000") x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") out.write(`"`) out.write(x) out.write(`s"`) @@ -207,13 +226,17 @@ func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeU return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent) case "Timestamp": // "RFC 3339, where generated output will always be Z-normalized - // and uses 3, 6 or 9 fractional digits." + // and uses 0, 3, 6 or 9 fractional digits." s, ns := s.Field(0).Int(), s.Field(1).Int() + if ns < 0 || ns >= secondInNanos { + return fmt.Errorf("ns out of range [0, %v)", secondInNanos) + } t := time.Unix(s, ns).UTC() // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). 
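// Sketch of the fractional-digit trimming shown above for Duration (and, in the
// same spirit, Timestamp): print the nanoseconds with 9 digits, then strip
// trailing "000" groups so the output carries exactly 0, 3, 6, or 9 fractional
// digits. Negative values and range checks are omitted here for brevity.
package main

import (
    "fmt"
    "strings"
)

func formatDuration(secs, nanos int64) string {
    x := fmt.Sprintf("%d.%09d", secs, nanos)
    x = strings.TrimSuffix(x, "000")
    x = strings.TrimSuffix(x, "000")
    x = strings.TrimSuffix(x, ".000")
    return x + "s"
}

func main() {
    fmt.Println(formatDuration(3, 0))         // "3s"
    fmt.Println(formatDuration(3, 500000000)) // "3.500s"
    fmt.Println(formatDuration(3, 500100000)) // "3.500100s"
    fmt.Println(formatDuration(3, 500100001)) // "3.500100001s"
}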
x := t.Format("2006-01-02T15:04:05.000000000") x = strings.TrimSuffix(x, "000") x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") out.write(`"`) out.write(x) out.write(`Z"`) @@ -632,7 +655,10 @@ func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error { if err := dec.Decode(&inputValue); err != nil { return err } - return u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil) + if err := u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil); err != nil { + return err + } + return checkRequiredFields(pb) } // Unmarshal unmarshals a JSON object stream into a protocol @@ -752,7 +778,7 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe return nil case "Duration": - unq, err := strconv.Unquote(string(inputValue)) + unq, err := unquote(string(inputValue)) if err != nil { return err } @@ -769,7 +795,7 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe target.Field(1).SetInt(ns) return nil case "Timestamp": - unq, err := strconv.Unquote(string(inputValue)) + unq, err := unquote(string(inputValue)) if err != nil { return err } @@ -803,7 +829,7 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe return fmt.Errorf("bad ListValue: %v", err) } - target.Field(0).Set(reflect.ValueOf(make([]*stpb.Value, len(s), len(s)))) + target.Field(0).Set(reflect.ValueOf(make([]*stpb.Value, len(s)))) for i, sv := range s { if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil { return err @@ -816,7 +842,7 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe target.Field(0).Set(reflect.ValueOf(&stpb.Value_NullValue{})) } else if v, err := strconv.ParseFloat(ivStr, 0); err == nil { target.Field(0).Set(reflect.ValueOf(&stpb.Value_NumberValue{v})) - } else if v, err := strconv.Unquote(ivStr); err == nil { + } else if v, err := unquote(ivStr); err == nil { target.Field(0).Set(reflect.ValueOf(&stpb.Value_StringValue{v})) } else if v, err := strconv.ParseBool(ivStr); err == nil { target.Field(0).Set(reflect.ValueOf(&stpb.Value_BoolValue{v})) @@ -852,6 +878,9 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe target.Set(reflect.New(targetType.Elem())) target = target.Elem() } + if targetType.Kind() != reflect.Int32 { + return fmt.Errorf("invalid target %q for enum %s", targetType.Kind(), prop.Enum) + } target.SetInt(int64(n)) return nil } @@ -973,13 +1002,6 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe } if mp != nil { target.Set(reflect.MakeMap(targetType)) - var keyprop, valprop *proto.Properties - if prop != nil { - // These could still be nil if the protobuf metadata is broken somehow. - // TODO: This won't work because the fields are unexported. - // We should probably just reparse them. - //keyprop, valprop = prop.mkeyprop, prop.mvalprop - } for ks, raw := range mp { // Unmarshal map key. The core json library already decoded the key into a // string, so we handle that specially. Other types were quoted post-serialization. @@ -988,14 +1010,16 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe k = reflect.ValueOf(ks) } else { k = reflect.New(targetType.Key()).Elem() - if err := u.unmarshalValue(k, json.RawMessage(ks), keyprop); err != nil { + // TODO: pass the correct Properties if needed. + if err := u.unmarshalValue(k, json.RawMessage(ks), nil); err != nil { return err } } // Unmarshal map value. 
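// Sketch of the unquote helper used above (its body appears a little further
// down in this diff): JSON string syntax is not Go string-literal syntax, so
// feeding the raw token to encoding/json handles escapes, such as \/ below,
// that strconv.Unquote rejects.
package main

import (
    "encoding/json"
    "fmt"
    "strconv"
)

func unquote(s string) (string, error) {
    var ret string
    err := json.Unmarshal([]byte(s), &ret)
    return ret, err
}

func main() {
    raw := `"3s\/approx"` // a valid JSON string token

    v1, err1 := strconv.Unquote(raw)
    fmt.Println(v1, err1) // rejected: \/ is not a Go escape sequence

    v2, err2 := unquote(raw)
    fmt.Println(v2, err2) // "3s/approx" <nil>
}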
v := reflect.New(targetType.Elem()).Elem() - if err := u.unmarshalValue(v, raw, valprop); err != nil { + // TODO: pass the correct Properties if needed. + if err := u.unmarshalValue(v, raw, nil); err != nil { return err } target.SetMapIndex(k, v) @@ -1004,13 +1028,6 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe return nil } - // 64-bit integers can be encoded as strings. In this case we drop - // the quotes and proceed as normal. - isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 - if isNum && strings.HasPrefix(string(inputValue), `"`) { - inputValue = inputValue[1 : len(inputValue)-1] - } - // Non-finite numbers can be encoded as strings. isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 if isFloat { @@ -1020,10 +1037,25 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe } } + // integers & floats can be encoded as strings. In this case we drop + // the quotes and proceed as normal. + isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 || + targetType.Kind() == reflect.Int32 || targetType.Kind() == reflect.Uint32 || + targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 + if isNum && strings.HasPrefix(string(inputValue), `"`) { + inputValue = inputValue[1 : len(inputValue)-1] + } + // Use the encoding/json for parsing other value types. return json.Unmarshal(inputValue, target.Addr().Interface()) } +func unquote(s string) (string, error) { + var ret string + err := json.Unmarshal([]byte(s), &ret) + return ret, err +} + // jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute. func jsonProperties(f reflect.StructField, origName bool) *proto.Properties { var prop proto.Properties @@ -1081,3 +1113,140 @@ func (s mapKeys) Less(i, j int) bool { } return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) } + +// checkRequiredFields returns an error if any required field in the given proto message is not set. +// This function is used by both Marshal and Unmarshal. While required fields only exist in a +// proto2 message, a proto3 message can contain proto2 message(s). +func checkRequiredFields(pb proto.Message) error { + // Most well-known type messages do not contain required fields. The "Any" type may contain + // a message that has required fields. + // + // When an Any message is being marshaled, the code will invoked proto.Unmarshal on Any.Value + // field in order to transform that into JSON, and that should have returned an error if a + // required field is not set in the embedded message. + // + // When an Any message is being unmarshaled, the code will have invoked proto.Marshal on the + // embedded message to store the serialized message in Any.Value field, and that should have + // returned an error if a required field is not set. + if _, ok := pb.(wkt); ok { + return nil + } + + v := reflect.ValueOf(pb) + // Skip message if it is not a struct pointer. + if v.Kind() != reflect.Ptr { + return nil + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return nil + } + + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + sfield := v.Type().Field(i) + + if sfield.PkgPath != "" { + // blank PkgPath means the field is exported; skip if not exported + continue + } + + if strings.HasPrefix(sfield.Name, "XXX_") { + continue + } + + // Oneof field is an interface implemented by wrapper structs containing the actual oneof + // field, i.e. 
an interface containing &T{real_value}. + if sfield.Tag.Get("protobuf_oneof") != "" { + if field.Kind() != reflect.Interface { + continue + } + v := field.Elem() + if v.Kind() != reflect.Ptr || v.IsNil() { + continue + } + v = v.Elem() + if v.Kind() != reflect.Struct || v.NumField() < 1 { + continue + } + field = v.Field(0) + sfield = v.Type().Field(0) + } + + protoTag := sfield.Tag.Get("protobuf") + if protoTag == "" { + continue + } + var prop proto.Properties + prop.Init(sfield.Type, sfield.Name, protoTag, &sfield) + + switch field.Kind() { + case reflect.Map: + if field.IsNil() { + continue + } + // Check each map value. + keys := field.MapKeys() + for _, k := range keys { + v := field.MapIndex(k) + if err := checkRequiredFieldsInValue(v); err != nil { + return err + } + } + case reflect.Slice: + // Handle non-repeated type, e.g. bytes. + if !prop.Repeated { + if prop.Required && field.IsNil() { + return fmt.Errorf("required field %q is not set", prop.Name) + } + continue + } + + // Handle repeated type. + if field.IsNil() { + continue + } + // Check each slice item. + for i := 0; i < field.Len(); i++ { + v := field.Index(i) + if err := checkRequiredFieldsInValue(v); err != nil { + return err + } + } + case reflect.Ptr: + if field.IsNil() { + if prop.Required { + return fmt.Errorf("required field %q is not set", prop.Name) + } + continue + } + if err := checkRequiredFieldsInValue(field); err != nil { + return err + } + } + } + + // Handle proto2 extensions. + for _, ext := range proto.RegisteredExtensions(pb) { + if !proto.HasExtension(pb, ext) { + continue + } + ep, err := proto.GetExtension(pb, ext) + if err != nil { + return err + } + err = checkRequiredFieldsInValue(reflect.ValueOf(ep)) + if err != nil { + return err + } + } + + return nil +} + +func checkRequiredFieldsInValue(v reflect.Value) error { + if pm, ok := v.Interface().(proto.Message); ok { + return checkRequiredFields(pm) + } + return nil +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/Makefile b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/Makefile deleted file mode 100644 index eeda8ae..0000000 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/Makefile +++ /dev/null @@ -1,33 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2015 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -regenerate: - protoc --go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,Mgoogle/protobuf/struct.proto=github.com/golang/protobuf/ptypes/struct,Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp,Mgoogle/protobuf/wrappers.proto=github.com/golang/protobuf/ptypes/wrappers:. *.proto diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go index ebb180e..0555e44 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go @@ -1,29 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: more_test_objects.proto -/* -Package jsonpb is a generated protocol buffer package. - -It is generated from these files: - more_test_objects.proto - test_objects.proto - -It has these top-level messages: - Simple3 - SimpleSlice3 - SimpleMap3 - SimpleNull3 - Mappy - Simple - NonFinites - Repeats - Widget - Maps - MsgWithOneof - Real - Complex - KnownTypes -*/ package jsonpb import proto "github.com/golang/protobuf/proto" @@ -63,16 +40,40 @@ var Numeral_value = map[string]int32{ func (x Numeral) String() string { return proto.EnumName(Numeral_name, int32(x)) } -func (Numeral) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (Numeral) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_more_test_objects_bef0d79b901f4c4a, []int{0} +} type Simple3 struct { - Dub float64 `protobuf:"fixed64,1,opt,name=dub" json:"dub,omitempty"` + Dub float64 `protobuf:"fixed64,1,opt,name=dub,proto3" json:"dub,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Simple3) Reset() { *m = Simple3{} } +func (m *Simple3) String() string { return proto.CompactTextString(m) } +func (*Simple3) ProtoMessage() {} +func (*Simple3) Descriptor() ([]byte, []int) { + return fileDescriptor_more_test_objects_bef0d79b901f4c4a, []int{0} +} +func (m *Simple3) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Simple3.Unmarshal(m, b) +} +func (m *Simple3) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Simple3.Marshal(b, m, deterministic) +} +func (dst *Simple3) XXX_Merge(src proto.Message) { + xxx_messageInfo_Simple3.Merge(dst, src) +} +func (m *Simple3) XXX_Size() int { + return xxx_messageInfo_Simple3.Size(m) +} +func (m *Simple3) XXX_DiscardUnknown() { + xxx_messageInfo_Simple3.DiscardUnknown(m) } -func (m *Simple3) Reset() { *m = Simple3{} } -func (m *Simple3) String() string { return 
proto.CompactTextString(m) } -func (*Simple3) ProtoMessage() {} -func (*Simple3) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +var xxx_messageInfo_Simple3 proto.InternalMessageInfo func (m *Simple3) GetDub() float64 { if m != nil { @@ -82,13 +83,35 @@ func (m *Simple3) GetDub() float64 { } type SimpleSlice3 struct { - Slices []string `protobuf:"bytes,1,rep,name=slices" json:"slices,omitempty"` + Slices []string `protobuf:"bytes,1,rep,name=slices,proto3" json:"slices,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SimpleSlice3) Reset() { *m = SimpleSlice3{} } +func (m *SimpleSlice3) String() string { return proto.CompactTextString(m) } +func (*SimpleSlice3) ProtoMessage() {} +func (*SimpleSlice3) Descriptor() ([]byte, []int) { + return fileDescriptor_more_test_objects_bef0d79b901f4c4a, []int{1} +} +func (m *SimpleSlice3) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SimpleSlice3.Unmarshal(m, b) +} +func (m *SimpleSlice3) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SimpleSlice3.Marshal(b, m, deterministic) +} +func (dst *SimpleSlice3) XXX_Merge(src proto.Message) { + xxx_messageInfo_SimpleSlice3.Merge(dst, src) +} +func (m *SimpleSlice3) XXX_Size() int { + return xxx_messageInfo_SimpleSlice3.Size(m) +} +func (m *SimpleSlice3) XXX_DiscardUnknown() { + xxx_messageInfo_SimpleSlice3.DiscardUnknown(m) } -func (m *SimpleSlice3) Reset() { *m = SimpleSlice3{} } -func (m *SimpleSlice3) String() string { return proto.CompactTextString(m) } -func (*SimpleSlice3) ProtoMessage() {} -func (*SimpleSlice3) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +var xxx_messageInfo_SimpleSlice3 proto.InternalMessageInfo func (m *SimpleSlice3) GetSlices() []string { if m != nil { @@ -98,13 +121,35 @@ func (m *SimpleSlice3) GetSlices() []string { } type SimpleMap3 struct { - Stringy map[string]string `protobuf:"bytes,1,rep,name=stringy" json:"stringy,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Stringy map[string]string `protobuf:"bytes,1,rep,name=stringy,proto3" json:"stringy,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SimpleMap3) Reset() { *m = SimpleMap3{} } +func (m *SimpleMap3) String() string { return proto.CompactTextString(m) } +func (*SimpleMap3) ProtoMessage() {} +func (*SimpleMap3) Descriptor() ([]byte, []int) { + return fileDescriptor_more_test_objects_bef0d79b901f4c4a, []int{2} +} +func (m *SimpleMap3) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SimpleMap3.Unmarshal(m, b) +} +func (m *SimpleMap3) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SimpleMap3.Marshal(b, m, deterministic) +} +func (dst *SimpleMap3) XXX_Merge(src proto.Message) { + xxx_messageInfo_SimpleMap3.Merge(dst, src) +} +func (m *SimpleMap3) XXX_Size() int { + return xxx_messageInfo_SimpleMap3.Size(m) +} +func (m *SimpleMap3) XXX_DiscardUnknown() { + xxx_messageInfo_SimpleMap3.DiscardUnknown(m) } -func (m *SimpleMap3) Reset() { *m = SimpleMap3{} } -func (m *SimpleMap3) String() string { return proto.CompactTextString(m) } -func (*SimpleMap3) ProtoMessage() {} -func (*SimpleMap3) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +var xxx_messageInfo_SimpleMap3 
proto.InternalMessageInfo func (m *SimpleMap3) GetStringy() map[string]string { if m != nil { @@ -114,13 +159,35 @@ func (m *SimpleMap3) GetStringy() map[string]string { } type SimpleNull3 struct { - Simple *Simple3 `protobuf:"bytes,1,opt,name=simple" json:"simple,omitempty"` + Simple *Simple3 `protobuf:"bytes,1,opt,name=simple,proto3" json:"simple,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SimpleNull3) Reset() { *m = SimpleNull3{} } +func (m *SimpleNull3) String() string { return proto.CompactTextString(m) } +func (*SimpleNull3) ProtoMessage() {} +func (*SimpleNull3) Descriptor() ([]byte, []int) { + return fileDescriptor_more_test_objects_bef0d79b901f4c4a, []int{3} +} +func (m *SimpleNull3) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SimpleNull3.Unmarshal(m, b) +} +func (m *SimpleNull3) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SimpleNull3.Marshal(b, m, deterministic) +} +func (dst *SimpleNull3) XXX_Merge(src proto.Message) { + xxx_messageInfo_SimpleNull3.Merge(dst, src) +} +func (m *SimpleNull3) XXX_Size() int { + return xxx_messageInfo_SimpleNull3.Size(m) +} +func (m *SimpleNull3) XXX_DiscardUnknown() { + xxx_messageInfo_SimpleNull3.DiscardUnknown(m) } -func (m *SimpleNull3) Reset() { *m = SimpleNull3{} } -func (m *SimpleNull3) String() string { return proto.CompactTextString(m) } -func (*SimpleNull3) ProtoMessage() {} -func (*SimpleNull3) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +var xxx_messageInfo_SimpleNull3 proto.InternalMessageInfo func (m *SimpleNull3) GetSimple() *Simple3 { if m != nil { @@ -130,22 +197,44 @@ func (m *SimpleNull3) GetSimple() *Simple3 { } type Mappy struct { - Nummy map[int64]int32 `protobuf:"bytes,1,rep,name=nummy" json:"nummy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` - Strry map[string]string `protobuf:"bytes,2,rep,name=strry" json:"strry,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - Objjy map[int32]*Simple3 `protobuf:"bytes,3,rep,name=objjy" json:"objjy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - Buggy map[int64]string `protobuf:"bytes,4,rep,name=buggy" json:"buggy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - Booly map[bool]bool `protobuf:"bytes,5,rep,name=booly" json:"booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` - Enumy map[string]Numeral `protobuf:"bytes,6,rep,name=enumy" json:"enumy,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value,enum=jsonpb.Numeral"` - S32Booly map[int32]bool `protobuf:"bytes,7,rep,name=s32booly" json:"s32booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` - S64Booly map[int64]bool `protobuf:"bytes,8,rep,name=s64booly" json:"s64booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` - U32Booly map[uint32]bool `protobuf:"bytes,9,rep,name=u32booly" json:"u32booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` - U64Booly map[uint64]bool `protobuf:"bytes,10,rep,name=u64booly" json:"u64booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` -} - -func (m *Mappy) Reset() { *m = Mappy{} } -func (m *Mappy) String() string { return proto.CompactTextString(m) 
} -func (*Mappy) ProtoMessage() {} -func (*Mappy) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + Nummy map[int64]int32 `protobuf:"bytes,1,rep,name=nummy,proto3" json:"nummy,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + Strry map[string]string `protobuf:"bytes,2,rep,name=strry,proto3" json:"strry,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Objjy map[int32]*Simple3 `protobuf:"bytes,3,rep,name=objjy,proto3" json:"objjy,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Buggy map[int64]string `protobuf:"bytes,4,rep,name=buggy,proto3" json:"buggy,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Booly map[bool]bool `protobuf:"bytes,5,rep,name=booly,proto3" json:"booly,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + Enumy map[string]Numeral `protobuf:"bytes,6,rep,name=enumy,proto3" json:"enumy,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3,enum=jsonpb.Numeral"` + S32Booly map[int32]bool `protobuf:"bytes,7,rep,name=s32booly,proto3" json:"s32booly,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + S64Booly map[int64]bool `protobuf:"bytes,8,rep,name=s64booly,proto3" json:"s64booly,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + U32Booly map[uint32]bool `protobuf:"bytes,9,rep,name=u32booly,proto3" json:"u32booly,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + U64Booly map[uint64]bool `protobuf:"bytes,10,rep,name=u64booly,proto3" json:"u64booly,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Mappy) Reset() { *m = Mappy{} } +func (m *Mappy) String() string { return proto.CompactTextString(m) } +func (*Mappy) ProtoMessage() {} +func (*Mappy) Descriptor() ([]byte, []int) { + return fileDescriptor_more_test_objects_bef0d79b901f4c4a, []int{4} +} +func (m *Mappy) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Mappy.Unmarshal(m, b) +} +func (m *Mappy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Mappy.Marshal(b, m, deterministic) +} +func (dst *Mappy) XXX_Merge(src proto.Message) { + xxx_messageInfo_Mappy.Merge(dst, src) +} +func (m *Mappy) XXX_Size() int { + return xxx_messageInfo_Mappy.Size(m) +} +func (m *Mappy) XXX_DiscardUnknown() { + xxx_messageInfo_Mappy.DiscardUnknown(m) +} + +var xxx_messageInfo_Mappy proto.InternalMessageInfo func (m *Mappy) GetNummy() map[int64]int32 { if m != nil { @@ -221,14 +310,27 @@ func init() { proto.RegisterType((*Simple3)(nil), "jsonpb.Simple3") proto.RegisterType((*SimpleSlice3)(nil), "jsonpb.SimpleSlice3") proto.RegisterType((*SimpleMap3)(nil), "jsonpb.SimpleMap3") + proto.RegisterMapType((map[string]string)(nil), "jsonpb.SimpleMap3.StringyEntry") proto.RegisterType((*SimpleNull3)(nil), "jsonpb.SimpleNull3") proto.RegisterType((*Mappy)(nil), "jsonpb.Mappy") + proto.RegisterMapType((map[bool]bool)(nil), "jsonpb.Mappy.BoolyEntry") + proto.RegisterMapType((map[int64]string)(nil), "jsonpb.Mappy.BuggyEntry") + 
proto.RegisterMapType((map[string]Numeral)(nil), "jsonpb.Mappy.EnumyEntry") + proto.RegisterMapType((map[int64]int32)(nil), "jsonpb.Mappy.NummyEntry") + proto.RegisterMapType((map[int32]*Simple3)(nil), "jsonpb.Mappy.ObjjyEntry") + proto.RegisterMapType((map[int32]bool)(nil), "jsonpb.Mappy.S32boolyEntry") + proto.RegisterMapType((map[int64]bool)(nil), "jsonpb.Mappy.S64boolyEntry") + proto.RegisterMapType((map[string]string)(nil), "jsonpb.Mappy.StrryEntry") + proto.RegisterMapType((map[uint32]bool)(nil), "jsonpb.Mappy.U32boolyEntry") + proto.RegisterMapType((map[uint64]bool)(nil), "jsonpb.Mappy.U64boolyEntry") proto.RegisterEnum("jsonpb.Numeral", Numeral_name, Numeral_value) } -func init() { proto.RegisterFile("more_test_objects.proto", fileDescriptor0) } +func init() { + proto.RegisterFile("more_test_objects.proto", fileDescriptor_more_test_objects_bef0d79b901f4c4a) +} -var fileDescriptor0 = []byte{ +var fileDescriptor_more_test_objects_bef0d79b901f4c4a = []byte{ // 526 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0xdd, 0x6b, 0xdb, 0x3c, 0x14, 0x87, 0x5f, 0x27, 0xf5, 0xd7, 0x49, 0xfb, 0x2e, 0x88, 0xb1, 0x99, 0xf4, 0x62, 0xc5, 0xb0, diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go index d413d74..6c15fd1 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go @@ -6,17 +6,23 @@ package jsonpb import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import google_protobuf "github.com/golang/protobuf/ptypes/any" -import google_protobuf1 "github.com/golang/protobuf/ptypes/duration" -import google_protobuf2 "github.com/golang/protobuf/ptypes/struct" -import google_protobuf3 "github.com/golang/protobuf/ptypes/timestamp" -import google_protobuf4 "github.com/golang/protobuf/ptypes/wrappers" +import any "github.com/golang/protobuf/ptypes/any" +import duration "github.com/golang/protobuf/ptypes/duration" +import _struct "github.com/golang/protobuf/ptypes/struct" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" +import wrappers "github.com/golang/protobuf/ptypes/wrappers" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + type Widget_Color int32 const ( @@ -52,28 +58,59 @@ func (x *Widget_Color) UnmarshalJSON(data []byte) error { *x = Widget_Color(value) return nil } -func (Widget_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{3, 0} } +func (Widget_Color) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_test_objects_a4d3e593ea3c686f, []int{3, 0} +} // Test message for holding primitive types. 
type Simple struct { - OBool *bool `protobuf:"varint,1,opt,name=o_bool,json=oBool" json:"o_bool,omitempty"` - OInt32 *int32 `protobuf:"varint,2,opt,name=o_int32,json=oInt32" json:"o_int32,omitempty"` - OInt64 *int64 `protobuf:"varint,3,opt,name=o_int64,json=oInt64" json:"o_int64,omitempty"` - OUint32 *uint32 `protobuf:"varint,4,opt,name=o_uint32,json=oUint32" json:"o_uint32,omitempty"` - OUint64 *uint64 `protobuf:"varint,5,opt,name=o_uint64,json=oUint64" json:"o_uint64,omitempty"` - OSint32 *int32 `protobuf:"zigzag32,6,opt,name=o_sint32,json=oSint32" json:"o_sint32,omitempty"` - OSint64 *int64 `protobuf:"zigzag64,7,opt,name=o_sint64,json=oSint64" json:"o_sint64,omitempty"` - OFloat *float32 `protobuf:"fixed32,8,opt,name=o_float,json=oFloat" json:"o_float,omitempty"` - ODouble *float64 `protobuf:"fixed64,9,opt,name=o_double,json=oDouble" json:"o_double,omitempty"` - OString *string `protobuf:"bytes,10,opt,name=o_string,json=oString" json:"o_string,omitempty"` - OBytes []byte `protobuf:"bytes,11,opt,name=o_bytes,json=oBytes" json:"o_bytes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Simple) Reset() { *m = Simple{} } -func (m *Simple) String() string { return proto.CompactTextString(m) } -func (*Simple) ProtoMessage() {} -func (*Simple) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + OBool *bool `protobuf:"varint,1,opt,name=o_bool,json=oBool" json:"o_bool,omitempty"` + OInt32 *int32 `protobuf:"varint,2,opt,name=o_int32,json=oInt32" json:"o_int32,omitempty"` + OInt32Str *int32 `protobuf:"varint,3,opt,name=o_int32_str,json=oInt32Str" json:"o_int32_str,omitempty"` + OInt64 *int64 `protobuf:"varint,4,opt,name=o_int64,json=oInt64" json:"o_int64,omitempty"` + OInt64Str *int64 `protobuf:"varint,5,opt,name=o_int64_str,json=oInt64Str" json:"o_int64_str,omitempty"` + OUint32 *uint32 `protobuf:"varint,6,opt,name=o_uint32,json=oUint32" json:"o_uint32,omitempty"` + OUint32Str *uint32 `protobuf:"varint,7,opt,name=o_uint32_str,json=oUint32Str" json:"o_uint32_str,omitempty"` + OUint64 *uint64 `protobuf:"varint,8,opt,name=o_uint64,json=oUint64" json:"o_uint64,omitempty"` + OUint64Str *uint64 `protobuf:"varint,9,opt,name=o_uint64_str,json=oUint64Str" json:"o_uint64_str,omitempty"` + OSint32 *int32 `protobuf:"zigzag32,10,opt,name=o_sint32,json=oSint32" json:"o_sint32,omitempty"` + OSint32Str *int32 `protobuf:"zigzag32,11,opt,name=o_sint32_str,json=oSint32Str" json:"o_sint32_str,omitempty"` + OSint64 *int64 `protobuf:"zigzag64,12,opt,name=o_sint64,json=oSint64" json:"o_sint64,omitempty"` + OSint64Str *int64 `protobuf:"zigzag64,13,opt,name=o_sint64_str,json=oSint64Str" json:"o_sint64_str,omitempty"` + OFloat *float32 `protobuf:"fixed32,14,opt,name=o_float,json=oFloat" json:"o_float,omitempty"` + OFloatStr *float32 `protobuf:"fixed32,15,opt,name=o_float_str,json=oFloatStr" json:"o_float_str,omitempty"` + ODouble *float64 `protobuf:"fixed64,16,opt,name=o_double,json=oDouble" json:"o_double,omitempty"` + ODoubleStr *float64 `protobuf:"fixed64,17,opt,name=o_double_str,json=oDoubleStr" json:"o_double_str,omitempty"` + OString *string `protobuf:"bytes,18,opt,name=o_string,json=oString" json:"o_string,omitempty"` + OBytes []byte `protobuf:"bytes,19,opt,name=o_bytes,json=oBytes" json:"o_bytes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Simple) Reset() { *m = Simple{} } +func (m *Simple) String() string { return proto.CompactTextString(m) } +func (*Simple) ProtoMessage() {} +func (*Simple) 
Descriptor() ([]byte, []int) { + return fileDescriptor_test_objects_a4d3e593ea3c686f, []int{0} +} +func (m *Simple) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Simple.Unmarshal(m, b) +} +func (m *Simple) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Simple.Marshal(b, m, deterministic) +} +func (dst *Simple) XXX_Merge(src proto.Message) { + xxx_messageInfo_Simple.Merge(dst, src) +} +func (m *Simple) XXX_Size() int { + return xxx_messageInfo_Simple.Size(m) +} +func (m *Simple) XXX_DiscardUnknown() { + xxx_messageInfo_Simple.DiscardUnknown(m) +} + +var xxx_messageInfo_Simple proto.InternalMessageInfo func (m *Simple) GetOBool() bool { if m != nil && m.OBool != nil { @@ -89,6 +126,13 @@ func (m *Simple) GetOInt32() int32 { return 0 } +func (m *Simple) GetOInt32Str() int32 { + if m != nil && m.OInt32Str != nil { + return *m.OInt32Str + } + return 0 +} + func (m *Simple) GetOInt64() int64 { if m != nil && m.OInt64 != nil { return *m.OInt64 @@ -96,6 +140,13 @@ func (m *Simple) GetOInt64() int64 { return 0 } +func (m *Simple) GetOInt64Str() int64 { + if m != nil && m.OInt64Str != nil { + return *m.OInt64Str + } + return 0 +} + func (m *Simple) GetOUint32() uint32 { if m != nil && m.OUint32 != nil { return *m.OUint32 @@ -103,6 +154,13 @@ func (m *Simple) GetOUint32() uint32 { return 0 } +func (m *Simple) GetOUint32Str() uint32 { + if m != nil && m.OUint32Str != nil { + return *m.OUint32Str + } + return 0 +} + func (m *Simple) GetOUint64() uint64 { if m != nil && m.OUint64 != nil { return *m.OUint64 @@ -110,6 +168,13 @@ func (m *Simple) GetOUint64() uint64 { return 0 } +func (m *Simple) GetOUint64Str() uint64 { + if m != nil && m.OUint64Str != nil { + return *m.OUint64Str + } + return 0 +} + func (m *Simple) GetOSint32() int32 { if m != nil && m.OSint32 != nil { return *m.OSint32 @@ -117,6 +182,13 @@ func (m *Simple) GetOSint32() int32 { return 0 } +func (m *Simple) GetOSint32Str() int32 { + if m != nil && m.OSint32Str != nil { + return *m.OSint32Str + } + return 0 +} + func (m *Simple) GetOSint64() int64 { if m != nil && m.OSint64 != nil { return *m.OSint64 @@ -124,6 +196,13 @@ func (m *Simple) GetOSint64() int64 { return 0 } +func (m *Simple) GetOSint64Str() int64 { + if m != nil && m.OSint64Str != nil { + return *m.OSint64Str + } + return 0 +} + func (m *Simple) GetOFloat() float32 { if m != nil && m.OFloat != nil { return *m.OFloat @@ -131,6 +210,13 @@ func (m *Simple) GetOFloat() float32 { return 0 } +func (m *Simple) GetOFloatStr() float32 { + if m != nil && m.OFloatStr != nil { + return *m.OFloatStr + } + return 0 +} + func (m *Simple) GetODouble() float64 { if m != nil && m.ODouble != nil { return *m.ODouble @@ -138,6 +224,13 @@ func (m *Simple) GetODouble() float64 { return 0 } +func (m *Simple) GetODoubleStr() float64 { + if m != nil && m.ODoubleStr != nil { + return *m.ODoubleStr + } + return 0 +} + func (m *Simple) GetOString() string { if m != nil && m.OString != nil { return *m.OString @@ -154,19 +247,40 @@ func (m *Simple) GetOBytes() []byte { // Test message for holding special non-finites primitives. 
type NonFinites struct { - FNan *float32 `protobuf:"fixed32,1,opt,name=f_nan,json=fNan" json:"f_nan,omitempty"` - FPinf *float32 `protobuf:"fixed32,2,opt,name=f_pinf,json=fPinf" json:"f_pinf,omitempty"` - FNinf *float32 `protobuf:"fixed32,3,opt,name=f_ninf,json=fNinf" json:"f_ninf,omitempty"` - DNan *float64 `protobuf:"fixed64,4,opt,name=d_nan,json=dNan" json:"d_nan,omitempty"` - DPinf *float64 `protobuf:"fixed64,5,opt,name=d_pinf,json=dPinf" json:"d_pinf,omitempty"` - DNinf *float64 `protobuf:"fixed64,6,opt,name=d_ninf,json=dNinf" json:"d_ninf,omitempty"` - XXX_unrecognized []byte `json:"-"` + FNan *float32 `protobuf:"fixed32,1,opt,name=f_nan,json=fNan" json:"f_nan,omitempty"` + FPinf *float32 `protobuf:"fixed32,2,opt,name=f_pinf,json=fPinf" json:"f_pinf,omitempty"` + FNinf *float32 `protobuf:"fixed32,3,opt,name=f_ninf,json=fNinf" json:"f_ninf,omitempty"` + DNan *float64 `protobuf:"fixed64,4,opt,name=d_nan,json=dNan" json:"d_nan,omitempty"` + DPinf *float64 `protobuf:"fixed64,5,opt,name=d_pinf,json=dPinf" json:"d_pinf,omitempty"` + DNinf *float64 `protobuf:"fixed64,6,opt,name=d_ninf,json=dNinf" json:"d_ninf,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NonFinites) Reset() { *m = NonFinites{} } +func (m *NonFinites) String() string { return proto.CompactTextString(m) } +func (*NonFinites) ProtoMessage() {} +func (*NonFinites) Descriptor() ([]byte, []int) { + return fileDescriptor_test_objects_a4d3e593ea3c686f, []int{1} +} +func (m *NonFinites) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NonFinites.Unmarshal(m, b) +} +func (m *NonFinites) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NonFinites.Marshal(b, m, deterministic) +} +func (dst *NonFinites) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonFinites.Merge(dst, src) +} +func (m *NonFinites) XXX_Size() int { + return xxx_messageInfo_NonFinites.Size(m) +} +func (m *NonFinites) XXX_DiscardUnknown() { + xxx_messageInfo_NonFinites.DiscardUnknown(m) } -func (m *NonFinites) Reset() { *m = NonFinites{} } -func (m *NonFinites) String() string { return proto.CompactTextString(m) } -func (*NonFinites) ProtoMessage() {} -func (*NonFinites) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } +var xxx_messageInfo_NonFinites proto.InternalMessageInfo func (m *NonFinites) GetFNan() float32 { if m != nil && m.FNan != nil { @@ -212,24 +326,45 @@ func (m *NonFinites) GetDNinf() float64 { // Test message for holding repeated primitives. 
type Repeats struct { - RBool []bool `protobuf:"varint,1,rep,name=r_bool,json=rBool" json:"r_bool,omitempty"` - RInt32 []int32 `protobuf:"varint,2,rep,name=r_int32,json=rInt32" json:"r_int32,omitempty"` - RInt64 []int64 `protobuf:"varint,3,rep,name=r_int64,json=rInt64" json:"r_int64,omitempty"` - RUint32 []uint32 `protobuf:"varint,4,rep,name=r_uint32,json=rUint32" json:"r_uint32,omitempty"` - RUint64 []uint64 `protobuf:"varint,5,rep,name=r_uint64,json=rUint64" json:"r_uint64,omitempty"` - RSint32 []int32 `protobuf:"zigzag32,6,rep,name=r_sint32,json=rSint32" json:"r_sint32,omitempty"` - RSint64 []int64 `protobuf:"zigzag64,7,rep,name=r_sint64,json=rSint64" json:"r_sint64,omitempty"` - RFloat []float32 `protobuf:"fixed32,8,rep,name=r_float,json=rFloat" json:"r_float,omitempty"` - RDouble []float64 `protobuf:"fixed64,9,rep,name=r_double,json=rDouble" json:"r_double,omitempty"` - RString []string `protobuf:"bytes,10,rep,name=r_string,json=rString" json:"r_string,omitempty"` - RBytes [][]byte `protobuf:"bytes,11,rep,name=r_bytes,json=rBytes" json:"r_bytes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Repeats) Reset() { *m = Repeats{} } -func (m *Repeats) String() string { return proto.CompactTextString(m) } -func (*Repeats) ProtoMessage() {} -func (*Repeats) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + RBool []bool `protobuf:"varint,1,rep,name=r_bool,json=rBool" json:"r_bool,omitempty"` + RInt32 []int32 `protobuf:"varint,2,rep,name=r_int32,json=rInt32" json:"r_int32,omitempty"` + RInt64 []int64 `protobuf:"varint,3,rep,name=r_int64,json=rInt64" json:"r_int64,omitempty"` + RUint32 []uint32 `protobuf:"varint,4,rep,name=r_uint32,json=rUint32" json:"r_uint32,omitempty"` + RUint64 []uint64 `protobuf:"varint,5,rep,name=r_uint64,json=rUint64" json:"r_uint64,omitempty"` + RSint32 []int32 `protobuf:"zigzag32,6,rep,name=r_sint32,json=rSint32" json:"r_sint32,omitempty"` + RSint64 []int64 `protobuf:"zigzag64,7,rep,name=r_sint64,json=rSint64" json:"r_sint64,omitempty"` + RFloat []float32 `protobuf:"fixed32,8,rep,name=r_float,json=rFloat" json:"r_float,omitempty"` + RDouble []float64 `protobuf:"fixed64,9,rep,name=r_double,json=rDouble" json:"r_double,omitempty"` + RString []string `protobuf:"bytes,10,rep,name=r_string,json=rString" json:"r_string,omitempty"` + RBytes [][]byte `protobuf:"bytes,11,rep,name=r_bytes,json=rBytes" json:"r_bytes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Repeats) Reset() { *m = Repeats{} } +func (m *Repeats) String() string { return proto.CompactTextString(m) } +func (*Repeats) ProtoMessage() {} +func (*Repeats) Descriptor() ([]byte, []int) { + return fileDescriptor_test_objects_a4d3e593ea3c686f, []int{2} +} +func (m *Repeats) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Repeats.Unmarshal(m, b) +} +func (m *Repeats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Repeats.Marshal(b, m, deterministic) +} +func (dst *Repeats) XXX_Merge(src proto.Message) { + xxx_messageInfo_Repeats.Merge(dst, src) +} +func (m *Repeats) XXX_Size() int { + return xxx_messageInfo_Repeats.Size(m) +} +func (m *Repeats) XXX_DiscardUnknown() { + xxx_messageInfo_Repeats.DiscardUnknown(m) +} + +var xxx_messageInfo_Repeats proto.InternalMessageInfo func (m *Repeats) GetRBool() []bool { if m != nil { @@ -310,19 +445,40 @@ func (m *Repeats) GetRBytes() [][]byte { // Test message for holding enums and nested messages. 
type Widget struct { - Color *Widget_Color `protobuf:"varint,1,opt,name=color,enum=jsonpb.Widget_Color" json:"color,omitempty"` - RColor []Widget_Color `protobuf:"varint,2,rep,name=r_color,json=rColor,enum=jsonpb.Widget_Color" json:"r_color,omitempty"` - Simple *Simple `protobuf:"bytes,10,opt,name=simple" json:"simple,omitempty"` - RSimple []*Simple `protobuf:"bytes,11,rep,name=r_simple,json=rSimple" json:"r_simple,omitempty"` - Repeats *Repeats `protobuf:"bytes,20,opt,name=repeats" json:"repeats,omitempty"` - RRepeats []*Repeats `protobuf:"bytes,21,rep,name=r_repeats,json=rRepeats" json:"r_repeats,omitempty"` - XXX_unrecognized []byte `json:"-"` + Color *Widget_Color `protobuf:"varint,1,opt,name=color,enum=jsonpb.Widget_Color" json:"color,omitempty"` + RColor []Widget_Color `protobuf:"varint,2,rep,name=r_color,json=rColor,enum=jsonpb.Widget_Color" json:"r_color,omitempty"` + Simple *Simple `protobuf:"bytes,10,opt,name=simple" json:"simple,omitempty"` + RSimple []*Simple `protobuf:"bytes,11,rep,name=r_simple,json=rSimple" json:"r_simple,omitempty"` + Repeats *Repeats `protobuf:"bytes,20,opt,name=repeats" json:"repeats,omitempty"` + RRepeats []*Repeats `protobuf:"bytes,21,rep,name=r_repeats,json=rRepeats" json:"r_repeats,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Widget) Reset() { *m = Widget{} } +func (m *Widget) String() string { return proto.CompactTextString(m) } +func (*Widget) ProtoMessage() {} +func (*Widget) Descriptor() ([]byte, []int) { + return fileDescriptor_test_objects_a4d3e593ea3c686f, []int{3} +} +func (m *Widget) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Widget.Unmarshal(m, b) +} +func (m *Widget) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Widget.Marshal(b, m, deterministic) +} +func (dst *Widget) XXX_Merge(src proto.Message) { + xxx_messageInfo_Widget.Merge(dst, src) +} +func (m *Widget) XXX_Size() int { + return xxx_messageInfo_Widget.Size(m) +} +func (m *Widget) XXX_DiscardUnknown() { + xxx_messageInfo_Widget.DiscardUnknown(m) } -func (m *Widget) Reset() { *m = Widget{} } -func (m *Widget) String() string { return proto.CompactTextString(m) } -func (*Widget) ProtoMessage() {} -func (*Widget) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } +var xxx_messageInfo_Widget proto.InternalMessageInfo func (m *Widget) GetColor() Widget_Color { if m != nil && m.Color != nil { @@ -367,15 +523,36 @@ func (m *Widget) GetRRepeats() []*Repeats { } type Maps struct { - MInt64Str map[int64]string `protobuf:"bytes,1,rep,name=m_int64_str,json=mInt64Str" json:"m_int64_str,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - MBoolSimple map[bool]*Simple `protobuf:"bytes,2,rep,name=m_bool_simple,json=mBoolSimple" json:"m_bool_simple,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_unrecognized []byte `json:"-"` + MInt64Str map[int64]string `protobuf:"bytes,1,rep,name=m_int64_str,json=mInt64Str" json:"m_int64_str,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + MBoolSimple map[bool]*Simple `protobuf:"bytes,2,rep,name=m_bool_simple,json=mBoolSimple" json:"m_bool_simple,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Maps) Reset() { *m = Maps{} } 
-func (m *Maps) String() string { return proto.CompactTextString(m) } -func (*Maps) ProtoMessage() {} -func (*Maps) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } +func (m *Maps) Reset() { *m = Maps{} } +func (m *Maps) String() string { return proto.CompactTextString(m) } +func (*Maps) ProtoMessage() {} +func (*Maps) Descriptor() ([]byte, []int) { + return fileDescriptor_test_objects_a4d3e593ea3c686f, []int{4} +} +func (m *Maps) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Maps.Unmarshal(m, b) +} +func (m *Maps) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Maps.Marshal(b, m, deterministic) +} +func (dst *Maps) XXX_Merge(src proto.Message) { + xxx_messageInfo_Maps.Merge(dst, src) +} +func (m *Maps) XXX_Size() int { + return xxx_messageInfo_Maps.Size(m) +} +func (m *Maps) XXX_DiscardUnknown() { + xxx_messageInfo_Maps.DiscardUnknown(m) +} + +var xxx_messageInfo_Maps proto.InternalMessageInfo func (m *Maps) GetMInt64Str() map[int64]string { if m != nil { @@ -397,14 +574,36 @@ type MsgWithOneof struct { // *MsgWithOneof_Salary // *MsgWithOneof_Country // *MsgWithOneof_HomeAddress - Union isMsgWithOneof_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` + // *MsgWithOneof_MsgWithRequired + Union isMsgWithOneof_Union `protobuf_oneof:"union"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *MsgWithOneof) Reset() { *m = MsgWithOneof{} } -func (m *MsgWithOneof) String() string { return proto.CompactTextString(m) } -func (*MsgWithOneof) ProtoMessage() {} -func (*MsgWithOneof) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } +func (m *MsgWithOneof) Reset() { *m = MsgWithOneof{} } +func (m *MsgWithOneof) String() string { return proto.CompactTextString(m) } +func (*MsgWithOneof) ProtoMessage() {} +func (*MsgWithOneof) Descriptor() ([]byte, []int) { + return fileDescriptor_test_objects_a4d3e593ea3c686f, []int{5} +} +func (m *MsgWithOneof) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MsgWithOneof.Unmarshal(m, b) +} +func (m *MsgWithOneof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MsgWithOneof.Marshal(b, m, deterministic) +} +func (dst *MsgWithOneof) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgWithOneof.Merge(dst, src) +} +func (m *MsgWithOneof) XXX_Size() int { + return xxx_messageInfo_MsgWithOneof.Size(m) +} +func (m *MsgWithOneof) XXX_DiscardUnknown() { + xxx_messageInfo_MsgWithOneof.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgWithOneof proto.InternalMessageInfo type isMsgWithOneof_Union interface { isMsgWithOneof_Union() @@ -422,11 +621,15 @@ type MsgWithOneof_Country struct { type MsgWithOneof_HomeAddress struct { HomeAddress string `protobuf:"bytes,4,opt,name=home_address,json=homeAddress,oneof"` } +type MsgWithOneof_MsgWithRequired struct { + MsgWithRequired *MsgWithRequired `protobuf:"bytes,5,opt,name=msg_with_required,json=msgWithRequired,oneof"` +} -func (*MsgWithOneof_Title) isMsgWithOneof_Union() {} -func (*MsgWithOneof_Salary) isMsgWithOneof_Union() {} -func (*MsgWithOneof_Country) isMsgWithOneof_Union() {} -func (*MsgWithOneof_HomeAddress) isMsgWithOneof_Union() {} +func (*MsgWithOneof_Title) isMsgWithOneof_Union() {} +func (*MsgWithOneof_Salary) isMsgWithOneof_Union() {} +func (*MsgWithOneof_Country) isMsgWithOneof_Union() {} +func (*MsgWithOneof_HomeAddress) isMsgWithOneof_Union() {} +func (*MsgWithOneof_MsgWithRequired) isMsgWithOneof_Union() {} 
func (m *MsgWithOneof) GetUnion() isMsgWithOneof_Union { if m != nil { @@ -463,6 +666,13 @@ func (m *MsgWithOneof) GetHomeAddress() string { return "" } +func (m *MsgWithOneof) GetMsgWithRequired() *MsgWithRequired { + if x, ok := m.GetUnion().(*MsgWithOneof_MsgWithRequired); ok { + return x.MsgWithRequired + } + return nil +} + // XXX_OneofFuncs is for the internal use of the proto package. func (*MsgWithOneof) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _MsgWithOneof_OneofMarshaler, _MsgWithOneof_OneofUnmarshaler, _MsgWithOneof_OneofSizer, []interface{}{ @@ -470,6 +680,7 @@ func (*MsgWithOneof) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) (*MsgWithOneof_Salary)(nil), (*MsgWithOneof_Country)(nil), (*MsgWithOneof_HomeAddress)(nil), + (*MsgWithOneof_MsgWithRequired)(nil), } } @@ -489,6 +700,11 @@ func _MsgWithOneof_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { case *MsgWithOneof_HomeAddress: b.EncodeVarint(4<<3 | proto.WireBytes) b.EncodeStringBytes(x.HomeAddress) + case *MsgWithOneof_MsgWithRequired: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.MsgWithRequired); err != nil { + return err + } case nil: default: return fmt.Errorf("MsgWithOneof.Union has unexpected type %T", x) @@ -527,6 +743,14 @@ func _MsgWithOneof_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.B x, err := b.DecodeStringBytes() m.Union = &MsgWithOneof_HomeAddress{x} return true, err + case 5: // union.msg_with_required + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(MsgWithRequired) + err := b.DecodeMessage(msg) + m.Union = &MsgWithOneof_MsgWithRequired{msg} + return true, err default: return false, nil } @@ -537,20 +761,25 @@ func _MsgWithOneof_OneofSizer(msg proto.Message) (n int) { // union switch x := m.Union.(type) { case *MsgWithOneof_Title: - n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += 1 // tag and wire n += proto.SizeVarint(uint64(len(x.Title))) n += len(x.Title) case *MsgWithOneof_Salary: - n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += 1 // tag and wire n += proto.SizeVarint(uint64(x.Salary)) case *MsgWithOneof_Country: - n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += 1 // tag and wire n += proto.SizeVarint(uint64(len(x.Country))) n += len(x.Country) case *MsgWithOneof_HomeAddress: - n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += 1 // tag and wire n += proto.SizeVarint(uint64(len(x.HomeAddress))) n += len(x.HomeAddress) + case *MsgWithOneof_MsgWithRequired: + s := proto.Size(x.MsgWithRequired) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s case nil: default: panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) @@ -560,22 +789,43 @@ func _MsgWithOneof_OneofSizer(msg proto.Message) (n int) { type Real struct { Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Real) Reset() { *m = Real{} } -func (m *Real) String() string { return proto.CompactTextString(m) } -func (*Real) ProtoMessage() {} -func (*Real) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} } +func (m *Real) Reset() { *m = Real{} } +func (m *Real) String() string { return proto.CompactTextString(m) } +func (*Real) ProtoMessage() {} +func (*Real) 
Descriptor() ([]byte, []int) { + return fileDescriptor_test_objects_a4d3e593ea3c686f, []int{6} +} var extRange_Real = []proto.ExtensionRange{ - {100, 536870911}, + {Start: 100, End: 536870911}, } func (*Real) ExtensionRangeArray() []proto.ExtensionRange { return extRange_Real } +func (m *Real) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Real.Unmarshal(m, b) +} +func (m *Real) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Real.Marshal(b, m, deterministic) +} +func (dst *Real) XXX_Merge(src proto.Message) { + xxx_messageInfo_Real.Merge(dst, src) +} +func (m *Real) XXX_Size() int { + return xxx_messageInfo_Real.Size(m) +} +func (m *Real) XXX_DiscardUnknown() { + xxx_messageInfo_Real.DiscardUnknown(m) +} + +var xxx_messageInfo_Real proto.InternalMessageInfo func (m *Real) GetValue() float64 { if m != nil && m.Value != nil { @@ -586,22 +836,43 @@ func (m *Real) GetValue() float64 { type Complex struct { Imaginary *float64 `protobuf:"fixed64,1,opt,name=imaginary" json:"imaginary,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Complex) Reset() { *m = Complex{} } -func (m *Complex) String() string { return proto.CompactTextString(m) } -func (*Complex) ProtoMessage() {} -func (*Complex) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} } +func (m *Complex) Reset() { *m = Complex{} } +func (m *Complex) String() string { return proto.CompactTextString(m) } +func (*Complex) ProtoMessage() {} +func (*Complex) Descriptor() ([]byte, []int) { + return fileDescriptor_test_objects_a4d3e593ea3c686f, []int{7} +} var extRange_Complex = []proto.ExtensionRange{ - {100, 536870911}, + {Start: 100, End: 536870911}, } func (*Complex) ExtensionRangeArray() []proto.ExtensionRange { return extRange_Complex } +func (m *Complex) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Complex.Unmarshal(m, b) +} +func (m *Complex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Complex.Marshal(b, m, deterministic) +} +func (dst *Complex) XXX_Merge(src proto.Message) { + xxx_messageInfo_Complex.Merge(dst, src) +} +func (m *Complex) XXX_Size() int { + return xxx_messageInfo_Complex.Size(m) +} +func (m *Complex) XXX_DiscardUnknown() { + xxx_messageInfo_Complex.DiscardUnknown(m) +} + +var xxx_messageInfo_Complex proto.InternalMessageInfo func (m *Complex) GetImaginary() float64 { if m != nil && m.Imaginary != nil { @@ -620,134 +891,324 @@ var E_Complex_RealExtension = &proto.ExtensionDesc{ } type KnownTypes struct { - An *google_protobuf.Any `protobuf:"bytes,14,opt,name=an" json:"an,omitempty"` - Dur *google_protobuf1.Duration `protobuf:"bytes,1,opt,name=dur" json:"dur,omitempty"` - St *google_protobuf2.Struct `protobuf:"bytes,12,opt,name=st" json:"st,omitempty"` - Ts *google_protobuf3.Timestamp `protobuf:"bytes,2,opt,name=ts" json:"ts,omitempty"` - Lv *google_protobuf2.ListValue `protobuf:"bytes,15,opt,name=lv" json:"lv,omitempty"` - Val *google_protobuf2.Value `protobuf:"bytes,16,opt,name=val" json:"val,omitempty"` - Dbl *google_protobuf4.DoubleValue `protobuf:"bytes,3,opt,name=dbl" json:"dbl,omitempty"` - Flt *google_protobuf4.FloatValue `protobuf:"bytes,4,opt,name=flt" json:"flt,omitempty"` - I64 *google_protobuf4.Int64Value `protobuf:"bytes,5,opt,name=i64" json:"i64,omitempty"` - U64 *google_protobuf4.UInt64Value `protobuf:"bytes,6,opt,name=u64" json:"u64,omitempty"` - I32 
*google_protobuf4.Int32Value `protobuf:"bytes,7,opt,name=i32" json:"i32,omitempty"` - U32 *google_protobuf4.UInt32Value `protobuf:"bytes,8,opt,name=u32" json:"u32,omitempty"` - Bool *google_protobuf4.BoolValue `protobuf:"bytes,9,opt,name=bool" json:"bool,omitempty"` - Str *google_protobuf4.StringValue `protobuf:"bytes,10,opt,name=str" json:"str,omitempty"` - Bytes *google_protobuf4.BytesValue `protobuf:"bytes,11,opt,name=bytes" json:"bytes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *KnownTypes) Reset() { *m = KnownTypes{} } -func (m *KnownTypes) String() string { return proto.CompactTextString(m) } -func (*KnownTypes) ProtoMessage() {} -func (*KnownTypes) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8} } - -func (m *KnownTypes) GetAn() *google_protobuf.Any { + An *any.Any `protobuf:"bytes,14,opt,name=an" json:"an,omitempty"` + Dur *duration.Duration `protobuf:"bytes,1,opt,name=dur" json:"dur,omitempty"` + St *_struct.Struct `protobuf:"bytes,12,opt,name=st" json:"st,omitempty"` + Ts *timestamp.Timestamp `protobuf:"bytes,2,opt,name=ts" json:"ts,omitempty"` + Lv *_struct.ListValue `protobuf:"bytes,15,opt,name=lv" json:"lv,omitempty"` + Val *_struct.Value `protobuf:"bytes,16,opt,name=val" json:"val,omitempty"` + Dbl *wrappers.DoubleValue `protobuf:"bytes,3,opt,name=dbl" json:"dbl,omitempty"` + Flt *wrappers.FloatValue `protobuf:"bytes,4,opt,name=flt" json:"flt,omitempty"` + I64 *wrappers.Int64Value `protobuf:"bytes,5,opt,name=i64" json:"i64,omitempty"` + U64 *wrappers.UInt64Value `protobuf:"bytes,6,opt,name=u64" json:"u64,omitempty"` + I32 *wrappers.Int32Value `protobuf:"bytes,7,opt,name=i32" json:"i32,omitempty"` + U32 *wrappers.UInt32Value `protobuf:"bytes,8,opt,name=u32" json:"u32,omitempty"` + Bool *wrappers.BoolValue `protobuf:"bytes,9,opt,name=bool" json:"bool,omitempty"` + Str *wrappers.StringValue `protobuf:"bytes,10,opt,name=str" json:"str,omitempty"` + Bytes *wrappers.BytesValue `protobuf:"bytes,11,opt,name=bytes" json:"bytes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KnownTypes) Reset() { *m = KnownTypes{} } +func (m *KnownTypes) String() string { return proto.CompactTextString(m) } +func (*KnownTypes) ProtoMessage() {} +func (*KnownTypes) Descriptor() ([]byte, []int) { + return fileDescriptor_test_objects_a4d3e593ea3c686f, []int{8} +} +func (m *KnownTypes) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KnownTypes.Unmarshal(m, b) +} +func (m *KnownTypes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KnownTypes.Marshal(b, m, deterministic) +} +func (dst *KnownTypes) XXX_Merge(src proto.Message) { + xxx_messageInfo_KnownTypes.Merge(dst, src) +} +func (m *KnownTypes) XXX_Size() int { + return xxx_messageInfo_KnownTypes.Size(m) +} +func (m *KnownTypes) XXX_DiscardUnknown() { + xxx_messageInfo_KnownTypes.DiscardUnknown(m) +} + +var xxx_messageInfo_KnownTypes proto.InternalMessageInfo + +func (m *KnownTypes) GetAn() *any.Any { if m != nil { return m.An } return nil } -func (m *KnownTypes) GetDur() *google_protobuf1.Duration { +func (m *KnownTypes) GetDur() *duration.Duration { if m != nil { return m.Dur } return nil } -func (m *KnownTypes) GetSt() *google_protobuf2.Struct { +func (m *KnownTypes) GetSt() *_struct.Struct { if m != nil { return m.St } return nil } -func (m *KnownTypes) GetTs() *google_protobuf3.Timestamp { +func (m *KnownTypes) GetTs() *timestamp.Timestamp { if m != nil { return m.Ts } return nil 
} -func (m *KnownTypes) GetLv() *google_protobuf2.ListValue { +func (m *KnownTypes) GetLv() *_struct.ListValue { if m != nil { return m.Lv } return nil } -func (m *KnownTypes) GetVal() *google_protobuf2.Value { +func (m *KnownTypes) GetVal() *_struct.Value { if m != nil { return m.Val } return nil } -func (m *KnownTypes) GetDbl() *google_protobuf4.DoubleValue { +func (m *KnownTypes) GetDbl() *wrappers.DoubleValue { if m != nil { return m.Dbl } return nil } -func (m *KnownTypes) GetFlt() *google_protobuf4.FloatValue { +func (m *KnownTypes) GetFlt() *wrappers.FloatValue { if m != nil { return m.Flt } return nil } -func (m *KnownTypes) GetI64() *google_protobuf4.Int64Value { +func (m *KnownTypes) GetI64() *wrappers.Int64Value { if m != nil { return m.I64 } return nil } -func (m *KnownTypes) GetU64() *google_protobuf4.UInt64Value { +func (m *KnownTypes) GetU64() *wrappers.UInt64Value { if m != nil { return m.U64 } return nil } -func (m *KnownTypes) GetI32() *google_protobuf4.Int32Value { +func (m *KnownTypes) GetI32() *wrappers.Int32Value { if m != nil { return m.I32 } return nil } -func (m *KnownTypes) GetU32() *google_protobuf4.UInt32Value { +func (m *KnownTypes) GetU32() *wrappers.UInt32Value { if m != nil { return m.U32 } return nil } -func (m *KnownTypes) GetBool() *google_protobuf4.BoolValue { +func (m *KnownTypes) GetBool() *wrappers.BoolValue { if m != nil { return m.Bool } return nil } -func (m *KnownTypes) GetStr() *google_protobuf4.StringValue { +func (m *KnownTypes) GetStr() *wrappers.StringValue { if m != nil { return m.Str } return nil } -func (m *KnownTypes) GetBytes() *google_protobuf4.BytesValue { +func (m *KnownTypes) GetBytes() *wrappers.BytesValue { if m != nil { return m.Bytes } return nil } +// Test messages for marshaling/unmarshaling required fields. 
+type MsgWithRequired struct { + Str *string `protobuf:"bytes,1,req,name=str" json:"str,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MsgWithRequired) Reset() { *m = MsgWithRequired{} } +func (m *MsgWithRequired) String() string { return proto.CompactTextString(m) } +func (*MsgWithRequired) ProtoMessage() {} +func (*MsgWithRequired) Descriptor() ([]byte, []int) { + return fileDescriptor_test_objects_a4d3e593ea3c686f, []int{9} +} +func (m *MsgWithRequired) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MsgWithRequired.Unmarshal(m, b) +} +func (m *MsgWithRequired) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MsgWithRequired.Marshal(b, m, deterministic) +} +func (dst *MsgWithRequired) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgWithRequired.Merge(dst, src) +} +func (m *MsgWithRequired) XXX_Size() int { + return xxx_messageInfo_MsgWithRequired.Size(m) +} +func (m *MsgWithRequired) XXX_DiscardUnknown() { + xxx_messageInfo_MsgWithRequired.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgWithRequired proto.InternalMessageInfo + +func (m *MsgWithRequired) GetStr() string { + if m != nil && m.Str != nil { + return *m.Str + } + return "" +} + +type MsgWithIndirectRequired struct { + Subm *MsgWithRequired `protobuf:"bytes,1,opt,name=subm" json:"subm,omitempty"` + MapField map[string]*MsgWithRequired `protobuf:"bytes,2,rep,name=map_field,json=mapField" json:"map_field,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + SliceField []*MsgWithRequired `protobuf:"bytes,3,rep,name=slice_field,json=sliceField" json:"slice_field,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MsgWithIndirectRequired) Reset() { *m = MsgWithIndirectRequired{} } +func (m *MsgWithIndirectRequired) String() string { return proto.CompactTextString(m) } +func (*MsgWithIndirectRequired) ProtoMessage() {} +func (*MsgWithIndirectRequired) Descriptor() ([]byte, []int) { + return fileDescriptor_test_objects_a4d3e593ea3c686f, []int{10} +} +func (m *MsgWithIndirectRequired) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MsgWithIndirectRequired.Unmarshal(m, b) +} +func (m *MsgWithIndirectRequired) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MsgWithIndirectRequired.Marshal(b, m, deterministic) +} +func (dst *MsgWithIndirectRequired) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgWithIndirectRequired.Merge(dst, src) +} +func (m *MsgWithIndirectRequired) XXX_Size() int { + return xxx_messageInfo_MsgWithIndirectRequired.Size(m) +} +func (m *MsgWithIndirectRequired) XXX_DiscardUnknown() { + xxx_messageInfo_MsgWithIndirectRequired.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgWithIndirectRequired proto.InternalMessageInfo + +func (m *MsgWithIndirectRequired) GetSubm() *MsgWithRequired { + if m != nil { + return m.Subm + } + return nil +} + +func (m *MsgWithIndirectRequired) GetMapField() map[string]*MsgWithRequired { + if m != nil { + return m.MapField + } + return nil +} + +func (m *MsgWithIndirectRequired) GetSliceField() []*MsgWithRequired { + if m != nil { + return m.SliceField + } + return nil +} + +type MsgWithRequiredBytes struct { + Byts []byte `protobuf:"bytes,1,req,name=byts" json:"byts,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 
`json:"-"` +} + +func (m *MsgWithRequiredBytes) Reset() { *m = MsgWithRequiredBytes{} } +func (m *MsgWithRequiredBytes) String() string { return proto.CompactTextString(m) } +func (*MsgWithRequiredBytes) ProtoMessage() {} +func (*MsgWithRequiredBytes) Descriptor() ([]byte, []int) { + return fileDescriptor_test_objects_a4d3e593ea3c686f, []int{11} +} +func (m *MsgWithRequiredBytes) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MsgWithRequiredBytes.Unmarshal(m, b) +} +func (m *MsgWithRequiredBytes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MsgWithRequiredBytes.Marshal(b, m, deterministic) +} +func (dst *MsgWithRequiredBytes) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgWithRequiredBytes.Merge(dst, src) +} +func (m *MsgWithRequiredBytes) XXX_Size() int { + return xxx_messageInfo_MsgWithRequiredBytes.Size(m) +} +func (m *MsgWithRequiredBytes) XXX_DiscardUnknown() { + xxx_messageInfo_MsgWithRequiredBytes.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgWithRequiredBytes proto.InternalMessageInfo + +func (m *MsgWithRequiredBytes) GetByts() []byte { + if m != nil { + return m.Byts + } + return nil +} + +type MsgWithRequiredWKT struct { + Str *wrappers.StringValue `protobuf:"bytes,1,req,name=str" json:"str,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MsgWithRequiredWKT) Reset() { *m = MsgWithRequiredWKT{} } +func (m *MsgWithRequiredWKT) String() string { return proto.CompactTextString(m) } +func (*MsgWithRequiredWKT) ProtoMessage() {} +func (*MsgWithRequiredWKT) Descriptor() ([]byte, []int) { + return fileDescriptor_test_objects_a4d3e593ea3c686f, []int{12} +} +func (m *MsgWithRequiredWKT) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MsgWithRequiredWKT.Unmarshal(m, b) +} +func (m *MsgWithRequiredWKT) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MsgWithRequiredWKT.Marshal(b, m, deterministic) +} +func (dst *MsgWithRequiredWKT) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgWithRequiredWKT.Merge(dst, src) +} +func (m *MsgWithRequiredWKT) XXX_Size() int { + return xxx_messageInfo_MsgWithRequiredWKT.Size(m) +} +func (m *MsgWithRequiredWKT) XXX_DiscardUnknown() { + xxx_messageInfo_MsgWithRequiredWKT.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgWithRequiredWKT proto.InternalMessageInfo + +func (m *MsgWithRequiredWKT) GetStr() *wrappers.StringValue { + if m != nil { + return m.Str + } + return nil +} + var E_Name = &proto.ExtensionDesc{ ExtendedType: (*Real)(nil), ExtensionType: (*string)(nil), @@ -757,96 +1218,132 @@ var E_Name = &proto.ExtensionDesc{ Filename: "test_objects.proto", } +var E_Extm = &proto.ExtensionDesc{ + ExtendedType: (*Real)(nil), + ExtensionType: (*MsgWithRequired)(nil), + Field: 125, + Name: "jsonpb.extm", + Tag: "bytes,125,opt,name=extm", + Filename: "test_objects.proto", +} + func init() { proto.RegisterType((*Simple)(nil), "jsonpb.Simple") proto.RegisterType((*NonFinites)(nil), "jsonpb.NonFinites") proto.RegisterType((*Repeats)(nil), "jsonpb.Repeats") proto.RegisterType((*Widget)(nil), "jsonpb.Widget") proto.RegisterType((*Maps)(nil), "jsonpb.Maps") + proto.RegisterMapType((map[bool]*Simple)(nil), "jsonpb.Maps.MBoolSimpleEntry") + proto.RegisterMapType((map[int64]string)(nil), "jsonpb.Maps.MInt64StrEntry") proto.RegisterType((*MsgWithOneof)(nil), "jsonpb.MsgWithOneof") proto.RegisterType((*Real)(nil), "jsonpb.Real") proto.RegisterType((*Complex)(nil), "jsonpb.Complex") 
proto.RegisterType((*KnownTypes)(nil), "jsonpb.KnownTypes") + proto.RegisterType((*MsgWithRequired)(nil), "jsonpb.MsgWithRequired") + proto.RegisterType((*MsgWithIndirectRequired)(nil), "jsonpb.MsgWithIndirectRequired") + proto.RegisterMapType((map[string]*MsgWithRequired)(nil), "jsonpb.MsgWithIndirectRequired.MapFieldEntry") + proto.RegisterType((*MsgWithRequiredBytes)(nil), "jsonpb.MsgWithRequiredBytes") + proto.RegisterType((*MsgWithRequiredWKT)(nil), "jsonpb.MsgWithRequiredWKT") proto.RegisterEnum("jsonpb.Widget_Color", Widget_Color_name, Widget_Color_value) proto.RegisterExtension(E_Complex_RealExtension) proto.RegisterExtension(E_Name) -} - -func init() { proto.RegisterFile("test_objects.proto", fileDescriptor1) } - -var fileDescriptor1 = []byte{ - // 1160 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x95, 0x41, 0x73, 0xdb, 0x44, - 0x14, 0xc7, 0x23, 0xc9, 0x92, 0xed, 0x75, 0x92, 0x9a, 0x6d, 0xda, 0x2a, 0x26, 0x80, 0xc6, 0x94, - 0x22, 0x0a, 0x75, 0x07, 0xc7, 0xe3, 0x61, 0x0a, 0x97, 0xa4, 0x71, 0x29, 0x43, 0x13, 0x98, 0x4d, - 0x43, 0x8f, 0x1e, 0x39, 0x5a, 0xbb, 0x2a, 0xf2, 0xae, 0x67, 0x77, 0x95, 0xd4, 0x03, 0x87, 0x9c, - 0x39, 0x32, 0x7c, 0x05, 0xf8, 0x08, 0x1c, 0xf8, 0x74, 0xcc, 0xdb, 0x95, 0xac, 0xc4, 0x8e, 0x4f, - 0xf1, 0x7b, 0xef, 0xff, 0xfe, 0x59, 0xed, 0x6f, 0x77, 0x1f, 0xc2, 0x8a, 0x4a, 0x35, 0xe4, 0xa3, - 0x77, 0xf4, 0x5c, 0xc9, 0xce, 0x4c, 0x70, 0xc5, 0xb1, 0xf7, 0x4e, 0x72, 0x36, 0x1b, 0xb5, 0x76, - 0x27, 0x9c, 0x4f, 0x52, 0xfa, 0x54, 0x67, 0x47, 0xd9, 0xf8, 0x69, 0xc4, 0xe6, 0x46, 0xd2, 0xfa, - 0x78, 0xb9, 0x14, 0x67, 0x22, 0x52, 0x09, 0x67, 0x79, 0x7d, 0x6f, 0xb9, 0x2e, 0x95, 0xc8, 0xce, - 0x55, 0x5e, 0xfd, 0x64, 0xb9, 0xaa, 0x92, 0x29, 0x95, 0x2a, 0x9a, 0xce, 0xd6, 0xd9, 0x5f, 0x8a, - 0x68, 0x36, 0xa3, 0x22, 0x5f, 0x61, 0xfb, 0x6f, 0x1b, 0x79, 0xa7, 0xc9, 0x74, 0x96, 0x52, 0x7c, - 0x0f, 0x79, 0x7c, 0x38, 0xe2, 0x3c, 0xf5, 0xad, 0xc0, 0x0a, 0x6b, 0xc4, 0xe5, 0x87, 0x9c, 0xa7, - 0xf8, 0x01, 0xaa, 0xf2, 0x61, 0xc2, 0xd4, 0x7e, 0xd7, 0xb7, 0x03, 0x2b, 0x74, 0x89, 0xc7, 0x7f, - 0x80, 0x68, 0x51, 0xe8, 0xf7, 0x7c, 0x27, 0xb0, 0x42, 0xc7, 0x14, 0xfa, 0x3d, 0xbc, 0x8b, 0x6a, - 0x7c, 0x98, 0x99, 0x96, 0x4a, 0x60, 0x85, 0x5b, 0xa4, 0xca, 0xcf, 0x74, 0x58, 0x96, 0xfa, 0x3d, - 0xdf, 0x0d, 0xac, 0xb0, 0x92, 0x97, 0x8a, 0x2e, 0x69, 0xba, 0xbc, 0xc0, 0x0a, 0x3f, 0x20, 0x55, - 0x7e, 0x7a, 0xad, 0x4b, 0x9a, 0xae, 0x6a, 0x60, 0x85, 0x38, 0x2f, 0xf5, 0x7b, 0x66, 0x11, 0xe3, - 0x94, 0x47, 0xca, 0xaf, 0x05, 0x56, 0x68, 0x13, 0x8f, 0xbf, 0x80, 0xc8, 0xf4, 0xc4, 0x3c, 0x1b, - 0xa5, 0xd4, 0xaf, 0x07, 0x56, 0x68, 0x91, 0x2a, 0x3f, 0xd2, 0x61, 0x6e, 0xa7, 0x44, 0xc2, 0x26, - 0x3e, 0x0a, 0xac, 0xb0, 0x0e, 0x76, 0x3a, 0x34, 0x76, 0xa3, 0xb9, 0xa2, 0xd2, 0x6f, 0x04, 0x56, - 0xb8, 0x49, 0x3c, 0x7e, 0x08, 0x51, 0xfb, 0x4f, 0x0b, 0xa1, 0x13, 0xce, 0x5e, 0x24, 0x2c, 0x51, - 0x54, 0xe2, 0xbb, 0xc8, 0x1d, 0x0f, 0x59, 0xc4, 0xf4, 0x56, 0xd9, 0xa4, 0x32, 0x3e, 0x89, 0x18, - 0x6c, 0xe0, 0x78, 0x38, 0x4b, 0xd8, 0x58, 0x6f, 0x94, 0x4d, 0xdc, 0xf1, 0xcf, 0x09, 0x1b, 0x9b, - 0x34, 0x83, 0xb4, 0x93, 0xa7, 0x4f, 0x20, 0x7d, 0x17, 0xb9, 0xb1, 0xb6, 0xa8, 0xe8, 0xd5, 0x55, - 0xe2, 0xdc, 0x22, 0x36, 0x16, 0xae, 0xce, 0xba, 0x71, 0x61, 0x11, 0x1b, 0x0b, 0x2f, 0x4f, 0x83, - 0x45, 0xfb, 0x1f, 0x1b, 0x55, 0x09, 0x9d, 0xd1, 0x48, 0x49, 0x90, 0x88, 0x82, 0x9e, 0x03, 0xf4, - 0x44, 0x41, 0x4f, 0x2c, 0xe8, 0x39, 0x40, 0x4f, 0x2c, 0xe8, 0x89, 0x05, 0x3d, 0x07, 0xe8, 0x89, - 0x05, 0x3d, 0x51, 0xd2, 0x73, 0x80, 0x9e, 0x28, 0xe9, 0x89, 0x92, 0x9e, 0x03, 0xf4, 0x44, 
0x49, - 0x4f, 0x94, 0xf4, 0x1c, 0xa0, 0x27, 0x4e, 0xaf, 0x75, 0x2d, 0xe8, 0x39, 0x40, 0x4f, 0x94, 0xf4, - 0xc4, 0x82, 0x9e, 0x03, 0xf4, 0xc4, 0x82, 0x9e, 0x28, 0xe9, 0x39, 0x40, 0x4f, 0x94, 0xf4, 0x44, - 0x49, 0xcf, 0x01, 0x7a, 0xa2, 0xa4, 0x27, 0x16, 0xf4, 0x1c, 0xa0, 0x27, 0x0c, 0xbd, 0x7f, 0x6d, - 0xe4, 0xbd, 0x49, 0xe2, 0x09, 0x55, 0xf8, 0x31, 0x72, 0xcf, 0x79, 0xca, 0x85, 0x26, 0xb7, 0xdd, - 0xdd, 0xe9, 0x98, 0x2b, 0xda, 0x31, 0xe5, 0xce, 0x73, 0xa8, 0x11, 0x23, 0xc1, 0x4f, 0xc0, 0xcf, - 0xa8, 0x61, 0xf3, 0xd6, 0xa9, 0x3d, 0xa1, 0xff, 0xe2, 0x47, 0xc8, 0x93, 0xfa, 0x2a, 0xe9, 0x53, - 0xd5, 0xe8, 0x6e, 0x17, 0x6a, 0x73, 0xc1, 0x48, 0x5e, 0xc5, 0x5f, 0x98, 0x0d, 0xd1, 0x4a, 0x58, - 0xe7, 0xaa, 0x12, 0x36, 0x28, 0x97, 0x56, 0x85, 0x01, 0xec, 0xef, 0x68, 0xcf, 0x3b, 0x85, 0x32, - 0xe7, 0x4e, 0x8a, 0x3a, 0xfe, 0x0a, 0xd5, 0xc5, 0xb0, 0x10, 0xdf, 0xd3, 0xb6, 0x2b, 0xe2, 0x9a, - 0xc8, 0x7f, 0xb5, 0x3f, 0x43, 0xae, 0x59, 0x74, 0x15, 0x39, 0x64, 0x70, 0xd4, 0xdc, 0xc0, 0x75, - 0xe4, 0x7e, 0x4f, 0x06, 0x83, 0x93, 0xa6, 0x85, 0x6b, 0xa8, 0x72, 0xf8, 0xea, 0x6c, 0xd0, 0xb4, - 0xdb, 0x7f, 0xd9, 0xa8, 0x72, 0x1c, 0xcd, 0x24, 0xfe, 0x16, 0x35, 0xa6, 0xe6, 0xb8, 0xc0, 0xde, - 0xeb, 0x33, 0xd6, 0xe8, 0x7e, 0x58, 0xf8, 0x83, 0xa4, 0x73, 0xac, 0xcf, 0xcf, 0xa9, 0x12, 0x03, - 0xa6, 0xc4, 0x9c, 0xd4, 0xa7, 0x45, 0x8c, 0x0f, 0xd0, 0xd6, 0x54, 0x9f, 0xcd, 0xe2, 0xab, 0x6d, - 0xdd, 0xfe, 0xd1, 0xcd, 0x76, 0x38, 0xaf, 0xe6, 0xb3, 0x8d, 0x41, 0x63, 0x5a, 0x66, 0x5a, 0xdf, - 0xa1, 0xed, 0x9b, 0xfe, 0xb8, 0x89, 0x9c, 0x5f, 0xe9, 0x5c, 0x63, 0x74, 0x08, 0xfc, 0xc4, 0x3b, - 0xc8, 0xbd, 0x88, 0xd2, 0x8c, 0xea, 0xeb, 0x57, 0x27, 0x26, 0x78, 0x66, 0x7f, 0x63, 0xb5, 0x4e, - 0x50, 0x73, 0xd9, 0xfe, 0x7a, 0x7f, 0xcd, 0xf4, 0x3f, 0xbc, 0xde, 0xbf, 0x0a, 0xa5, 0xf4, 0x6b, - 0xff, 0x61, 0xa1, 0xcd, 0x63, 0x39, 0x79, 0x93, 0xa8, 0xb7, 0x3f, 0x31, 0xca, 0xc7, 0xf8, 0x3e, - 0x72, 0x55, 0xa2, 0x52, 0xaa, 0xed, 0xea, 0x2f, 0x37, 0x88, 0x09, 0xb1, 0x8f, 0x3c, 0x19, 0xa5, - 0x91, 0x98, 0x6b, 0x4f, 0xe7, 0xe5, 0x06, 0xc9, 0x63, 0xdc, 0x42, 0xd5, 0xe7, 0x3c, 0x83, 0x95, - 0xe8, 0x67, 0x01, 0x7a, 0x8a, 0x04, 0xfe, 0x14, 0x6d, 0xbe, 0xe5, 0x53, 0x3a, 0x8c, 0xe2, 0x58, - 0x50, 0x29, 0xf5, 0x0b, 0x01, 0x82, 0x06, 0x64, 0x0f, 0x4c, 0xf2, 0xb0, 0x8a, 0xdc, 0x8c, 0x25, - 0x9c, 0xb5, 0x1f, 0xa1, 0x0a, 0xa1, 0x51, 0x5a, 0x7e, 0xbe, 0x65, 0xde, 0x08, 0x1d, 0x3c, 0xae, - 0xd5, 0xe2, 0xe6, 0xd5, 0xd5, 0xd5, 0x95, 0xdd, 0xbe, 0x84, 0xff, 0x08, 0x5f, 0xf2, 0x1e, 0xef, - 0xa1, 0x7a, 0x32, 0x8d, 0x26, 0x09, 0x83, 0x95, 0x19, 0x79, 0x99, 0x28, 0x5b, 0xba, 0x47, 0x68, - 0x5b, 0xd0, 0x28, 0x1d, 0xd2, 0xf7, 0x8a, 0x32, 0x99, 0x70, 0x86, 0x37, 0xcb, 0x23, 0x15, 0xa5, - 0xfe, 0x6f, 0x37, 0xcf, 0x64, 0x6e, 0x4f, 0xb6, 0xa0, 0x69, 0x50, 0xf4, 0xb4, 0xff, 0x73, 0x11, - 0xfa, 0x91, 0xf1, 0x4b, 0xf6, 0x7a, 0x3e, 0xa3, 0x12, 0x3f, 0x44, 0x76, 0xc4, 0xfc, 0x6d, 0xdd, - 0xba, 0xd3, 0x31, 0xf3, 0xa9, 0x53, 0xcc, 0xa7, 0xce, 0x01, 0x9b, 0x13, 0x3b, 0x62, 0xf8, 0x4b, - 0xe4, 0xc4, 0x99, 0xb9, 0xa5, 0x8d, 0xee, 0xee, 0x8a, 0xec, 0x28, 0x9f, 0x92, 0x04, 0x54, 0xf8, - 0x73, 0x64, 0x4b, 0xe5, 0x6f, 0x6a, 0xed, 0x83, 0x15, 0xed, 0xa9, 0x9e, 0x98, 0xc4, 0x96, 0x70, - 0xfb, 0x6d, 0x25, 0x73, 0xbe, 0xad, 0x15, 0xe1, 0xeb, 0x62, 0x78, 0x12, 0x5b, 0x49, 0xd0, 0xa6, - 0x17, 0xfe, 0x9d, 0x35, 0xda, 0x57, 0x89, 0x54, 0xbf, 0xc0, 0x0e, 0x13, 0x3b, 0xbd, 0xc0, 0x21, - 0x72, 0x2e, 0xa2, 0xd4, 0x6f, 0x6a, 0xf1, 0xfd, 0x15, 0xb1, 0x11, 0x82, 0x04, 0x77, 0x90, 0x13, - 0x8f, 0x52, 0xcd, 0xbc, 0xd1, 0xdd, 0x5b, 0xfd, 0x2e, 0xfd, 0xc8, 0xe5, 0xfa, 0x78, 0x94, 0xe2, - 0x27, 0xc8, 0x19, 
0xa7, 0x4a, 0x1f, 0x01, 0xb8, 0x70, 0xcb, 0x7a, 0xfd, 0x5c, 0xe6, 0xf2, 0x71, - 0xaa, 0x40, 0x9e, 0xe4, 0xb3, 0xf5, 0x36, 0xb9, 0xbe, 0x42, 0xb9, 0x3c, 0xe9, 0xf7, 0x60, 0x35, - 0x59, 0xbf, 0xa7, 0xa7, 0xca, 0x6d, 0xab, 0x39, 0xbb, 0xae, 0xcf, 0xfa, 0x3d, 0x6d, 0xbf, 0xdf, - 0xd5, 0x43, 0x78, 0x8d, 0xfd, 0x7e, 0xb7, 0xb0, 0xdf, 0xef, 0x6a, 0xfb, 0xfd, 0xae, 0x9e, 0xcc, - 0xeb, 0xec, 0x17, 0xfa, 0x4c, 0xeb, 0x2b, 0x7a, 0x84, 0xd5, 0xd7, 0x6c, 0x3a, 0xdc, 0x61, 0x23, - 0xd7, 0x3a, 0xf0, 0x87, 0xd7, 0x08, 0xad, 0xf1, 0x37, 0x63, 0x21, 0xf7, 0x97, 0x4a, 0xe0, 0xaf, - 0x91, 0x5b, 0x0e, 0xf7, 0xdb, 0x3e, 0x40, 0x8f, 0x0b, 0xd3, 0x60, 0x94, 0xcf, 0x02, 0x54, 0x61, - 0xd1, 0x94, 0x2e, 0x1d, 0xfc, 0xdf, 0xf5, 0x0b, 0xa3, 0x2b, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, - 0xd5, 0x39, 0x32, 0x09, 0xf9, 0x09, 0x00, 0x00, + proto.RegisterExtension(E_Extm) +} + +func init() { proto.RegisterFile("test_objects.proto", fileDescriptor_test_objects_a4d3e593ea3c686f) } + +var fileDescriptor_test_objects_a4d3e593ea3c686f = []byte{ + // 1460 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0xdd, 0x72, 0xdb, 0x44, + 0x14, 0x8e, 0x24, 0xcb, 0xb6, 0x8e, 0xf3, 0xd7, 0x6d, 0xda, 0x2a, 0xa1, 0x14, 0x8d, 0x5b, 0x8a, + 0x69, 0x89, 0x3b, 0x38, 0x1e, 0x4f, 0x29, 0xdc, 0x34, 0x4d, 0x4a, 0x4b, 0xdb, 0xc0, 0x6c, 0x52, + 0x7a, 0xe9, 0x91, 0x23, 0x39, 0x55, 0x91, 0xb4, 0x66, 0x77, 0x9d, 0xd4, 0x03, 0xcc, 0xe4, 0x19, + 0x18, 0x9e, 0x80, 0x0b, 0x6e, 0xb9, 0xe3, 0x82, 0xb7, 0xe0, 0x8d, 0x98, 0x3d, 0xbb, 0xf2, 0x5f, + 0xe2, 0x81, 0x2b, 0x7b, 0xf7, 0xfb, 0xd9, 0xd5, 0x9e, 0x4f, 0x67, 0x05, 0x44, 0xc6, 0x42, 0x76, + 0x59, 0xef, 0x5d, 0x7c, 0x2c, 0x45, 0x73, 0xc0, 0x99, 0x64, 0xa4, 0xfc, 0x4e, 0xb0, 0x7c, 0xd0, + 0xdb, 0xda, 0x3c, 0x61, 0xec, 0x24, 0x8d, 0x1f, 0xe0, 0x6c, 0x6f, 0xd8, 0x7f, 0x10, 0xe6, 0x23, + 0x4d, 0xd9, 0xba, 0x35, 0x0f, 0x45, 0x43, 0x1e, 0xca, 0x84, 0xe5, 0x06, 0xbf, 0x39, 0x8f, 0x0b, + 0xc9, 0x87, 0xc7, 0xd2, 0xa0, 0x1f, 0xcd, 0xa3, 0x32, 0xc9, 0x62, 0x21, 0xc3, 0x6c, 0xb0, 0xc8, + 0xfe, 0x8c, 0x87, 0x83, 0x41, 0xcc, 0xcd, 0x0e, 0xeb, 0x7f, 0x96, 0xa0, 0x7c, 0x98, 0x64, 0x83, + 0x34, 0x26, 0xd7, 0xa0, 0xcc, 0xba, 0x3d, 0xc6, 0x52, 0xdf, 0x0a, 0xac, 0x46, 0x95, 0xba, 0x6c, + 0x97, 0xb1, 0x94, 0xdc, 0x80, 0x0a, 0xeb, 0x26, 0xb9, 0xdc, 0x69, 0xf9, 0x76, 0x60, 0x35, 0x5c, + 0x5a, 0x66, 0xcf, 0xd5, 0x88, 0xdc, 0x82, 0x9a, 0x01, 0xba, 0x42, 0x72, 0xdf, 0x41, 0xd0, 0xd3, + 0xe0, 0xa1, 0xe4, 0x63, 0x61, 0xa7, 0xed, 0x97, 0x02, 0xab, 0xe1, 0x68, 0x61, 0xa7, 0x3d, 0x16, + 0x76, 0xda, 0x28, 0x74, 0x11, 0xf4, 0x34, 0xa8, 0x84, 0x9b, 0x50, 0x65, 0xdd, 0xa1, 0x5e, 0xb2, + 0x1c, 0x58, 0x8d, 0x15, 0x5a, 0x61, 0xaf, 0x71, 0x48, 0x02, 0x58, 0x2e, 0x20, 0xd4, 0x56, 0x10, + 0x06, 0x03, 0xcf, 0x88, 0x3b, 0x6d, 0xbf, 0x1a, 0x58, 0x8d, 0x92, 0x11, 0x77, 0xda, 0x13, 0xb1, + 0x59, 0xd8, 0x43, 0x18, 0x0c, 0x3c, 0x16, 0x0b, 0xbd, 0x32, 0x04, 0x56, 0xe3, 0x0a, 0xad, 0xb0, + 0xc3, 0xa9, 0x95, 0xc5, 0x64, 0xe5, 0x1a, 0xc2, 0x60, 0xe0, 0x19, 0x71, 0xa7, 0xed, 0x2f, 0x07, + 0x56, 0x83, 0x18, 0x71, 0xb1, 0xb2, 0x98, 0xac, 0xbc, 0x82, 0x30, 0x18, 0x78, 0x7c, 0x58, 0xfd, + 0x94, 0x85, 0xd2, 0x5f, 0x0d, 0xac, 0x86, 0x4d, 0xcb, 0xec, 0xa9, 0x1a, 0xe9, 0xc3, 0x42, 0x00, + 0x95, 0x6b, 0x08, 0x7a, 0x1a, 0x1c, 0xaf, 0x1a, 0xb1, 0x61, 0x2f, 0x8d, 0xfd, 0xf5, 0xc0, 0x6a, + 0x58, 0xb4, 0xc2, 0xf6, 0x70, 0xa8, 0x57, 0xd5, 0x10, 0x6a, 0xaf, 0x20, 0x0c, 0x06, 0x9e, 0x6c, + 0x59, 0xf2, 0x24, 0x3f, 0xf1, 0x49, 0x60, 0x35, 0x3c, 0xb5, 0x65, 0x1c, 0xea, 0x0d, 0xf5, 0x46, + 0x32, 0x16, 0xfe, 0xd5, 0xc0, 
0x6a, 0x2c, 0xd3, 0x32, 0xdb, 0x55, 0xa3, 0xfa, 0xaf, 0x16, 0xc0, + 0x01, 0xcb, 0x9f, 0x26, 0x79, 0x22, 0x63, 0x41, 0xae, 0x82, 0xdb, 0xef, 0xe6, 0x61, 0x8e, 0xa1, + 0xb1, 0x69, 0xa9, 0x7f, 0x10, 0xe6, 0x2a, 0x4a, 0xfd, 0xee, 0x20, 0xc9, 0xfb, 0x18, 0x19, 0x9b, + 0xba, 0xfd, 0xef, 0x92, 0xbc, 0xaf, 0xa7, 0x73, 0x35, 0xed, 0x98, 0xe9, 0x03, 0x35, 0x7d, 0x15, + 0xdc, 0x08, 0x2d, 0x4a, 0xb8, 0xc1, 0x52, 0x64, 0x2c, 0x22, 0x6d, 0xe1, 0xe2, 0xac, 0x1b, 0x15, + 0x16, 0x91, 0xb6, 0x28, 0x9b, 0x69, 0x65, 0x51, 0xff, 0xc3, 0x86, 0x0a, 0x8d, 0x07, 0x71, 0x28, + 0x85, 0xa2, 0xf0, 0x22, 0xc7, 0x8e, 0xca, 0x31, 0x2f, 0x72, 0xcc, 0xc7, 0x39, 0x76, 0x54, 0x8e, + 0xb9, 0xce, 0x71, 0x01, 0x74, 0xda, 0xbe, 0x13, 0x38, 0x2a, 0xa7, 0x5c, 0xe7, 0x74, 0x13, 0xaa, + 0xbc, 0xc8, 0x61, 0x29, 0x70, 0x54, 0x0e, 0xb9, 0xc9, 0xe1, 0x18, 0xea, 0xb4, 0x7d, 0x37, 0x70, + 0x54, 0xca, 0xb8, 0x49, 0x19, 0x42, 0xa2, 0x48, 0xaf, 0xa3, 0x32, 0xc4, 0x0f, 0xa7, 0x54, 0x26, + 0x21, 0x95, 0xc0, 0x51, 0x09, 0xe1, 0x26, 0x21, 0xb8, 0x09, 0x5d, 0xff, 0x6a, 0xe0, 0xa8, 0xfa, + 0x73, 0x5d, 0x7f, 0xd4, 0x98, 0xfa, 0x7a, 0x81, 0xa3, 0xea, 0xcb, 0x4d, 0x7d, 0xb5, 0x9d, 0xae, + 0x1e, 0x04, 0x8e, 0xaa, 0x1e, 0x9f, 0x54, 0x8f, 0x9b, 0xea, 0xd5, 0x02, 0x47, 0x55, 0x8f, 0xeb, + 0xea, 0xfd, 0x65, 0x43, 0xf9, 0x4d, 0x12, 0x9d, 0xc4, 0x92, 0xdc, 0x03, 0xf7, 0x98, 0xa5, 0x8c, + 0x63, 0xe5, 0x56, 0x5b, 0x1b, 0x4d, 0xdd, 0xac, 0x9a, 0x1a, 0x6e, 0x3e, 0x51, 0x18, 0xd5, 0x14, + 0xb2, 0xad, 0xfc, 0x34, 0x5b, 0x1d, 0xde, 0x22, 0x76, 0x99, 0xe3, 0x2f, 0xb9, 0x0b, 0x65, 0x81, + 0x4d, 0x05, 0xdf, 0xa2, 0x5a, 0x6b, 0xb5, 0x60, 0xeb, 0x56, 0x43, 0x0d, 0x4a, 0x3e, 0xd5, 0x07, + 0x82, 0x4c, 0xb5, 0xcf, 0x8b, 0x4c, 0x75, 0x40, 0x86, 0x5a, 0xe1, 0xba, 0xc0, 0xfe, 0x06, 0x7a, + 0xae, 0x15, 0x4c, 0x53, 0x77, 0x5a, 0xe0, 0xe4, 0x33, 0xf0, 0x78, 0xb7, 0x20, 0x5f, 0x43, 0xdb, + 0x0b, 0xe4, 0x2a, 0x37, 0xff, 0xea, 0x1f, 0x83, 0xab, 0x37, 0x5d, 0x01, 0x87, 0xee, 0xef, 0xad, + 0x2f, 0x11, 0x0f, 0xdc, 0xaf, 0xe9, 0xfe, 0xfe, 0xc1, 0xba, 0x45, 0xaa, 0x50, 0xda, 0x7d, 0xf9, + 0x7a, 0x7f, 0xdd, 0xae, 0xff, 0x66, 0x43, 0xe9, 0x55, 0x38, 0x10, 0xe4, 0x4b, 0xa8, 0x65, 0x53, + 0xdd, 0xcb, 0x42, 0xff, 0x0f, 0x0a, 0x7f, 0x45, 0x69, 0xbe, 0x2a, 0x5a, 0xd9, 0x7e, 0x2e, 0xf9, + 0x88, 0x7a, 0xd9, 0xb8, 0xb5, 0x3d, 0x86, 0x95, 0x0c, 0xb3, 0x59, 0x3c, 0xb5, 0x8d, 0xf2, 0x0f, + 0x67, 0xe5, 0x2a, 0xaf, 0xfa, 0xb1, 0xb5, 0x41, 0x2d, 0x9b, 0xcc, 0x6c, 0x7d, 0x05, 0xab, 0xb3, + 0xfe, 0x64, 0x1d, 0x9c, 0x1f, 0xe2, 0x11, 0x96, 0xd1, 0xa1, 0xea, 0x2f, 0xd9, 0x00, 0xf7, 0x34, + 0x4c, 0x87, 0x31, 0xbe, 0x7e, 0x1e, 0xd5, 0x83, 0x47, 0xf6, 0x43, 0x6b, 0xeb, 0x00, 0xd6, 0xe7, + 0xed, 0xa7, 0xf5, 0x55, 0xad, 0xbf, 0x33, 0xad, 0xbf, 0x58, 0x94, 0x89, 0x5f, 0xfd, 0x1f, 0x0b, + 0x96, 0x5f, 0x89, 0x93, 0x37, 0x89, 0x7c, 0xfb, 0x6d, 0x1e, 0xb3, 0x3e, 0xb9, 0x0e, 0xae, 0x4c, + 0x64, 0x1a, 0xa3, 0x9d, 0xf7, 0x6c, 0x89, 0xea, 0x21, 0xf1, 0xa1, 0x2c, 0xc2, 0x34, 0xe4, 0x23, + 0xf4, 0x74, 0x9e, 0x2d, 0x51, 0x33, 0x26, 0x5b, 0x50, 0x79, 0xc2, 0x86, 0x6a, 0x27, 0xd8, 0x16, + 0x94, 0xa6, 0x98, 0x20, 0xb7, 0x61, 0xf9, 0x2d, 0xcb, 0xe2, 0x6e, 0x18, 0x45, 0x3c, 0x16, 0x02, + 0x3b, 0x84, 0x22, 0xd4, 0xd4, 0xec, 0x63, 0x3d, 0x49, 0xf6, 0xe1, 0x4a, 0x26, 0x4e, 0xba, 0x67, + 0x89, 0x7c, 0xdb, 0xe5, 0xf1, 0x8f, 0xc3, 0x84, 0xc7, 0x11, 0x76, 0x8d, 0x5a, 0xeb, 0xc6, 0xf8, + 0x60, 0xf5, 0x1e, 0xa9, 0x81, 0x9f, 0x2d, 0xd1, 0xb5, 0x6c, 0x76, 0x6a, 0xb7, 0x02, 0xee, 0x30, + 0x4f, 0x58, 0x5e, 0xbf, 0x0b, 0x25, 0x1a, 0x87, 0xe9, 0xe4, 0x14, 0x2d, 0xdd, 0x6a, 0x70, 0x70, + 0xaf, 0x5a, 0x8d, 0xd6, 0xcf, 0xcf, 0xcf, 0xcf, 0xed, 
0xfa, 0x99, 0xda, 0xb8, 0x3a, 0x90, 0xf7, + 0xe4, 0x26, 0x78, 0x49, 0x16, 0x9e, 0x24, 0xb9, 0x7a, 0x40, 0x4d, 0x9f, 0x4c, 0x4c, 0x24, 0xad, + 0x3d, 0x58, 0xe5, 0x71, 0x98, 0x76, 0xe3, 0xf7, 0x32, 0xce, 0x45, 0xc2, 0x72, 0xb2, 0x3c, 0x49, + 0x66, 0x98, 0xfa, 0x3f, 0xcd, 0x46, 0xdb, 0xd8, 0xd3, 0x15, 0x25, 0xda, 0x2f, 0x34, 0xf5, 0xbf, + 0x5d, 0x80, 0x17, 0x39, 0x3b, 0xcb, 0x8f, 0x46, 0x83, 0x58, 0x90, 0x3b, 0x60, 0x87, 0x39, 0x5e, + 0x1b, 0xb5, 0xd6, 0x46, 0x53, 0x5f, 0xf8, 0xcd, 0xe2, 0xc2, 0x6f, 0x3e, 0xce, 0x47, 0xd4, 0x0e, + 0x73, 0x72, 0x1f, 0x9c, 0x68, 0xa8, 0x5f, 0xf6, 0x5a, 0x6b, 0xf3, 0x02, 0x6d, 0xcf, 0x7c, 0x76, + 0x50, 0xc5, 0x22, 0x9f, 0x80, 0x2d, 0x24, 0xde, 0x62, 0xea, 0x0c, 0xe7, 0xb9, 0x87, 0xf8, 0x09, + 0x42, 0x6d, 0xa1, 0x9a, 0x88, 0x2d, 0x85, 0x89, 0xc9, 0xd6, 0x05, 0xe2, 0x51, 0xf1, 0x35, 0x42, + 0x6d, 0x29, 0x14, 0x37, 0x3d, 0xc5, 0x1b, 0xec, 0x32, 0xee, 0xcb, 0x44, 0xc8, 0xef, 0xd5, 0x09, + 0x53, 0x3b, 0x3d, 0x25, 0x0d, 0x70, 0x4e, 0xc3, 0x14, 0x6f, 0xb4, 0x5a, 0xeb, 0xfa, 0x05, 0xb2, + 0x26, 0x2a, 0x0a, 0x69, 0x82, 0x13, 0xf5, 0x52, 0x8c, 0x4e, 0xad, 0x75, 0xf3, 0xe2, 0x73, 0x61, + 0xaf, 0x34, 0xfc, 0xa8, 0x97, 0x92, 0x6d, 0x70, 0xfa, 0xa9, 0xc4, 0x24, 0xa9, 0xf7, 0x76, 0x9e, + 0x8f, 0x5d, 0xd7, 0xd0, 0xfb, 0xa9, 0x54, 0xf4, 0x04, 0x9b, 0xfc, 0xe5, 0x74, 0x7c, 0x13, 0x0d, + 0x3d, 0xe9, 0xb4, 0xd5, 0x6e, 0x86, 0x9d, 0x36, 0x5e, 0x4e, 0x97, 0xed, 0xe6, 0xf5, 0x34, 0x7f, + 0xd8, 0x69, 0xa3, 0xfd, 0x4e, 0x0b, 0xbf, 0x63, 0x16, 0xd8, 0xef, 0xb4, 0x0a, 0xfb, 0x9d, 0x16, + 0xda, 0xef, 0xb4, 0xf0, 0xc3, 0x66, 0x91, 0xfd, 0x98, 0x3f, 0x44, 0x7e, 0x09, 0x6f, 0x42, 0x6f, + 0xc1, 0xa1, 0xab, 0x56, 0xa0, 0xe9, 0xc8, 0x53, 0xfe, 0xaa, 0xa9, 0xc1, 0x02, 0x7f, 0x7d, 0xbb, + 0x18, 0x7f, 0x21, 0x39, 0xf9, 0x1c, 0xdc, 0xe2, 0x96, 0xb9, 0xfc, 0x01, 0xf0, 0xd6, 0xd1, 0x02, + 0xcd, 0xac, 0xdf, 0x86, 0xb5, 0xb9, 0x97, 0x51, 0x35, 0x20, 0xdd, 0x4a, 0xed, 0x86, 0x87, 0xbe, + 0xf5, 0xdf, 0x6d, 0xb8, 0x61, 0x58, 0xcf, 0xf3, 0x28, 0xe1, 0xf1, 0xb1, 0x1c, 0xb3, 0xef, 0x43, + 0x49, 0x0c, 0x7b, 0x99, 0x49, 0xf2, 0xa2, 0x37, 0x9c, 0x22, 0x89, 0x7c, 0x03, 0x5e, 0x16, 0x0e, + 0xba, 0xfd, 0x24, 0x4e, 0x23, 0xd3, 0x6c, 0xb7, 0xe7, 0x14, 0xf3, 0x0b, 0xa8, 0x26, 0xfc, 0x54, + 0xf1, 0x75, 0xf3, 0xad, 0x66, 0x66, 0x48, 0x1e, 0x42, 0x4d, 0xa4, 0xc9, 0x71, 0x6c, 0xdc, 0x1c, + 0x74, 0x5b, 0xb8, 0x3e, 0x20, 0x17, 0x95, 0x5b, 0x47, 0xb0, 0x32, 0x63, 0x3a, 0xdd, 0x72, 0x3d, + 0xdd, 0x72, 0xb7, 0x67, 0x5b, 0xee, 0x42, 0xdb, 0xa9, 0xde, 0x7b, 0x0f, 0x36, 0xe6, 0x50, 0x3c, + 0x6d, 0x42, 0xa0, 0xd4, 0x1b, 0x49, 0x81, 0xe7, 0xb9, 0x4c, 0xf1, 0x7f, 0x7d, 0x0f, 0xc8, 0x1c, + 0xf7, 0xcd, 0x8b, 0xa3, 0xa2, 0xdc, 0x8a, 0xf8, 0x7f, 0xca, 0xfd, 0x28, 0x80, 0x52, 0x1e, 0x66, + 0xf1, 0x5c, 0xd3, 0xfa, 0x19, 0x9f, 0x02, 0x91, 0x47, 0x5f, 0x40, 0x29, 0x7e, 0x2f, 0xb3, 0x39, + 0xc6, 0x2f, 0xff, 0x51, 0x2a, 0x25, 0xf9, 0x37, 0x00, 0x00, 0xff, 0xff, 0xe9, 0xd4, 0xfd, 0x2f, + 0x41, 0x0d, 0x00, 0x00, } diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto index 0d2fc1f..e01386e 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto @@ -43,15 +43,23 @@ package jsonpb; message Simple { optional 
bool o_bool = 1; optional int32 o_int32 = 2; - optional int64 o_int64 = 3; - optional uint32 o_uint32 = 4; - optional uint64 o_uint64 = 5; - optional sint32 o_sint32 = 6; - optional sint64 o_sint64 = 7; - optional float o_float = 8; - optional double o_double = 9; - optional string o_string = 10; - optional bytes o_bytes = 11; + optional int32 o_int32_str = 3; + optional int64 o_int64 = 4; + optional int64 o_int64_str = 5; + optional uint32 o_uint32 = 6; + optional uint32 o_uint32_str = 7; + optional uint64 o_uint64 = 8; + optional uint64 o_uint64_str = 9; + optional sint32 o_sint32 = 10; + optional sint32 o_sint32_str = 11; + optional sint64 o_sint64 = 12; + optional sint64 o_sint64_str = 13; + optional float o_float = 14; + optional float o_float_str = 15; + optional double o_double = 16; + optional double o_double_str = 17; + optional string o_string = 18; + optional bytes o_bytes = 19; } // Test message for holding special non-finites primitives. @@ -107,6 +115,7 @@ message MsgWithOneof { int64 salary = 2; string Country = 3; string home_address = 4; + MsgWithRequired msg_with_required = 5; } } @@ -145,3 +154,26 @@ message KnownTypes { optional google.protobuf.StringValue str = 10; optional google.protobuf.BytesValue bytes = 11; } + +// Test messages for marshaling/unmarshaling required fields. +message MsgWithRequired { + required string str = 1; +} + +message MsgWithIndirectRequired { + optional MsgWithRequired subm = 1; + map<string, MsgWithRequired> map_field = 2; + repeated MsgWithRequired slice_field = 3; +} + +message MsgWithRequiredBytes { + required bytes byts = 1; +} + +message MsgWithRequiredWKT { + required google.protobuf.StringValue str = 1; +} + +extend Real { + optional MsgWithRequired extm = 125; +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/Makefile b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/Makefile deleted file mode 100644 index e2e0651..0000000 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -install: - go install - -test: install generate-test-pbs - go test - - -generate-test-pbs: - make install - make -C testdata - protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto - make diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/clone.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/clone.go index e392575..3cd3249 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/clone.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/clone.go @@ -35,22 +35,39 @@ package proto import ( + "fmt" "log" "reflect" "strings" ) // Clone returns a deep copy of a protocol buffer. -func Clone(pb Message) Message { - in := reflect.ValueOf(pb) +func Clone(src Message) Message { + in := reflect.ValueOf(src) if in.IsNil() { - return pb + return src } out := reflect.New(in.Type().Elem()) - // out is empty so a merge is a deep copy. - mergeStruct(out.Elem(), in.Elem()) - return out.Interface().(Message) + dst := out.Interface().(Message) + Merge(dst, src) + return dst +} + +// Merger is the interface representing objects that can merge messages of the same type. +type Merger interface { + // Merge merges src into this message. + // Required and optional fields that are set in src will be set to that value in dst. + // Elements of repeated fields will be appended. + // + // Merge may panic if called with a different argument type than the receiver. + Merge(src Message) +} + +// generatedMerger is the custom merge method that generated protos will have. +// We must add this method since a generate Merge method will conflict with +// many existing protos that have a Merge data field already defined. +type generatedMerger interface { + XXX_Merge(src Message) } // Merge merges src into dst. @@ -58,17 +75,24 @@ func Clone(pb Message) Message { // Elements of repeated fields will be appended. // Merge panics if src and dst are not the same type, or if dst is nil.
func Merge(dst, src Message) { + if m, ok := dst.(Merger); ok { + m.Merge(src) + return + } + in := reflect.ValueOf(src) out := reflect.ValueOf(dst) if out.IsNil() { panic("proto: nil destination") } if in.Type() != out.Type() { - // Explicit test prior to mergeStruct so that mistyped nils will fail - panic("proto: type mismatch") + panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) } if in.IsNil() { - // Merging nil into non-nil is a quiet no-op + return // Merge from nil src is a noop + } + if m, ok := dst.(generatedMerger); ok { + m.XXX_Merge(src) return } mergeStruct(out.Elem(), in.Elem()) @@ -84,7 +108,7 @@ func mergeStruct(out, in reflect.Value) { mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) } - if emIn, ok := extendable(in.Addr().Interface()); ok { + if emIn, err := extendable(in.Addr().Interface()); err == nil { emOut, _ := extendable(out.Addr().Interface()) mIn, muIn := emIn.extensionsRead() if mIn != nil { diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/decode.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/decode.go index aa20729..d9aa3c4 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/decode.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/decode.go @@ -39,8 +39,6 @@ import ( "errors" "fmt" "io" - "os" - "reflect" ) // errOverflow is returned when an integer is too large to be represented. @@ -50,10 +48,6 @@ var errOverflow = errors.New("proto: integer overflow") // wire type is encountered. It does not get returned to user code. var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") -// The fundamental decoders that interpret bytes on the wire. -// Those that take integer types all return uint64 and are -// therefore of type valueDecoder. - // DecodeVarint reads a varint-encoded integer from the slice. // It returns the integer and the number of bytes consumed, or // zero if there is not enough. @@ -267,9 +261,6 @@ func (p *Buffer) DecodeZigzag32() (x uint64, err error) { return } -// These are not ValueDecoders: they produce an array of bytes or a string. -// bytes, embedded messages - // DecodeRawBytes reads a count-delimited byte buffer from the Buffer. // This is the format used for the bytes protocol buffer // type and for embedded messages. @@ -311,81 +302,29 @@ func (p *Buffer) DecodeStringBytes() (s string, err error) { return string(buf), nil } -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -// If the protocol buffer has extensions, and the field matches, add it as an extension. -// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. -func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { - oi := o.index - - err := o.skip(t, tag, wire) - if err != nil { - return err - } - - if !unrecField.IsValid() { - return nil - } - - ptr := structPointer_Bytes(base, unrecField) - - // Add the skipped field to struct field - obuf := o.buf - - o.buf = *ptr - o.EncodeVarint(uint64(tag<<3 | wire)) - *ptr = append(o.buf, obuf[oi:o.index]...) - - o.buf = obuf - - return nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. 
-func (o *Buffer) skip(t reflect.Type, tag, wire int) error { - - var u uint64 - var err error - - switch wire { - case WireVarint: - _, err = o.DecodeVarint() - case WireFixed64: - _, err = o.DecodeFixed64() - case WireBytes: - _, err = o.DecodeRawBytes(false) - case WireFixed32: - _, err = o.DecodeFixed32() - case WireStartGroup: - for { - u, err = o.DecodeVarint() - if err != nil { - break - } - fwire := int(u & 0x7) - if fwire == WireEndGroup { - break - } - ftag := int(u >> 3) - err = o.skip(t, ftag, fwire) - if err != nil { - break - } - } - default: - err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) - } - return err -} - // Unmarshaler is the interface representing objects that can -// unmarshal themselves. The method should reset the receiver before -// decoding starts. The argument points to data that may be +// unmarshal themselves. The argument points to data that may be // overwritten, so implementations should not keep references to the // buffer. +// Unmarshal implementations should not clear the receiver. +// Any unmarshaled data should be merged into the receiver. +// Callers of Unmarshal that do not want to retain existing data +// should Reset the receiver before calling Unmarshal. type Unmarshaler interface { Unmarshal([]byte) error } +// newUnmarshaler is the interface representing objects that can +// unmarshal themselves. The semantics are identical to Unmarshaler. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newUnmarshaler interface { + XXX_Unmarshal([]byte) error +} + // Unmarshal parses the protocol buffer representation in buf and places the // decoded result in pb. If the struct underlying pb does not match // the data in buf, the results can be unpredictable. @@ -395,7 +334,13 @@ type Unmarshaler interface { // to preserve and append to existing data. func Unmarshal(buf []byte, pb Message) error { pb.Reset() - return UnmarshalMerge(buf, pb) + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) } // UnmarshalMerge parses the protocol buffer representation in buf and @@ -405,8 +350,16 @@ func Unmarshal(buf []byte, pb Message) error { // UnmarshalMerge merges into existing data in pb. // Most code should use Unmarshal instead. func UnmarshalMerge(buf []byte, pb Message) error { - // If the object can unmarshal itself, let it. + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 return u.Unmarshal(buf) } return NewBuffer(buf).Unmarshal(pb) @@ -422,12 +375,17 @@ func (p *Buffer) DecodeMessage(pb Message) error { } // DecodeGroup reads a tag-delimited group from the Buffer. +// StartGroup tag is already consumed. This function consumes +// EndGroup tag. 
func (p *Buffer) DecodeGroup(pb Message) error { - typ, base, err := getbase(pb) - if err != nil { - return err + b := p.buf[p.index:] + x, y := findEndGroup(b) + if x < 0 { + return io.ErrUnexpectedEOF } - return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) + err := Unmarshal(b[:x], pb) + p.index += y + return err } // Unmarshal parses the protocol buffer representation in the @@ -438,533 +396,33 @@ func (p *Buffer) DecodeGroup(pb Message) error { // Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. func (p *Buffer) Unmarshal(pb Message) error { // If the object can unmarshal itself, let it. - if u, ok := pb.(Unmarshaler); ok { - err := u.Unmarshal(p.buf[p.index:]) + if u, ok := pb.(newUnmarshaler); ok { + err := u.XXX_Unmarshal(p.buf[p.index:]) p.index = len(p.buf) return err } - - typ, base, err := getbase(pb) - if err != nil { - return err - } - - err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) - - if collectStats { - stats.Decode++ - } - - return err -} - -// unmarshalType does the work of unmarshaling a structure. -func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { - var state errorState - required, reqFields := prop.reqCount, uint64(0) - - var err error - for err == nil && o.index < len(o.buf) { - oi := o.index - var u uint64 - u, err = o.DecodeVarint() - if err != nil { - break - } - wire := int(u & 0x7) - if wire == WireEndGroup { - if is_group { - if required > 0 { - // Not enough information to determine the exact field. - // (See below.) - return &RequiredNotSetError{"{Unknown}"} - } - return nil // input is satisfied - } - return fmt.Errorf("proto: %s: wiretype end group for non-group", st) - } - tag := int(u >> 3) - if tag <= 0 { - return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) - } - fieldnum, ok := prop.decoderTags.get(tag) - if !ok { - // Maybe it's an extension? - if prop.extendable { - if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { - if err = o.skip(st, tag, wire); err == nil { - extmap := e.extensionsWrite() - ext := extmap[int32(tag)] // may be missing - ext.enc = append(ext.enc, o.buf[oi:o.index]...) - extmap[int32(tag)] = ext - } - continue - } - } - // Maybe it's a oneof? - if prop.oneofUnmarshaler != nil { - m := structPointer_Interface(base, st).(Message) - // First return value indicates whether tag is a oneof field. - ok, err = prop.oneofUnmarshaler(m, tag, wire, o) - if err == ErrInternalBadWireType { - // Map the error to something more descriptive. - // Do the formatting here to save generated code space. - err = fmt.Errorf("bad wiretype for oneof field in %T", m) - } - if ok { - continue - } - } - err = o.skipAndSave(st, tag, wire, base, prop.unrecField) - continue - } - p := prop.Prop[fieldnum] - - if p.dec == nil { - fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) - continue - } - dec := p.dec - if wire != WireStartGroup && wire != p.WireType { - if wire == WireBytes && p.packedDec != nil { - // a packable field - dec = p.packedDec - } else { - err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) - continue - } - } - decErr := dec(o, p, base) - if decErr != nil && !state.shouldContinue(decErr, p) { - err = decErr - } - if err == nil && p.Required { - // Successfully decoded a required field. 
- if tag <= 64 { - // use bitmap for fields 1-64 to catch field reuse. - var mask uint64 = 1 << uint64(tag-1) - if reqFields&mask == 0 { - // new required field - reqFields |= mask - required-- - } - } else { - // This is imprecise. It can be fooled by a required field - // with a tag > 64 that is encoded twice; that's very rare. - // A fully correct implementation would require allocating - // a data structure, which we would like to avoid. - required-- - } - } - } - if err == nil { - if is_group { - return io.ErrUnexpectedEOF - } - if state.err != nil { - return state.err - } - if required > 0 { - // Not enough information to determine the exact field. If we use extra - // CPU, we could determine the field only if the missing required field - // has a tag <= 64 and we check reqFields. - return &RequiredNotSetError{"{Unknown}"} - } - } - return err -} - -// Individual type decoders -// For each, -// u is the decoded value, -// v is a pointer to the field (pointer) in the struct - -// Sizes of the pools to allocate inside the Buffer. -// The goal is modest amortization and allocation -// on at least 16-byte boundaries. -const ( - boolPoolSize = 16 - uint32PoolSize = 8 - uint64PoolSize = 4 -) - -// Decode a bool. -func (o *Buffer) dec_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - if len(o.bools) == 0 { - o.bools = make([]bool, boolPoolSize) - } - o.bools[0] = u != 0 - *structPointer_Bool(base, p.field) = &o.bools[0] - o.bools = o.bools[1:] - return nil -} - -func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - *structPointer_BoolVal(base, p.field) = u != 0 - return nil -} - -// Decode an int32. -func (o *Buffer) dec_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) - return nil -} - -func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) - return nil -} - -// Decode an int64. -func (o *Buffer) dec_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64_Set(structPointer_Word64(base, p.field), o, u) - return nil -} - -func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64Val_Set(structPointer_Word64Val(base, p.field), o, u) - return nil -} - -// Decode a string. -func (o *Buffer) dec_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_String(base, p.field) = &s - return nil -} - -func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_StringVal(base, p.field) = s - return nil -} - -// Decode a slice of bytes ([]byte). -func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - *structPointer_Bytes(base, p.field) = b - return nil -} - -// Decode a slice of bools ([]bool). 
-func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - v := structPointer_BoolSlice(base, p.field) - *v = append(*v, u != 0) - return nil -} - -// Decode a slice of bools ([]bool) in packed format. -func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { - v := structPointer_BoolSlice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded bools - fin := o.index + nb - if fin < o.index { - return errOverflow - } - - y := *v - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - y = append(y, u != 0) - } - - *v = y - return nil -} - -// Decode a slice of int32s ([]int32). -func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - structPointer_Word32Slice(base, p.field).Append(uint32(u)) - return nil -} - -// Decode a slice of int32s ([]int32) in packed format. -func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int32s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(uint32(u)) - } - return nil -} - -// Decode a slice of int64s ([]int64). -func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - - structPointer_Word64Slice(base, p.field).Append(u) - return nil -} - -// Decode a slice of int64s ([]int64) in packed format. -func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int64s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(u) - } - return nil -} - -// Decode a slice of strings ([]string). -func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - v := structPointer_StringSlice(base, p.field) - *v = append(*v, s) - return nil -} - -// Decode a slice of slice of bytes ([][]byte). -func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - v := structPointer_BytesSlice(base, p.field) - *v = append(*v, b) - return nil -} - -// Decode a map field. -func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - oi := o.index // index at the end of this map entry - o.index -= len(raw) // move buffer back to start of map entry - - mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V - if mptr.Elem().IsNil() { - mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) - } - v := mptr.Elem() // map[K]V - - // Prepare addressable doubly-indirect placeholders for the key and value types. - // See enc_new_map for why. 
- keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K - keybase := toStructPointer(keyptr.Addr()) // **K - - var valbase structPointer - var valptr reflect.Value - switch p.mtype.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valptr = reflect.ValueOf(&dummy) // *[]byte - valbase = toStructPointer(valptr) // *[]byte - case reflect.Ptr: - // message; valptr is **Msg; need to allocate the intermediate pointer - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valptr.Set(reflect.New(valptr.Type().Elem())) - valbase = toStructPointer(valptr) - default: - // everything else - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valbase = toStructPointer(valptr.Addr()) // **V - } - - // Decode. - // This parses a restricted wire format, namely the encoding of a message - // with two fields. See enc_new_map for the format. - for o.index < oi { - // tagcode for key and value properties are always a single byte - // because they have tags 1 and 2. - tagcode := o.buf[o.index] - o.index++ - switch tagcode { - case p.mkeyprop.tagcode[0]: - if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { - return err - } - case p.mvalprop.tagcode[0]: - if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { - return err - } - default: - // TODO: Should we silently skip this instead? - return fmt.Errorf("proto: bad map data tag %d", raw[0]) - } - } - keyelem, valelem := keyptr.Elem(), valptr.Elem() - if !keyelem.IsValid() { - keyelem = reflect.Zero(p.mtype.Key()) - } - if !valelem.IsValid() { - valelem = reflect.Zero(p.mtype.Elem()) - } - - v.SetMapIndex(keyelem, valelem) - return nil -} - -// Decode a group. -func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - return o.unmarshalType(p.stype, p.sprop, true, bas) -} - -// Decode an embedded message. -func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { - raw, e := o.DecodeRawBytes(false) - if e != nil { - return e - } - - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := structPointer_Interface(bas, p.stype) - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, false, bas) - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of embedded messages. -func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, false, base) -} - -// Decode a slice of embedded groups. -func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, true, base) -} - -// Decode a slice of structs ([]*struct). 
-func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { - v := reflect.New(p.stype) - bas := toStructPointer(v) - structPointer_StructPointerSlice(base, p.field).Append(bas) - - if is_group { - err := o.unmarshalType(p.stype, p.sprop, is_group, bas) - return err - } - - raw, err := o.DecodeRawBytes(false) - if err != nil { + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) return err } - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := v.Interface() - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, is_group, bas) - - o.buf = obuf - o.index = oi - + // Slow workaround for messages that aren't Unmarshalers. + // This includes some hand-coded .pb.go files and + // bootstrap protos. + // TODO: fix all of those and then add Unmarshal to + // the Message interface. Then: + // The cast above and code below can be deleted. + // The old unmarshaler can be deleted. + // Clients can call Unmarshal directly (can already do that, actually). + var info InternalMessageInfo + err := info.Unmarshal(pb, p.buf[p.index:]) + p.index = len(p.buf) return err } diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/discard.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/discard.go index bd0e3bb..dea2617 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/discard.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/discard.go @@ -35,8 +35,14 @@ import ( "fmt" "reflect" "strings" + "sync" + "sync/atomic" ) +type generatedDiscarder interface { + XXX_DiscardUnknown() +} + // DiscardUnknown recursively discards all unknown fields from this message // and all embedded messages. // @@ -49,9 +55,202 @@ import ( // For proto2 messages, the unknown fields of message extensions are only // discarded from messages that have been accessed via GetExtension. func DiscardUnknown(m Message) { + if m, ok := m.(generatedDiscarder); ok { + m.XXX_DiscardUnknown() + return + } + // TODO: Dynamically populate a InternalMessageInfo for legacy messages, + // but the master branch has no implementation for InternalMessageInfo, + // so it would be more work to replicate that approach. discardLegacy(m) } +// DiscardUnknown recursively discards all unknown fields. 
+func (a *InternalMessageInfo) DiscardUnknown(m Message) { + di := atomicLoadDiscardInfo(&a.discard) + if di == nil { + di = getDiscardInfo(reflect.TypeOf(m).Elem()) + atomicStoreDiscardInfo(&a.discard, di) + } + di.discard(toPointer(&m)) +} + +type discardInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []discardFieldInfo + unrecognized field +} + +type discardFieldInfo struct { + field field // Offset of field, guaranteed to be valid + discard func(src pointer) +} + +var ( + discardInfoMap = map[reflect.Type]*discardInfo{} + discardInfoLock sync.Mutex +) + +func getDiscardInfo(t reflect.Type) *discardInfo { + discardInfoLock.Lock() + defer discardInfoLock.Unlock() + di := discardInfoMap[t] + if di == nil { + di = &discardInfo{typ: t} + discardInfoMap[t] = di + } + return di +} + +func (di *discardInfo) discard(src pointer) { + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&di.initialized) == 0 { + di.computeDiscardInfo() + } + + for _, fi := range di.fields { + sfp := src.offset(fi.field) + fi.discard(sfp) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { + // Ignore lock since DiscardUnknown is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + DiscardUnknown(m) + } + } + } + + if di.unrecognized.IsValid() { + *src.offset(di.unrecognized).toBytes() = nil + } +} + +func (di *discardInfo) computeDiscardInfo() { + di.lock.Lock() + defer di.lock.Unlock() + if di.initialized != 0 { + return + } + t := di.typ + n := t.NumField() + + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + dfi := discardFieldInfo{field: toField(&f)} + tf := f.Type + + // Unwrap tf to get its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) + case isSlice: // E.g., []*pb.T + di := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sps := src.getPointerSlice() + for _, sp := range sps { + if !sp.isNil() { + di.discard(sp) + } + } + } + default: // E.g., *pb.T + di := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sp := src.getPointer() + if !sp.isNil() { + di.discard(sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) + default: // E.g., map[K]V + if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) + dfi.discard = func(src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + DiscardUnknown(val.Interface().(Message)) + } + } + } else { + dfi.discard = func(pointer) {} // Noop + } + } + case reflect.Interface: + // Must be oneof field. 
+ switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) + default: // E.g., interface{} + // TODO: Make this faster? + dfi.discard = func(src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + DiscardUnknown(sv.Interface().(Message)) + } + } + } + } + default: + continue + } + di.fields = append(di.fields, dfi) + } + + di.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + di.unrecognized = toField(&f) + } + + atomic.StoreInt32(&di.initialized, 1) +} + func discardLegacy(m Message) { v := reflect.ValueOf(m) if v.Kind() != reflect.Ptr || v.IsNil() { @@ -139,7 +338,7 @@ func discardLegacy(m Message) { // For proto2 messages, only discard unknown fields in message extensions // that have been accessed via GetExtension. - if em, ok := extendable(m); ok { + if em, err := extendable(m); err == nil { // Ignore lock since discardLegacy is not concurrency safe. emm, _ := em.extensionsRead() for _, mx := range emm { diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/encode.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/encode.go index 8b84d1b..4c35d33 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/encode.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/encode.go @@ -39,23 +39,19 @@ import ( "errors" "fmt" "reflect" - "sort" ) -// RequiredNotSetError is the error returned if Marshal is called with -// a protocol buffer struct whose required fields have not -// all been initialized. It is also the error returned if Unmarshal is -// called with an encoded protocol buffer that does not include all the -// required fields. -// -// When printed, RequiredNotSetError reports the first unset required field in a -// message. If the field cannot be precisely determined, it is reported as -// "{Unknown}". +// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. +// Marshal reports this when a required field is not initialized. +// Unmarshal reports this when a required field is missing from the wire data. type RequiredNotSetError struct { field string } func (e *RequiredNotSetError) Error() string { + if e.field == "" { + return fmt.Sprintf("proto: required field not set") + } return fmt.Sprintf("proto: required field %q not set", e.field) } @@ -82,10 +78,6 @@ var ( const maxVarintBytes = 10 // maximum length of a varint -// maxMarshalSize is the largest allowed size of an encoded protobuf, -// since C++ and Java use signed int32s for the size. -const maxMarshalSize = 1<<31 - 1 - // EncodeVarint returns the varint encoding of x. // This is the format for the // int32, int64, uint32, uint64, bool, and enum @@ -119,18 +111,27 @@ func (p *Buffer) EncodeVarint(x uint64) error { // SizeVarint returns the varint encoding size of an integer. 
func SizeVarint(x uint64) int { - return sizeVarint(x) -} - -func sizeVarint(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + switch { + case x < 1<<7: + return 1 + case x < 1<<14: + return 2 + case x < 1<<21: + return 3 + case x < 1<<28: + return 4 + case x < 1<<35: + return 5 + case x < 1<<42: + return 6 + case x < 1<<49: + return 7 + case x < 1<<56: + return 8 + case x < 1<<63: + return 9 + } + return 10 } // EncodeFixed64 writes a 64-bit integer to the Buffer. @@ -149,10 +150,6 @@ func (p *Buffer) EncodeFixed64(x uint64) error { return nil } -func sizeFixed64(x uint64) int { - return 8 -} - // EncodeFixed32 writes a 32-bit integer to the Buffer. // This is the format for the // fixed32, sfixed32, and float protocol buffer types. @@ -165,20 +162,12 @@ func (p *Buffer) EncodeFixed32(x uint64) error { return nil } -func sizeFixed32(x uint64) int { - return 4 -} - // EncodeZigzag64 writes a zigzag-encoded 64-bit integer // to the Buffer. // This is the format used for the sint64 protocol buffer type. func (p *Buffer) EncodeZigzag64(x uint64) error { // use signed number to get arithmetic right shift. - return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63))) -} - -func sizeZigzag64(x uint64) int { - return sizeVarint((x << 1) ^ uint64((int64(x) >> 63))) + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } // EncodeZigzag32 writes a zigzag-encoded 32-bit integer @@ -189,10 +178,6 @@ func (p *Buffer) EncodeZigzag32(x uint64) error { return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) } -func sizeZigzag32(x uint64) int { - return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - // EncodeRawBytes writes a count-delimited byte buffer to the Buffer. // This is the format used for the bytes protocol buffer // type and for embedded messages. @@ -202,11 +187,6 @@ func (p *Buffer) EncodeRawBytes(b []byte) error { return nil } -func sizeRawBytes(b []byte) int { - return sizeVarint(uint64(len(b))) + - len(b) -} - // EncodeStringBytes writes an encoded string to the Buffer. // This is the format used for the proto2 string type. func (p *Buffer) EncodeStringBytes(s string) error { @@ -215,319 +195,17 @@ func (p *Buffer) EncodeStringBytes(s string) error { return nil } -func sizeStringBytes(s string) int { - return sizeVarint(uint64(len(s))) + - len(s) -} - // Marshaler is the interface representing objects that can marshal themselves. type Marshaler interface { Marshal() ([]byte, error) } -// Marshal takes the protocol buffer -// and encodes it into the wire format, returning the data. -func Marshal(pb Message) ([]byte, error) { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - return m.Marshal() - } - p := NewBuffer(nil) - err := p.Marshal(pb) - if p.buf == nil && err == nil { - // Return a non-nil slice on success. - return []byte{}, nil - } - return p.buf, err -} - // EncodeMessage writes the protocol buffer to the Buffer, // prefixed by a varint-encoded length. func (p *Buffer) EncodeMessage(pb Message) error { - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - var state errorState - err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) - } - return err -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, writing the result to the -// Buffer. -func (p *Buffer) Marshal(pb Message) error { - // Can the object marshal itself? 
- if m, ok := pb.(Marshaler); ok { - data, err := m.Marshal() - p.buf = append(p.buf, data...) - return err - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - err = p.enc_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Encode++ // Parens are to work around a goimports bug. - } - - if len(p.buf) > maxMarshalSize { - return ErrTooLarge - } - return err -} - -// Size returns the encoded size of a protocol buffer. -func Size(pb Message) (n int) { - // Can the object marshal itself? If so, Size is slow. - // TODO: add Size to Marshaler, or add a Sizer interface. - if m, ok := pb.(Marshaler); ok { - b, _ := m.Marshal() - return len(b) - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return 0 - } - if err == nil { - n = size_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Size++ // Parens are to work around a goimports bug. - } - - return -} - -// Individual type encoders. - -// Encode a bool. -func (o *Buffer) enc_bool(p *Properties, base structPointer) error { - v := *structPointer_Bool(base, p.field) - if v == nil { - return ErrNil - } - x := 0 - if *v { - x = 1 - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { - v := *structPointer_BoolVal(base, p.field) - if !v { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, 1) - return nil -} - -func size_bool(p *Properties, base structPointer) int { - v := *structPointer_Bool(base, p.field) - if v == nil { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -func size_proto3_bool(p *Properties, base structPointer) int { - v := *structPointer_BoolVal(base, p.field) - if !v && !p.oneof { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -// Encode an int32. -func (o *Buffer) enc_int32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode a uint32. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := word32_Get(v) - o.buf = append(o.buf, p.tagcode...) 
- p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := word32_Get(v) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode an int64. -func (o *Buffer) enc_int64(p *Properties, base structPointer) error { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return ErrNil - } - x := word64_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func size_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return 0 - } - x := word64_Get(v) - n += len(p.tagcode) - n += p.valSize(x) - return -} - -func size_proto3_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(x) - return -} - -// Encode a string. -func (o *Buffer) enc_string(p *Properties, base structPointer) error { - v := *structPointer_String(base, p.field) - if v == nil { - return ErrNil - } - x := *v - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(x) - return nil -} - -func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { - v := *structPointer_StringVal(base, p.field) - if v == "" { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(v) - return nil -} - -func size_string(p *Properties, base structPointer) (n int) { - v := *structPointer_String(base, p.field) - if v == nil { - return 0 - } - x := *v - n += len(p.tagcode) - n += sizeStringBytes(x) - return -} - -func size_proto3_string(p *Properties, base structPointer) (n int) { - v := *structPointer_StringVal(base, p.field) - if v == "" && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeStringBytes(v) - return + siz := Size(pb) + p.EncodeVarint(uint64(siz)) + return p.Marshal(pb) } // All protocol buffer fields are nillable, but be careful. @@ -538,825 +216,3 @@ func isNil(v reflect.Value) bool { } return false } - -// Encode a message struct. -func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { - var state errorState - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return state.err - } - - o.buf = append(o.buf, p.tagcode...) 
- return o.enc_len_struct(p.sprop, structp, &state) -} - -func size_struct_message(p *Properties, base structPointer) int { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n0 := len(p.tagcode) - n1 := sizeRawBytes(data) - return n0 + n1 - } - - n0 := len(p.tagcode) - n1 := size_struct(p.sprop, structp) - n2 := sizeVarint(uint64(n1)) // size of encoded length - return n0 + n1 + n2 -} - -// Encode a group struct. -func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { - var state errorState - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return ErrNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - err := o.enc_struct(p.sprop, b) - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return state.err -} - -func size_struct_group(p *Properties, base structPointer) (n int) { - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return 0 - } - - n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) - n += size_struct(p.sprop, b) - n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return -} - -// Encode a slice of bools ([]bool). -func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - for _, x := range s { - o.buf = append(o.buf, p.tagcode...) - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_bool(p *Properties, base structPointer) int { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - return l * (len(p.tagcode) + 1) // each bool takes exactly one byte -} - -// Encode a slice of bools ([]bool) in packed format. -func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(l)) // each bool takes exactly one byte - for _, x := range s { - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_packed_bool(p *Properties, base structPointer) (n int) { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - n += len(p.tagcode) - n += sizeVarint(uint64(l)) - n += l // each bool takes exactly one byte - return -} - -// Encode a slice of bytes ([]byte). -func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeRawBytes(s) - return nil -} - -func size_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if s == nil && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -// Encode a slice of int32s ([]int32). -func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of int32s ([]int32) in packed format. -func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(buf, uint64(x)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - bufSize += p.valSize(uint64(x)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of uint32s ([]uint32). -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - x := s.Index(i) - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := s.Index(i) - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of uint32s ([]uint32) in packed format. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, uint64(s.Index(i))) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) 
- return nil -} - -func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(uint64(s.Index(i))) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of int64s ([]int64). -func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, s.Index(i)) - } - return nil -} - -func size_slice_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - n += p.valSize(s.Index(i)) - } - return -} - -// Encode a slice of int64s ([]int64) in packed format. -func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, s.Index(i)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(s.Index(i)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of slice of bytes ([][]byte). -func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(ss[i]) - } - return nil -} - -func size_slice_slice_byte(p *Properties, base structPointer) (n int) { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return 0 - } - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeRawBytes(ss[i]) - } - return -} - -// Encode a slice of strings ([]string). -func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(ss[i]) - } - return nil -} - -func size_slice_string(p *Properties, base structPointer) (n int) { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeStringBytes(ss[i]) - } - return -} - -// Encode a slice of message structs ([]*struct). -func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return errRepeatedHasNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeRawBytes(data) - continue - } - - o.buf = append(o.buf, p.tagcode...) - err := o.enc_len_struct(p.sprop, structp, &state) - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - } - return state.err -} - -func size_slice_struct_message(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return // return the size up to this point - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n += sizeRawBytes(data) - continue - } - - n0 := size_struct(p.sprop, structp) - n1 := sizeVarint(uint64(n0)) // size of encoded length - n += n0 + n1 - } - return -} - -// Encode a slice of group structs ([]*struct). -func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return errRepeatedHasNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - - err := o.enc_struct(p.sprop, b) - - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - } - return state.err -} - -func size_slice_struct_group(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) - n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return // return size up to this point - } - - n += size_struct(p.sprop, b) - } - return -} - -// Encode an extension map. -func (o *Buffer) enc_map(p *Properties, base structPointer) error { - exts := structPointer_ExtMap(base, p.field) - if err := encodeExtensionsMap(*exts); err != nil { - return err - } - - return o.enc_map_body(*exts) -} - -func (o *Buffer) enc_exts(p *Properties, base structPointer) error { - exts := structPointer_Extensions(base, p.field) - - v, mu := exts.extensionsRead() - if v == nil { - return nil - } - - mu.Lock() - defer mu.Unlock() - if err := encodeExtensionsMap(v); err != nil { - return err - } - - return o.enc_map_body(v) -} - -func (o *Buffer) enc_map_body(v map[int32]Extension) error { - // Fast-path for common cases: zero or one extensions. - if len(v) <= 1 { - for _, e := range v { - o.buf = append(o.buf, e.enc...) - } - return nil - } - - // Sort keys to provide a deterministic encoding. - keys := make([]int, 0, len(v)) - for k := range v { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - o.buf = append(o.buf, v[int32(k)].enc...) - } - return nil -} - -func size_map(p *Properties, base structPointer) int { - v := structPointer_ExtMap(base, p.field) - return extensionsMapSize(*v) -} - -func size_exts(p *Properties, base structPointer) int { - v := structPointer_Extensions(base, p.field) - return extensionsSize(v) -} - -// Encode a map field. -func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { - var state errorState // XXX: or do we need to plumb this through? 
- - /* - A map defined as - map map_field = N; - is encoded in the same way as - message MapFieldEntry { - key_type key = 1; - value_type value = 2; - } - repeated MapFieldEntry map_field = N; - */ - - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - if v.Len() == 0 { - return nil - } - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - enc := func() error { - if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { - return err - } - if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil { - return err - } - return nil - } - - // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - - keycopy.Set(key) - valcopy.Set(val) - - o.buf = append(o.buf, p.tagcode...) - if err := o.enc_len_thing(enc, &state); err != nil { - return err - } - } - return nil -} - -func size_new_map(p *Properties, base structPointer) int { - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - n := 0 - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - keycopy.Set(key) - valcopy.Set(val) - - // Tag codes for key and val are the responsibility of the sub-sizer. - keysize := p.mkeyprop.size(p.mkeyprop, keybase) - valsize := p.mvalprop.size(p.mvalprop, valbase) - entry := keysize + valsize - // Add on tag code and length of map entry itself. - n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry - } - return n -} - -// mapEncodeScratch returns a new reflect.Value matching the map's value type, -// and a structPointer suitable for passing to an encoder or sizer. -func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { - // Prepare addressable doubly-indirect placeholders for the key and value types. - // This is needed because the element-type encoders expect **T, but the map iteration produces T. - - keycopy = reflect.New(mapType.Key()).Elem() // addressable K - keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K - keyptr.Set(keycopy.Addr()) // - keybase = toStructPointer(keyptr.Addr()) // **K - - // Value types are more varied and require special handling. - switch mapType.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte - valbase = toStructPointer(valcopy.Addr()) - case reflect.Ptr: - // message; the generated field type is map[K]*Msg (so V is *Msg), - // so we only need one level of indirection. - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valbase = toStructPointer(valcopy.Addr()) - default: - // everything else - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V - valptr.Set(valcopy.Addr()) // - valbase = toStructPointer(valptr.Addr()) // **V - } - return -} - -// Encode a struct. -func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { - var state errorState - // Encode fields in tag order so that decoders may use optimizations - // that depend on the ordering. 
- // https://developers.google.com/protocol-buffers/docs/encoding#order - for _, i := range prop.order { - p := prop.Prop[i] - if p.enc != nil { - err := p.enc(o, p, base) - if err != nil { - if err == ErrNil { - if p.Required && state.err == nil { - state.err = &RequiredNotSetError{p.Name} - } - } else if err == errRepeatedHasNil { - // Give more context to nil values in repeated fields. - return errors.New("repeated field " + p.OrigName + " has nil element") - } else if !state.shouldContinue(err, p) { - return err - } - } - if len(o.buf) > maxMarshalSize { - return ErrTooLarge - } - } - } - - // Do oneof fields. - if prop.oneofMarshaler != nil { - m := structPointer_Interface(base, prop.stype).(Message) - if err := prop.oneofMarshaler(m, o); err == ErrNil { - return errOneofHasNil - } else if err != nil { - return err - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - if len(o.buf)+len(v) > maxMarshalSize { - return ErrTooLarge - } - if len(v) > 0 { - o.buf = append(o.buf, v...) - } - } - - return state.err -} - -func size_struct(prop *StructProperties, base structPointer) (n int) { - for _, i := range prop.order { - p := prop.Prop[i] - if p.size != nil { - n += p.size(p, base) - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - n += len(v) - } - - // Factor in any oneof fields. - if prop.oneofSizer != nil { - m := structPointer_Interface(base, prop.stype).(Message) - n += prop.oneofSizer(m) - } - - return -} - -var zeroes [20]byte // longer than any conceivable sizeVarint - -// Encode a struct, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { - return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) -} - -// Encode something, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { - iLen := len(o.buf) - o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length - iMsg := len(o.buf) - err := enc() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - lMsg := len(o.buf) - iMsg - lLen := sizeVarint(uint64(lMsg)) - switch x := lLen - (iMsg - iLen); { - case x > 0: // actual length is x bytes larger than the space we reserved - // Move msg x bytes right. - o.buf = append(o.buf, zeroes[:x]...) - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - case x < 0: // actual length is x bytes smaller than the space we reserved - // Move msg x bytes left. - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - o.buf = o.buf[:len(o.buf)+x] // x is negative - } - // Encode the length in the reserved space. - o.buf = o.buf[:iLen] - o.EncodeVarint(uint64(lMsg)) - o.buf = o.buf[:len(o.buf)+lMsg] - return state.err -} - -// errorState maintains the first error that occurs and updates that error -// with additional context. -type errorState struct { - err error -} - -// shouldContinue reports whether encoding should continue upon encountering the -// given error. If the error is RequiredNotSetError, shouldContinue returns true -// and, if this is the first appearance of that error, remembers it for future -// reporting. -// -// If prop is not nil, it may update any error with additional context about the -// field with the error. -func (s *errorState) shouldContinue(err error, prop *Properties) bool { - // Ignore unset required fields. 
- reqNotSet, ok := err.(*RequiredNotSetError) - if !ok { - return false - } - if s.err == nil { - if prop != nil { - err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} - } - s.err = err - } - return true -} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/equal.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/equal.go index 2ed1cf5..d4db5a1 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/equal.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/equal.go @@ -109,15 +109,6 @@ func equalStruct(v1, v2 reflect.Value) bool { // set/unset mismatch return false } - b1, ok := f1.Interface().(raw) - if ok { - b2 := f2.Interface().(raw) - // RawMessage - if !bytes.Equal(b1.Bytes(), b2.Bytes()) { - return false - } - continue - } f1, f2 = f1.Elem(), f2.Elem() } if !equalAny(f1, f2, sprop.Prop[i]) { @@ -146,11 +137,7 @@ func equalStruct(v1, v2 reflect.Value) bool { u1 := uf.Bytes() u2 := v2.FieldByName("XXX_unrecognized").Bytes() - if !bytes.Equal(u1, u2) { - return false - } - - return true + return bytes.Equal(u1, u2) } // v1 and v2 are known to have the same type. @@ -261,6 +248,15 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { m1, m2 := e1.value, e2.value + if m1 == nil && m2 == nil { + // Both have only encoded form. + if bytes.Equal(e1.enc, e2.enc) { + continue + } + // The bytes are different, but the extensions might still be + // equal. We need to decode them to compare. + } + if m1 != nil && m2 != nil { // Both are unencoded. if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { @@ -276,8 +272,12 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { desc = m[extNum] } if desc == nil { + // If both have only encoded form and the bytes are the same, + // it is handled above. We get here when the bytes are different. + // We don't know how to decode it, so just compare them as byte + // slices. log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - continue + return false } var err error if m1 == nil { diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/extensions.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/extensions.go index eaad218..816a3b9 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/extensions.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/extensions.go @@ -38,6 +38,7 @@ package proto import ( "errors" "fmt" + "io" "reflect" "strconv" "sync" @@ -91,14 +92,29 @@ func (n notLocker) Unlock() {} // extendable returns the extendableProto interface for the given generated proto message. // If the proto message has the old extension format, it returns a wrapper that implements // the extendableProto interface. 
-func extendable(p interface{}) (extendableProto, bool) { - if ep, ok := p.(extendableProto); ok { - return ep, ok - } - if ep, ok := p.(extendableProtoV1); ok { - return extensionAdapter{ep}, ok +func extendable(p interface{}) (extendableProto, error) { + switch p := p.(type) { + case extendableProto: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return p, nil + case extendableProtoV1: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return extensionAdapter{p}, nil } - return nil, false + // Don't allocate a specific error containing %T: + // this is the hot path for Clone and MarshalText. + return nil, errNotExtendable +} + +var errNotExtendable = errors.New("proto: not an extendable proto.Message") + +func isNilPtr(x interface{}) bool { + v := reflect.ValueOf(x) + return v.Kind() == reflect.Ptr && v.IsNil() } // XXX_InternalExtensions is an internal representation of proto extensions. @@ -143,9 +159,6 @@ func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Loc return e.p.extensionMap, &e.p.mu } -var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() -var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem() - // ExtensionDesc represents an extension specification. // Used in generated code from the protocol compiler. type ExtensionDesc struct { @@ -179,8 +192,8 @@ type Extension struct { // SetRawExtension is for testing only. func SetRawExtension(base Message, id int32, b []byte) { - epb, ok := extendable(base) - if !ok { + epb, err := extendable(base) + if err != nil { return } extmap := epb.extensionsWrite() @@ -205,7 +218,7 @@ func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { pbi = ea.extendableProtoV1 } if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) + return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) } // Check the range. if !isExtensionField(pb, extension.Field) { @@ -250,85 +263,11 @@ func extensionProperties(ed *ExtensionDesc) *Properties { return prop } -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensions(e *XXX_InternalExtensions) error { - m, mu := e.extensionsRead() - if m == nil { - return nil // fast path - } - mu.Lock() - defer mu.Unlock() - return encodeExtensionsMap(m) -} - -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensionsMap(m map[int32]Extension) error { - for k, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - p := NewBuffer(nil) - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. 
- x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { - return err - } - e.enc = p.buf - m[k] = e - } - return nil -} - -func extensionsSize(e *XXX_InternalExtensions) (n int) { - m, mu := e.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - defer mu.Unlock() - return extensionsMapSize(m) -} - -func extensionsMapSize(m map[int32]Extension) (n int) { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - n += props.size(props, toStructPointer(x)) - } - return -} - // HasExtension returns whether the given extension is present in pb. func HasExtension(pb Message, extension *ExtensionDesc) bool { // TODO: Check types, field numbers, etc.? - epb, ok := extendable(pb) - if !ok { + epb, err := extendable(pb) + if err != nil { return false } extmap, mu := epb.extensionsRead() @@ -336,15 +275,15 @@ func HasExtension(pb Message, extension *ExtensionDesc) bool { return false } mu.Lock() - _, ok = extmap[extension.Field] + _, ok := extmap[extension.Field] mu.Unlock() return ok } // ClearExtension removes the given extension from pb. func ClearExtension(pb Message, extension *ExtensionDesc) { - epb, ok := extendable(pb) - if !ok { + epb, err := extendable(pb) + if err != nil { return } // TODO: Check types, field numbers, etc.? @@ -352,16 +291,26 @@ func ClearExtension(pb Message, extension *ExtensionDesc) { delete(extmap, extension.Field) } -// GetExtension parses and returns the given extension of pb. -// If the extension is not present and has no default value it returns ErrMissingExtension. +// GetExtension retrieves a proto2 extended field from pb. +// +// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), +// then GetExtension parses the encoded field and returns a Go value of the specified type. +// If the field is not present, then the default value is returned (if one is specified), +// otherwise ErrMissingExtension is reported. +// +// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes of the field extension. 
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - epb, ok := extendable(pb) - if !ok { - return nil, errors.New("proto: not an extendable proto") + epb, err := extendable(pb) + if err != nil { + return nil, err } - if err := checkExtensionTypes(epb, extension); err != nil { - return nil, err + if extension.ExtendedType != nil { + // can only check type if this is a complete descriptor + if err := checkExtensionTypes(epb, extension); err != nil { + return nil, err + } } emap, mu := epb.extensionsRead() @@ -388,6 +337,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { return e.value, nil } + if extension.ExtensionType == nil { + // incomplete descriptor + return e.enc, nil + } + v, err := decodeExtension(e.enc, extension) if err != nil { return nil, err @@ -405,6 +359,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { // defaultExtensionValue returns the default value for extension. // If no default for an extension is defined ErrMissingExtension is returned. func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + if extension.ExtensionType == nil { + // incomplete descriptor, so no default + return nil, ErrMissingExtension + } + t := reflect.TypeOf(extension.ExtensionType) props := extensionProperties(extension) @@ -439,31 +398,28 @@ func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { // decodeExtension decodes an extension encoded in b. func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - o := NewBuffer(b) - t := reflect.TypeOf(extension.ExtensionType) - - props := extensionProperties(extension) + unmarshal := typeUnmarshaler(t, extension.Tag) // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate a "field" to store the pointer/slice itself; the - // pointer/slice will be stored here. We pass - // the address of this field to props.dec. - // This passes a zero field and a *t and lets props.dec - // interpret it as a *struct{ x t }. + // Allocate space to store the pointer/slice. value := reflect.New(t).Elem() + var err error for { - // Discard wire type and field number varint. It isn't needed. - if _, err := o.DecodeVarint(); err != nil { - return nil, err + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF } + b = b[n:] + wire := int(x) & 7 - if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { + b, err = unmarshal(b, valToPointer(value.Addr()), wire) + if err != nil { return nil, err } - if o.index >= len(o.buf) { + if len(b) == 0 { break } } @@ -473,9 +429,9 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { // GetExtensions returns a slice of the extensions present in pb that are also listed in es. // The returned slice has the same length as es; missing extensions will appear as nil elements. func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, ok := extendable(pb) - if !ok { - return nil, errors.New("proto: not an extendable proto") + epb, err := extendable(pb) + if err != nil { + return nil, err } extensions = make([]interface{}, len(es)) for i, e := range es { @@ -494,9 +450,9 @@ func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, e // For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing // just the Field field, which defines the extension's field number. 
func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, ok := extendable(pb) - if !ok { - return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) + epb, err := extendable(pb) + if err != nil { + return nil, err } registeredExtensions := RegisteredExtensions(pb) @@ -523,9 +479,9 @@ func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { // SetExtension sets the specified extension of pb to the specified value. func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - epb, ok := extendable(pb) - if !ok { - return errors.New("proto: not an extendable proto") + epb, err := extendable(pb) + if err != nil { + return err } if err := checkExtensionTypes(epb, extension); err != nil { return err @@ -550,8 +506,8 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error // ClearAllExtensions clears all extensions from pb. func ClearAllExtensions(pb Message) { - epb, ok := extendable(pb) - if !ok { + epb, err := extendable(pb) + if err != nil { return } m := epb.extensionsWrite() diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/lib.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/lib.go index 1c22550..0e2191b 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/lib.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/lib.go @@ -265,6 +265,7 @@ package proto import ( "encoding/json" + "errors" "fmt" "log" "reflect" @@ -273,6 +274,8 @@ import ( "sync" ) +var errInvalidUTF8 = errors.New("proto: invalid UTF-8 string") + // Message is implemented by generated protocol buffer messages. type Message interface { Reset() @@ -309,16 +312,7 @@ type Buffer struct { buf []byte // encode/decode byte stream index int // read point - // pools of basic types to amortize allocation. - bools []bool - uint32s []uint32 - uint64s []uint64 - - // extra pools, only used with pointer_reflect.go - int32s []int32 - int64s []int64 - float32s []float32 - float64s []float64 + deterministic bool } // NewBuffer allocates a new Buffer and initializes its internal data to @@ -343,6 +337,30 @@ func (p *Buffer) SetBuf(s []byte) { // Bytes returns the contents of the Buffer. func (p *Buffer) Bytes() []byte { return p.buf } +// SetDeterministic sets whether to use deterministic serialization. +// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. +// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. +// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexographical order. This is an implementation detail and +// subject to change. 
+func (p *Buffer) SetDeterministic(deterministic bool) { + p.deterministic = deterministic +} + /* * Helper routines for simplifying the creation of optional fields of basic type. */ @@ -831,22 +849,12 @@ func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMes return sf, false, nil } +// mapKeys returns a sort.Interface to be used for sorting the map keys. // Map fields may have key types of non-float scalars, strings and enums. -// The easiest way to sort them in some deterministic order is to use fmt. -// If this turns out to be inefficient we can always consider other options, -// such as doing a Schwartzian transform. - func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{ - vs: vs, - // default Less function: textual comparison - less: func(a, b reflect.Value) bool { - return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) - }, - } + s := mapKeySorter{vs: vs} - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; - // numeric keys are sorted numerically. + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. if len(vs) == 0 { return s } @@ -855,6 +863,12 @@ func mapKeys(vs []reflect.Value) sort.Interface { s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } case reflect.Uint32, reflect.Uint64: s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + case reflect.Bool: + s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true + case reflect.String: + s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } + default: + panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) } return s @@ -895,3 +909,13 @@ const ProtoPackageIsVersion2 = true // ProtoPackageIsVersion1 is referenced from generated protocol buffer files // to assert that that code is compatible with this version of the proto package. const ProtoPackageIsVersion1 = true + +// InternalMessageInfo is a type used internally by generated .pb.go files. +// This type is not intended to be used by non-generated code. +// This type is not subject to any compatibility guarantee. +type InternalMessageInfo struct { + marshal *marshalInfo + unmarshal *unmarshalInfo + merge *mergeInfo + discard *discardInfo +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/message_set.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/message_set.go index fd982de..3b6ca41 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/message_set.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/message_set.go @@ -42,6 +42,7 @@ import ( "fmt" "reflect" "sort" + "sync" ) // errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. @@ -94,10 +95,7 @@ func (ms *messageSet) find(pb Message) *_MessageSet_Item { } func (ms *messageSet) Has(pb Message) bool { - if ms.find(pb) != nil { - return true - } - return false + return ms.find(pb) != nil } func (ms *messageSet) Unmarshal(pb Message) error { @@ -150,46 +148,42 @@ func skipVarint(buf []byte) []byte { // MarshalMessageSet encodes the extension map represented by m in the message set wire format. // It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. 
func MarshalMessageSet(exts interface{}) ([]byte, error) { - var m map[int32]Extension + return marshalMessageSet(exts, false) +} + +// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal. +func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) { switch exts := exts.(type) { case *XXX_InternalExtensions: - if err := encodeExtensions(exts); err != nil { - return nil, err - } - m, _ = exts.extensionsRead() + var u marshalInfo + siz := u.sizeMessageSet(exts) + b := make([]byte, 0, siz) + return u.appendMessageSet(b, exts, deterministic) + case map[int32]Extension: - if err := encodeExtensionsMap(exts); err != nil { - return nil, err + // This is an old-style extension map. + // Wrap it in a new-style XXX_InternalExtensions. + ie := XXX_InternalExtensions{ + p: &struct { + mu sync.Mutex + extensionMap map[int32]Extension + }{ + extensionMap: exts, + }, } - m = exts + + var u marshalInfo + siz := u.sizeMessageSet(&ie) + b := make([]byte, 0, siz) + return u.appendMessageSet(b, &ie, deterministic) + default: return nil, errors.New("proto: not an extension map") } - - // Sort extension IDs to provide a deterministic encoding. - // See also enc_map in encode.go. - ids := make([]int, 0, len(m)) - for id := range m { - ids = append(ids, int(id)) - } - sort.Ints(ids) - - ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} - for _, id := range ids { - e := m[int32(id)] - // Remove the wire type and field number varint, as well as the length varint. - msg := skipVarint(skipVarint(e.enc)) - - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: Int32(int32(id)), - Message: msg, - }) - } - return Marshal(ms) } // UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. func UnmarshalMessageSet(buf []byte, exts interface{}) error { var m map[int32]Extension switch exts := exts.(type) { @@ -235,7 +229,15 @@ func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { var m map[int32]Extension switch exts := exts.(type) { case *XXX_InternalExtensions: - m, _ = exts.extensionsRead() + var mu sync.Locker + m, mu = exts.extensionsRead() + if m != nil { + // Keep the extensions map locked until we're done marshaling to prevent + // races between marshaling and unmarshaling the lazily-{en,de}coded + // values. + mu.Lock() + defer mu.Unlock() + } case map[int32]Extension: m = exts default: @@ -253,15 +255,16 @@ func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { for i, id := range ids { ext := m[id] - if i > 0 { - b.WriteByte(',') - } - msd, ok := messageSetMap[id] if !ok { // Unknown type; we can't render it, so skip it. 
continue } + + if i > 0 && b.Len() > 1 { + b.WriteByte(',') + } + fmt.Fprintf(&b, `"[%s]":`, msd.name) x := ext.value diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/pointer_reflect.go index fb512e2..b6cad90 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/pointer_reflect.go @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build appengine js +// +build purego appengine js // This file contains an implementation of proto field accesses using package reflect. // It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can @@ -38,32 +38,13 @@ package proto import ( - "math" "reflect" + "sync" ) -// A structPointer is a pointer to a struct. -type structPointer struct { - v reflect.Value -} - -// toStructPointer returns a structPointer equivalent to the given reflect value. -// The reflect value must itself be a pointer to a struct. -func toStructPointer(v reflect.Value) structPointer { - return structPointer{v} -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p.v.IsNil() -} +const unsafeAllowed = false -// Interface returns the struct pointer as an interface value. -func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { - return p.v.Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. +// A field identifies a field in a struct, accessible from a pointer. // In this implementation, a field is identified by the sequence of field indices // passed to reflect's FieldByIndex. type field []int @@ -76,409 +57,301 @@ func toField(f *reflect.StructField) field { // invalidField is an invalid field identifier. var invalidField = field(nil) +// zeroField is a noop when calling pointer.offset. +var zeroField = field([]int{}) + // IsValid reports whether the field identifier is valid. func (f field) IsValid() bool { return f != nil } -// field returns the given field in the struct as a reflect value. -func structPointer_field(p structPointer, f field) reflect.Value { - // Special case: an extension map entry with a value of type T - // passes a *T to the struct-handling code with a zero field, - // expecting that it will be treated as equivalent to *struct{ X T }, - // which has the same memory layout. We have to handle that case - // specially, because reflect will panic if we call FieldByIndex on a - // non-struct. - if f == nil { - return p.v.Elem() - } - - return p.v.Elem().FieldByIndex(f) +// The pointer type is for the table-driven decoder. +// The implementation here uses a reflect.Value of pointer type to +// create a generic pointer. In pointer_unsafe.go we use unsafe +// instead of reflect to implement the same (but faster) interface. +type pointer struct { + v reflect.Value } -// ifield returns the given field in the struct as an interface value. -func structPointer_ifield(p structPointer, f field) interface{} { - return structPointer_field(p, f).Addr().Interface() +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. 
+func toPointer(i *Message) pointer { + return pointer{v: reflect.ValueOf(*i)} } -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return structPointer_ifield(p, f).(*[]byte) +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + v := reflect.ValueOf(*i) + u := reflect.New(v.Type()) + u.Elem().Set(v) + return pointer{v: u} } -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return structPointer_ifield(p, f).(*[][]byte) +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{v: v} } -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return structPointer_ifield(p, f).(**bool) +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} } -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return structPointer_ifield(p, f).(*bool) +func (p pointer) isNil() bool { + return p.v.IsNil() } -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return structPointer_ifield(p, f).(*[]bool) +// grow updates the slice s in place to make it one element longer. +// s must be addressable. +// Returns the (addressable) new element. +func grow(s reflect.Value) reflect.Value { + n, m := s.Len(), s.Cap() + if n < m { + s.SetLen(n + 1) + } else { + s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) + } + return s.Index(n) } -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return structPointer_ifield(p, f).(**string) +func (p pointer) toInt64() *int64 { + return p.v.Interface().(*int64) } - -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return structPointer_ifield(p, f).(*string) +func (p pointer) toInt64Ptr() **int64 { + return p.v.Interface().(**int64) } - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return structPointer_ifield(p, f).(*[]string) +func (p pointer) toInt64Slice() *[]int64 { + return p.v.Interface().(*[]int64) } -// Extensions returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return structPointer_ifield(p, f).(*XXX_InternalExtensions) -} +var int32ptr = reflect.TypeOf((*int32)(nil)) -// ExtMap returns the address of an extension map field in the struct. -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return structPointer_ifield(p, f).(*map[int32]Extension) +func (p pointer) toInt32() *int32 { + return p.v.Convert(int32ptr).Interface().(*int32) } -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return structPointer_field(p, f).Addr() +// The toInt32Ptr/Slice methods don't work because of enums. 
+// Instead, we must use set/get methods for the int32ptr/slice case. +/* + func (p pointer) toInt32Ptr() **int32 { + return p.v.Interface().(**int32) } - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - structPointer_field(p, f).Set(q.v) + func (p pointer) toInt32Slice() *[]int32 { + return p.v.Interface().(*[]int32) } - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return structPointer{structPointer_field(p, f)} +*/ +func (p pointer) getInt32Ptr() *int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().(*int32) + } + // an enum + return p.v.Elem().Convert(int32PtrType).Interface().(*int32) +} +func (p pointer) setInt32Ptr(v int32) { + // Allocate value in a *int32. Possibly convert that to a *enum. + // Then assign it to a **int32 or **enum. + // Note: we can convert *int32 to *enum, but we can't convert + // **int32 to **enum! + p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) +} + +// getInt32Slice copies []int32 from p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getInt32Slice() []int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().([]int32) + } + // an enum + // Allocate a []int32, then assign []enum's values into it. + // Note: we can't convert []enum to []int32. + slice := p.v.Elem() + s := make([]int32, slice.Len()) + for i := 0; i < slice.Len(); i++ { + s[i] = int32(slice.Index(i).Int()) + } + return s } -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { - return structPointerSlice{structPointer_field(p, f)} +// setInt32Slice copies []int32 into p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setInt32Slice(v []int32) { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + p.v.Elem().Set(reflect.ValueOf(v)) + return + } + // an enum + // Allocate a []enum, then assign []int32's values into it. + // Note: we can't convert []enum to []int32. + slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) + for i, x := range v { + slice.Index(i).SetInt(int64(x)) + } + p.v.Elem().Set(slice) } - -// A structPointerSlice represents the address of a slice of pointers to structs -// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. -type structPointerSlice struct { - v reflect.Value +func (p pointer) appendInt32Slice(v int32) { + grow(p.v.Elem()).SetInt(int64(v)) } -func (p structPointerSlice) Len() int { return p.v.Len() } -func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } -func (p structPointerSlice) Append(q structPointer) { - p.v.Set(reflect.Append(p.v, q.v)) +func (p pointer) toUint64() *uint64 { + return p.v.Interface().(*uint64) } - -var ( - int32Type = reflect.TypeOf(int32(0)) - uint32Type = reflect.TypeOf(uint32(0)) - float32Type = reflect.TypeOf(float32(0)) - int64Type = reflect.TypeOf(int64(0)) - uint64Type = reflect.TypeOf(uint64(0)) - float64Type = reflect.TypeOf(float64(0)) -) - -// A word32 represents a field of type *int32, *uint32, *float32, or *enum. 
-// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. -type word32 struct { - v reflect.Value +func (p pointer) toUint64Ptr() **uint64 { + return p.v.Interface().(**uint64) } - -// IsNil reports whether p is nil. -func word32_IsNil(p word32) bool { - return p.v.IsNil() +func (p pointer) toUint64Slice() *[]uint64 { + return p.v.Interface().(*[]uint64) } - -// Set sets p to point at a newly allocated word with bits set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - t := p.v.Type().Elem() - switch t { - case int32Type: - if len(o.int32s) == 0 { - o.int32s = make([]int32, uint32PoolSize) - } - o.int32s[0] = int32(x) - p.v.Set(reflect.ValueOf(&o.int32s[0])) - o.int32s = o.int32s[1:] - return - case uint32Type: - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - p.v.Set(reflect.ValueOf(&o.uint32s[0])) - o.uint32s = o.uint32s[1:] - return - case float32Type: - if len(o.float32s) == 0 { - o.float32s = make([]float32, uint32PoolSize) - } - o.float32s[0] = math.Float32frombits(x) - p.v.Set(reflect.ValueOf(&o.float32s[0])) - o.float32s = o.float32s[1:] - return - } - - // must be enum - p.v.Set(reflect.New(t)) - p.v.Elem().SetInt(int64(int32(x))) +func (p pointer) toUint32() *uint32 { + return p.v.Interface().(*uint32) } - -// Get gets the bits pointed at by p, as a uint32. -func word32_Get(p word32) uint32 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") +func (p pointer) toUint32Ptr() **uint32 { + return p.v.Interface().(**uint32) } - -// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32{structPointer_field(p, f)} +func (p pointer) toUint32Slice() *[]uint32 { + return p.v.Interface().(*[]uint32) } - -// A word32Val represents a field of type int32, uint32, float32, or enum. -// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. -type word32Val struct { - v reflect.Value +func (p pointer) toBool() *bool { + return p.v.Interface().(*bool) } - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - switch p.v.Type() { - case int32Type: - p.v.SetInt(int64(x)) - return - case uint32Type: - p.v.SetUint(uint64(x)) - return - case float32Type: - p.v.SetFloat(float64(math.Float32frombits(x))) - return - } - - // must be enum - p.v.SetInt(int64(int32(x))) +func (p pointer) toBoolPtr() **bool { + return p.v.Interface().(**bool) } - -// Get gets the bits pointed at by p, as a uint32. -func word32Val_Get(p word32Val) uint32 { - elem := p.v - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") +func (p pointer) toBoolSlice() *[]bool { + return p.v.Interface().(*[]bool) } - -// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val{structPointer_field(p, f)} +func (p pointer) toFloat64() *float64 { + return p.v.Interface().(*float64) } - -// A word32Slice is a slice of 32-bit values. -// That is, v.Type() is []int32, []uint32, []float32, or []enum. 
-type word32Slice struct { - v reflect.Value +func (p pointer) toFloat64Ptr() **float64 { + return p.v.Interface().(**float64) } - -func (p word32Slice) Append(x uint32) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int32: - elem.SetInt(int64(int32(x))) - case reflect.Uint32: - elem.SetUint(uint64(x)) - case reflect.Float32: - elem.SetFloat(float64(math.Float32frombits(x))) - } +func (p pointer) toFloat64Slice() *[]float64 { + return p.v.Interface().(*[]float64) } - -func (p word32Slice) Len() int { - return p.v.Len() +func (p pointer) toFloat32() *float32 { + return p.v.Interface().(*float32) } - -func (p word32Slice) Index(i int) uint32 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") +func (p pointer) toFloat32Ptr() **float32 { + return p.v.Interface().(**float32) } - -// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) word32Slice { - return word32Slice{structPointer_field(p, f)} +func (p pointer) toFloat32Slice() *[]float32 { + return p.v.Interface().(*[]float32) } - -// word64 is like word32 but for 64-bit values. -type word64 struct { - v reflect.Value +func (p pointer) toString() *string { + return p.v.Interface().(*string) } - -func word64_Set(p word64, o *Buffer, x uint64) { - t := p.v.Type().Elem() - switch t { - case int64Type: - if len(o.int64s) == 0 { - o.int64s = make([]int64, uint64PoolSize) - } - o.int64s[0] = int64(x) - p.v.Set(reflect.ValueOf(&o.int64s[0])) - o.int64s = o.int64s[1:] - return - case uint64Type: - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - p.v.Set(reflect.ValueOf(&o.uint64s[0])) - o.uint64s = o.uint64s[1:] - return - case float64Type: - if len(o.float64s) == 0 { - o.float64s = make([]float64, uint64PoolSize) - } - o.float64s[0] = math.Float64frombits(x) - p.v.Set(reflect.ValueOf(&o.float64s[0])) - o.float64s = o.float64s[1:] - return - } - panic("unreachable") +func (p pointer) toStringPtr() **string { + return p.v.Interface().(**string) } - -func word64_IsNil(p word64) bool { - return p.v.IsNil() +func (p pointer) toStringSlice() *[]string { + return p.v.Interface().(*[]string) } - -func word64_Get(p word64) uint64 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") +func (p pointer) toBytes() *[]byte { + return p.v.Interface().(*[]byte) } - -func structPointer_Word64(p structPointer, f field) word64 { - return word64{structPointer_field(p, f)} +func (p pointer) toBytesSlice() *[][]byte { + return p.v.Interface().(*[][]byte) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return p.v.Interface().(*XXX_InternalExtensions) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return p.v.Interface().(*map[int32]Extension) +} +func (p pointer) getPointer() pointer { + return pointer{v: p.v.Elem()} +} +func (p pointer) setPointer(q pointer) { + p.v.Elem().Set(q.v) +} +func (p pointer) appendPointer(q pointer) { + grow(p.v.Elem()).Set(q.v) } 
-// word64Val is like word32Val but for 64-bit values. -type word64Val struct { - v reflect.Value +// getPointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getPointerSlice() []pointer { + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s } -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - switch p.v.Type() { - case int64Type: - p.v.SetInt(int64(x)) - return - case uint64Type: - p.v.SetUint(x) - return - case float64Type: - p.v.SetFloat(math.Float64frombits(x)) +// setPointerSlice copies []pointer into p as a new []*T. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setPointerSlice(v []pointer) { + if v == nil { + p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) return } - panic("unreachable") + s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) + for _, p := range v { + s = reflect.Append(s, p.v) + } + p.v.Elem().Set(s) } -func word64Val_Get(p word64Val) uint64 { - elem := p.v - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + if p.v.Elem().IsNil() { + return pointer{v: p.v.Elem()} } - panic("unreachable") + return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct } -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val{structPointer_field(p, f)} +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + // TODO: check that p.v.Type().Elem() == t? 
+ return p.v } -type word64Slice struct { - v reflect.Value +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p } - -func (p word64Slice) Append(x uint64) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int64: - elem.SetInt(int64(int64(x))) - case reflect.Uint64: - elem.SetUint(uint64(x)) - case reflect.Float64: - elem.SetFloat(float64(math.Float64frombits(x))) - } +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v } - -func (p word64Slice) Len() int { - return p.v.Len() +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p } - -func (p word64Slice) Index(i int) uint64 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return uint64(elem.Uint()) - case reflect.Float64: - return math.Float64bits(float64(elem.Float())) - } - panic("unreachable") +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v } - -func structPointer_Word64Slice(p structPointer, f field) word64Slice { - return word64Slice{structPointer_field(p, f)} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v } +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} + +var atomicLock sync.Mutex diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go index 6b5567d..d55a335 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build !appengine,!js +// +build !purego,!appengine,!js // This file contains the implementation of the proto field accesses using package unsafe. @@ -37,38 +37,13 @@ package proto import ( "reflect" + "sync/atomic" "unsafe" ) -// NOTE: These type_Foo functions would more idiomatically be methods, -// but Go does not allow methods on pointer types, and we must preserve -// some pointer type for the garbage collector. We use these -// funcs with clunky names as our poor approximation to methods. -// -// An alternative would be -// type structPointer struct { p unsafe.Pointer } -// but that does not registerize as well. - -// A structPointer is a pointer to a struct. -type structPointer unsafe.Pointer - -// toStructPointer returns a structPointer equivalent to the given reflect value. 
-func toStructPointer(v reflect.Value) structPointer { - return structPointer(unsafe.Pointer(v.Pointer())) -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p == nil -} - -// Interface returns the struct pointer, assumed to have element type t, -// as an interface value. -func structPointer_Interface(p structPointer, t reflect.Type) interface{} { - return reflect.NewAt(t, unsafe.Pointer(p)).Interface() -} +const unsafeAllowed = true -// A field identifies a field in a struct, accessible from a structPointer. +// A field identifies a field in a struct, accessible from a pointer. // In this implementation, a field is identified by its byte offset from the start of the struct. type field uintptr @@ -80,191 +55,254 @@ func toField(f *reflect.StructField) field { // invalidField is an invalid field identifier. const invalidField = ^field(0) +// zeroField is a noop when calling pointer.offset. +const zeroField = field(0) + // IsValid reports whether the field identifier is valid. func (f field) IsValid() bool { - return f != ^field(0) + return f != invalidField } -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// The pointer type below is for the new table-driven encoder/decoder. +// The implementation here uses unsafe.Pointer to create a generic pointer. +// In pointer_reflect.go we use reflect instead of unsafe to implement +// the same (but slower) interface. +type pointer struct { + p unsafe.Pointer } -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} +// size of pointer +var ptrSize = unsafe.Sizeof(uintptr(0)) -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + // Super-tricky - read pointer out of data word of interface value. + // Saves ~25ns over the equivalent: + // return valToPointer(reflect.ValueOf(*i)) + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} } -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + // Super-tricky - read or get the address of data word of interface value. + if isptr { + // The interface is of pointer type, thus it is a direct interface. + // The data word is the pointer data itself. We take its address. + return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + } + // The interface is not of pointer type. The data word is the pointer + // to the data. + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} } -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// valToPointer converts v to a pointer. v must be of pointer type. 
+func valToPointer(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} } -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + // For safety, we should panic if !f.IsValid, however calling panic causes + // this to no longer be inlineable, which is a serious performance cost. + /* + if !f.IsValid() { + panic("invalid field") + } + */ + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} } -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) isNil() bool { + return p.p == nil } -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toInt64() *int64 { + return (*int64)(p.p) } - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toInt64Ptr() **int64 { + return (**int64)(p.p) } - -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toInt64Slice() *[]int64 { + return (*[]int64)(p.p) } - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) +func (p pointer) toInt32() *int32 { + return (*int32)(p.p) } -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q +// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. +/* + func (p pointer) toInt32Ptr() **int32 { + return (**int32)(p.p) + } + func (p pointer) toInt32Slice() *[]int32 { + return (*[]int32)(p.p) + } +*/ +func (p pointer) getInt32Ptr() *int32 { + return *(**int32)(p.p) } - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) setInt32Ptr(v int32) { + *(**int32)(p.p) = &v } -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { - return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// getInt32Slice loads a []int32 from p. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getInt32Slice() []int32 { + return *(*[]int32)(p.p) } -// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). 
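The unsafe implementation above identifies a struct field purely by its byte offset (field is a uintptr) and reaches it by adding that offset to the struct's base pointer. A self-contained sketch of the same idea; record and the other names are illustrative, only the offset arithmetic mirrors the patch:

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

// record is an illustrative struct; the patch works on generated messages.
type record struct {
	ID   int64
	Name string
}

// field identifies a struct field by its byte offset, as in the patch.
type field uintptr

// pointer wraps the struct's base address, like the unsafe pointer type above.
type pointer struct{ p unsafe.Pointer }

// offset moves the base pointer to the field, the same arithmetic
// performed by pointer.offset in the patched file.
func (p pointer) offset(f field) pointer {
	return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
}

func (p pointer) toInt64() *int64 { return (*int64)(p.p) }

func main() {
	r := record{ID: 7, Name: "x"}
	sf, _ := reflect.TypeOf(r).FieldByName("ID")
	base := pointer{p: unsafe.Pointer(&r)}
	*base.offset(field(sf.Offset)).toInt64() = 42 // write r.ID through its offset
	fmt.Println(r.ID)                             // prints 42
}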
-type structPointerSlice []structPointer - -func (v *structPointerSlice) Len() int { return len(*v) } -func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } -func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } - -// A word32 is the address of a "pointer to 32-bit value" field. -type word32 **uint32 - -// IsNil reports whether *v is nil. -func word32_IsNil(p word32) bool { - return *p == nil +// setInt32Slice stores a []int32 to p. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setInt32Slice(v []int32) { + *(*[]int32)(p.p) = v } -// Set sets *v to point at a newly allocated word set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - *p = &o.uint32s[0] - o.uint32s = o.uint32s[1:] +// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? +func (p pointer) appendInt32Slice(v int32) { + s := (*[]int32)(p.p) + *s = append(*s, v) } -// Get gets the value pointed at by *v. -func word32_Get(p word32) uint32 { - return **p +func (p pointer) toUint64() *uint64 { + return (*uint64)(p.p) } - -// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +func (p pointer) toUint64Ptr() **uint64 { + return (**uint64)(p.p) } - -// A word32Val is the address of a 32-bit value field. -type word32Val *uint32 - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - *p = x +func (p pointer) toUint64Slice() *[]uint64 { + return (*[]uint64)(p.p) } - -// Get gets the value pointed at by p. -func word32Val_Get(p word32Val) uint32 { - return *p +func (p pointer) toUint32() *uint32 { + return (*uint32)(p.p) } - -// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +func (p pointer) toUint32Ptr() **uint32 { + return (**uint32)(p.p) } - -// A word32Slice is a slice of 32-bit values. -type word32Slice []uint32 - -func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } -func (v *word32Slice) Len() int { return len(*v) } -func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } - -// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) *word32Slice { - return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toUint32Slice() *[]uint32 { + return (*[]uint32)(p.p) } - -// word64 is like word32 but for 64-bit values. 
-type word64 **uint64 - -func word64_Set(p word64, o *Buffer, x uint64) { - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - *p = &o.uint64s[0] - o.uint64s = o.uint64s[1:] +func (p pointer) toBool() *bool { + return (*bool)(p.p) } - -func word64_IsNil(p word64) bool { - return *p == nil +func (p pointer) toBoolPtr() **bool { + return (**bool)(p.p) } - -func word64_Get(p word64) uint64 { - return **p +func (p pointer) toBoolSlice() *[]bool { + return (*[]bool)(p.p) +} +func (p pointer) toFloat64() *float64 { + return (*float64)(p.p) +} +func (p pointer) toFloat64Ptr() **float64 { + return (**float64)(p.p) +} +func (p pointer) toFloat64Slice() *[]float64 { + return (*[]float64)(p.p) +} +func (p pointer) toFloat32() *float32 { + return (*float32)(p.p) +} +func (p pointer) toFloat32Ptr() **float32 { + return (**float32)(p.p) +} +func (p pointer) toFloat32Slice() *[]float32 { + return (*[]float32)(p.p) +} +func (p pointer) toString() *string { + return (*string)(p.p) +} +func (p pointer) toStringPtr() **string { + return (**string)(p.p) +} +func (p pointer) toStringSlice() *[]string { + return (*[]string)(p.p) +} +func (p pointer) toBytes() *[]byte { + return (*[]byte)(p.p) +} +func (p pointer) toBytesSlice() *[][]byte { + return (*[][]byte)(p.p) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(p.p) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return (*map[int32]Extension)(p.p) } -func structPointer_Word64(p structPointer, f field) word64 { - return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +// getPointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getPointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) } -// word64Val is like word32Val but for 64-bit values. -type word64Val *uint64 +// setPointerSlice stores []pointer into p as a []*T. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setPointerSlice(v []pointer) { + // Super-tricky - p should point to a []*T where T is a + // message type. We store it as []pointer. + *(*[]pointer)(p.p) = v +} -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - *p = x +// getPointer loads the pointer at p and returns it. +func (p pointer) getPointer() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} } -func word64Val_Get(p word64Val) uint64 { - return *p +// setPointer stores the pointer q at p. +func (p pointer) setPointer(q pointer) { + *(*unsafe.Pointer)(p.p) = q.p } -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +// append q to the slice pointed to by p. +func (p pointer) appendPointer(q pointer) { + s := (*[]unsafe.Pointer)(p.p) + *s = append(*s, q.p) } -// word64Slice is like word32Slice but for 64-bit values. -type word64Slice []uint64 +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + // Super-tricky - read pointer out of data word of interface value. 
+ return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} +} -func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } -func (v *word64Slice) Len() int { return len(*v) } -func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } +// asPointerTo returns a reflect.Value that is a pointer to an +// object of type t stored at p. +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} -func structPointer_Word64Slice(p structPointer, f field) *word64Slice { - return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) } diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/properties.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/properties.go index ec2289c..1f272c0 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/properties.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/properties.go @@ -58,42 +58,6 @@ const ( WireFixed32 = 5 ) -const startSize = 10 // initial slice/string sizes - -// Encoders are defined in encode.go -// An encoder outputs the full representation of a field, including its -// tag and encoder type. -type encoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueEncoder encodes a single integer in a particular encoding. -type valueEncoder func(o *Buffer, x uint64) error - -// Sizers are defined in encode.go -// A sizer returns the encoded size of a field, including its tag and encoder -// type. -type sizer func(prop *Properties, base structPointer) int - -// A valueSizer returns the encoded size of a single integer in a particular -// encoding. -type valueSizer func(x uint64) int - -// Decoders are defined in decode.go -// A decoder creates a value from its wire representation. -// Unrecognized subelements are saved in unrec. -type decoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueDecoder decodes a single integer in a particular encoding. -type valueDecoder func(o *Buffer) (x uint64, err error) - -// A oneofMarshaler does the marshaling for all oneof fields in a message. -type oneofMarshaler func(Message, *Buffer) error - -// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. 
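toPointer, toAddrPointer and getInterfacePointer above all lean on the same trick: on the gc runtime an interface value is two machine words, a type word followed by a data word, so casting its address to *[2]unsafe.Pointer and reading index 1 recovers the stored pointer. A tiny sketch of that layout assumption (msg is an illustrative type; this only holds where the two-word layout does, which is why the file sits behind the !purego,!appengine,!js build tag):

package main

import (
	"fmt"
	"unsafe"
)

type msg struct{ n int }

func main() {
	m := &msg{n: 5}
	var i interface{} = m

	// Read the data word of the interface value: word 0 is the type,
	// word 1 is the *msg that was stored in it.
	data := (*[2]unsafe.Pointer)(unsafe.Pointer(&i))[1]

	fmt.Println((*msg)(data).n) // prints 5
}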
-type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) - -// A oneofSizer does the sizing for all oneof fields in a message. -type oneofSizer func(Message) int - // tagMap is an optimization over map[int]int for typical protocol buffer // use-cases. Encoded protocol buffers are often in tag order with small tag // numbers. @@ -140,13 +104,6 @@ type StructProperties struct { decoderTags tagMap // map from proto tag to struct field number decoderOrigNames map[string]int // map from original name to struct field number order []int // list of struct field numbers in tag order - unrecField field // field id of the XXX_unrecognized []byte field - extendable bool // is this an extendable proto - - oneofMarshaler oneofMarshaler - oneofUnmarshaler oneofUnmarshaler - oneofSizer oneofSizer - stype reflect.Type // OneofTypes contains information about the oneof fields in this message. // It is keyed by the original name of a field. @@ -182,41 +139,24 @@ type Properties struct { Repeated bool Packed bool // relevant for repeated primitives only Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field; set for []byte only + proto3 bool // whether this is known to be a proto3 field oneof bool // whether this is a oneof field Default string // default value HasDefault bool // whether an explicit default was provided - def_uint64 uint64 - - enc encoder - valEnc valueEncoder // set for bool and numeric types only - field field - tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) - tagbuf [8]byte - stype reflect.Type // set for struct types only - sprop *StructProperties // set for struct types only - isMarshaler bool - isUnmarshaler bool + + stype reflect.Type // set for struct types only + sprop *StructProperties // set for struct types only mtype reflect.Type // set for map types only mkeyprop *Properties // set for map types only mvalprop *Properties // set for map types only - - size sizer - valSize valueSizer // set for bool and numeric types only - - dec decoder - valDec valueDecoder // set for bool and numeric types only - - // If this is a packable field, this will be the decoder for the packed version of the field. - packedDec decoder } // String formats the properties in the protobuf struct field tag style. 
func (p *Properties) String() string { s := p.Wire - s = "," + s += "," s += strconv.Itoa(p.Tag) if p.Required { s += ",req" @@ -262,29 +202,14 @@ func (p *Properties) Parse(s string) { switch p.Wire { case "varint": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeVarint - p.valDec = (*Buffer).DecodeVarint - p.valSize = sizeVarint case "fixed32": p.WireType = WireFixed32 - p.valEnc = (*Buffer).EncodeFixed32 - p.valDec = (*Buffer).DecodeFixed32 - p.valSize = sizeFixed32 case "fixed64": p.WireType = WireFixed64 - p.valEnc = (*Buffer).EncodeFixed64 - p.valDec = (*Buffer).DecodeFixed64 - p.valSize = sizeFixed64 case "zigzag32": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag32 - p.valDec = (*Buffer).DecodeZigzag32 - p.valSize = sizeZigzag32 case "zigzag64": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag64 - p.valDec = (*Buffer).DecodeZigzag64 - p.valSize = sizeZigzag64 case "bytes", "group": p.WireType = WireBytes // no numeric converter for non-numeric types @@ -299,6 +224,7 @@ func (p *Properties) Parse(s string) { return } +outer: for i := 2; i < len(fields); i++ { f := fields[i] switch { @@ -326,229 +252,28 @@ func (p *Properties) Parse(s string) { if i+1 < len(fields) { // Commas aren't escaped, and def is always last. p.Default += "," + strings.Join(fields[i+1:], ",") - break + break outer } } } } -func logNoSliceEnc(t1, t2 reflect.Type) { - fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) -} - var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() -// Initialize the fields for encoding and decoding. -func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - p.enc = nil - p.dec = nil - p.size = nil - +// setFieldProps initializes the field properties for submessages and maps. 
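Two small behavioural fixes sit in this hunk: String now appends the comma instead of overwriting the wire name (s += ","), and Parse gains an outer: label so that break leaves the field loop rather than only the enclosing switch once the default value has swallowed the remaining comma-separated fields. A minimal illustration of the labeled-break part, with a made-up tag:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// A fake struct tag split the way Properties.Parse sees it.
	fields := []string{"bytes", "3", "opt", "def=hello,world", "packed"}

	def := ""
outer:
	for i := 2; i < len(fields); i++ {
		switch {
		case strings.HasPrefix(fields[i], "def="):
			def = strings.TrimPrefix(fields[i], "def=")
			if i+1 < len(fields) {
				// Commas are not escaped and def is always last, so the
				// rest of the fields belongs to the default value.
				def += "," + strings.Join(fields[i+1:], ",")
			}
			// A plain break would only leave the switch; the labeled break
			// also ends the loop, which is what the patched Parse needs here.
			break outer
		}
	}
	fmt.Println(def) // hello,world,packed
}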
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { switch t1 := typ; t1.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) - - // proto3 scalar types - - case reflect.Bool: - p.enc = (*Buffer).enc_proto3_bool - p.dec = (*Buffer).dec_proto3_bool - p.size = size_proto3_bool - case reflect.Int32: - p.enc = (*Buffer).enc_proto3_int32 - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_proto3_uint32 - p.dec = (*Buffer).dec_proto3_int32 // can reuse - p.size = size_proto3_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_proto3_int64 - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.String: - p.enc = (*Buffer).enc_proto3_string - p.dec = (*Buffer).dec_proto3_string - p.size = size_proto3_string - case reflect.Ptr: - switch t2 := t1.Elem(); t2.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) - break - case reflect.Bool: - p.enc = (*Buffer).enc_bool - p.dec = (*Buffer).dec_bool - p.size = size_bool - case reflect.Int32: - p.enc = (*Buffer).enc_int32 - p.dec = (*Buffer).dec_int32 - p.size = size_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_uint32 - p.dec = (*Buffer).dec_int32 // can reuse - p.size = size_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_int64 - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_int32 - p.size = size_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_int64 // can just treat them as bits - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.String: - p.enc = (*Buffer).enc_string - p.dec = (*Buffer).dec_string - p.size = size_string - case reflect.Struct: + if t1.Elem().Kind() == reflect.Struct { p.stype = t1.Elem() - p.isMarshaler = isMarshaler(t1) - p.isUnmarshaler = isUnmarshaler(t1) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_struct_message - p.dec = (*Buffer).dec_struct_message - p.size = size_struct_message - } else { - p.enc = (*Buffer).enc_struct_group - p.dec = (*Buffer).dec_struct_group - p.size = size_struct_group - } } case reflect.Slice: - switch t2 := t1.Elem(); t2.Kind() { - default: - logNoSliceEnc(t1, t2) - break - case reflect.Bool: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_bool - p.size = size_slice_packed_bool - } else { - p.enc = (*Buffer).enc_slice_bool - p.size = size_slice_bool - } - p.dec = (*Buffer).dec_slice_bool - p.packedDec = (*Buffer).dec_slice_packed_bool - case reflect.Int32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int32 - p.size = size_slice_packed_int32 - } else { - p.enc = (*Buffer).enc_slice_int32 - p.size = size_slice_int32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Uint32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 
reflect.Int64, reflect.Uint64: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - case reflect.Uint8: - p.dec = (*Buffer).dec_slice_byte - if p.proto3 { - p.enc = (*Buffer).enc_proto3_slice_byte - p.size = size_proto3_slice_byte - } else { - p.enc = (*Buffer).enc_slice_byte - p.size = size_slice_byte - } - case reflect.Float32, reflect.Float64: - switch t2.Bits() { - case 32: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 64: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - default: - logNoSliceEnc(t1, t2) - break - } - case reflect.String: - p.enc = (*Buffer).enc_slice_string - p.dec = (*Buffer).dec_slice_string - p.size = size_slice_string - case reflect.Ptr: - switch t3 := t2.Elem(); t3.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) - break - case reflect.Struct: - p.stype = t2.Elem() - p.isMarshaler = isMarshaler(t2) - p.isUnmarshaler = isUnmarshaler(t2) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_slice_struct_message - p.dec = (*Buffer).dec_slice_struct_message - p.size = size_slice_struct_message - } else { - p.enc = (*Buffer).enc_slice_struct_group - p.dec = (*Buffer).dec_slice_struct_group - p.size = size_slice_struct_group - } - } - case reflect.Slice: - switch t2.Elem().Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) - break - case reflect.Uint8: - p.enc = (*Buffer).enc_slice_slice_byte - p.dec = (*Buffer).dec_slice_slice_byte - p.size = size_slice_slice_byte - } + if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct { + p.stype = t2.Elem() } case reflect.Map: - p.enc = (*Buffer).enc_new_map - p.dec = (*Buffer).dec_new_map - p.size = size_new_map - p.mtype = t1 p.mkeyprop = &Properties{} p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) @@ -562,20 +287,6 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) } - // precalculate tag code - wire := p.WireType - if p.Packed { - wire = WireBytes - } - x := uint32(p.Tag)<<3 | uint32(wire) - i := 0 - for i = 0; x > 127; i++ { - p.tagbuf[i] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - p.tagbuf[i] = uint8(x) - p.tagcode = p.tagbuf[0 : i+1] - if p.stype != nil { if lockGetProp { p.sprop = GetProperties(p.stype) @@ -586,32 +297,9 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock } var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() - unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() ) -// isMarshaler reports whether type t implements Marshaler. 
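The removed isMarshaler/isUnmarshaler helpers and the surviving marshalerType variable both hinge on one reflect idiom: build the reflect.Type of an interface from a typed nil pointer, then ask whether a concrete type implements it. A short sketch of that idiom with stand-in types (the Marshaler shape mirrors proto.Marshaler for illustration):

package main

import (
	"fmt"
	"reflect"
)

// Marshaler mirrors the shape of proto.Marshaler for this sketch only.
type Marshaler interface {
	Marshal() ([]byte, error)
}

type myMsg struct{}

func (*myMsg) Marshal() ([]byte, error) { return []byte("ok"), nil }

func main() {
	// (*Marshaler)(nil) is a *interface; Elem() yields the interface type
	// itself, the same way marshalerType is built in the patched file.
	marshalerType := reflect.TypeOf((*Marshaler)(nil)).Elem()

	t := reflect.TypeOf(&myMsg{}) // pointer receiver, so check the pointer type
	fmt.Println(t.Implements(marshalerType)) // true
}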
-func isMarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isMarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isMarshaler") - } - return t.Implements(marshalerType) -} - -// isUnmarshaler reports whether type t implements Unmarshaler. -func isUnmarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isUnmarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isUnmarshaler") - } - return t.Implements(unmarshalerType) -} - // Init populates the properties from a protocol buffer struct tag. func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { p.init(typ, name, tag, f, true) @@ -621,14 +309,11 @@ func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructF // "bytes,49,opt,def=hello!" p.Name = name p.OrigName = name - if f != nil { - p.field = toField(f) - } if tag == "" { return } p.Parse(tag) - p.setEncAndDec(typ, f, lockGetProp) + p.setFieldProps(typ, f, lockGetProp) } var ( @@ -678,9 +363,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { propertiesMap[t] = prop // build properties - prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || - reflect.PtrTo(t).Implements(extendableProtoV1Type) - prop.unrecField = invalidField prop.Prop = make([]*Properties, t.NumField()) prop.order = make([]int, t.NumField()) @@ -690,17 +372,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { name := f.Name p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - if f.Name == "XXX_InternalExtensions" { // special case - p.enc = (*Buffer).enc_exts - p.dec = nil // not needed - p.size = size_exts - } else if f.Name == "XXX_extensions" { // special case - p.enc = (*Buffer).enc_map - p.dec = nil // not needed - p.size = size_map - } else if f.Name == "XXX_unrecognized" { // special case - prop.unrecField = toField(&f) - } oneof := f.Tag.Get("protobuf_oneof") // special case if oneof != "" { // Oneof fields don't use the traditional protobuf tag. @@ -715,9 +386,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { } print("\n") } - if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { - fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") - } } // Re-order prop.order. @@ -728,8 +396,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { } if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { var oots []interface{} - prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() - prop.stype = t + _, _, _, oots = om.XXX_OneofFuncs() // Interpret oneof metadata. prop.OneofTypes = make(map[string]*OneofProperties) @@ -779,30 +446,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { return prop } -// Return the Properties object for the x[0]'th field of the structure. -func propByIndex(t reflect.Type, x []int) *Properties { - if len(x) != 1 { - fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) - return nil - } - prop := GetProperties(t) - return prop.Prop[x[0]] -} - -// Get the address and type of a pointer to a struct from an interface. 
-func getbase(pb Message) (t reflect.Type, b structPointer, err error) { - if pb == nil { - err = ErrNil - return - } - // get the reflect type of the pointer to the struct. - t = reflect.TypeOf(pb) - // get the address of the struct. - value := reflect.ValueOf(pb) - b = toStructPointer(value) - return -} - // A global registry of enum types. // The generated code will register the generated maps by calling RegisterEnum. @@ -826,20 +469,42 @@ func EnumValueMap(enumType string) map[string]int32 { // A registry of all linked message types. // The string is a fully-qualified proto name ("pkg.Message"). var ( - protoTypes = make(map[string]reflect.Type) - revProtoTypes = make(map[reflect.Type]string) + protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers + protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types + revProtoTypes = make(map[reflect.Type]string) ) // RegisterType is called from generated code and maps from the fully qualified // proto name to the type (pointer to struct) of the protocol buffer. func RegisterType(x Message, name string) { - if _, ok := protoTypes[name]; ok { + if _, ok := protoTypedNils[name]; ok { // TODO: Some day, make this a panic. log.Printf("proto: duplicate proto type registered: %s", name) return } t := reflect.TypeOf(x) - protoTypes[name] = t + if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { + // Generated code always calls RegisterType with nil x. + // This check is just for extra safety. + protoTypedNils[name] = x + } else { + protoTypedNils[name] = reflect.Zero(t).Interface().(Message) + } + revProtoTypes[t] = name +} + +// RegisterMapType is called from generated code and maps from the fully qualified +// proto name to the native map type of the proto map definition. +func RegisterMapType(x interface{}, name string) { + if reflect.TypeOf(x).Kind() != reflect.Map { + panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) + } + if _, ok := protoMapTypes[name]; ok { + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoMapTypes[name] = t revProtoTypes[t] = name } @@ -855,7 +520,14 @@ func MessageName(x Message) string { } // MessageType returns the message type (pointer to struct) for a named message. -func MessageType(name string) reflect.Type { return protoTypes[name] } +// The type is not guaranteed to implement proto.Message if the name refers to a +// map entry. +func MessageType(name string) reflect.Type { + if t, ok := protoTypedNils[name]; ok { + return reflect.TypeOf(t) + } + return protoMapTypes[name] +} // A registry of all linked proto files. var ( diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go index cc4d048..7a7d4e9 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go @@ -1,27 +1,13 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: proto3_proto/proto3.proto -// DO NOT EDIT! -/* -Package proto3_proto is a generated protocol buffer package. 
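The registry rework above keeps a typed nil Message per name (protoTypedNils), a separate map for proto map-entry types, and a MessageType lookup that checks both. A stripped-down sketch of that lookup flow, with hypothetical local names standing in for the package-level maps:

package main

import (
	"fmt"
	"reflect"
)

// Message stands in for proto.Message in this sketch.
type Message interface{ Reset() }

type ping struct{}

func (*ping) Reset() {}

var (
	typedNils = map[string]Message{}      // proto name -> typed nil pointer
	mapTypes  = map[string]reflect.Type{} // proto name -> native map type
)

func registerType(x Message, name string) {
	typedNils[name] = x // generated code passes a typed nil pointer here
}

func registerMapType(x interface{}, name string) {
	mapTypes[name] = reflect.TypeOf(x)
}

func messageType(name string) reflect.Type {
	if t, ok := typedNils[name]; ok {
		return reflect.TypeOf(t) // dynamic type of the typed nil, e.g. *main.ping
	}
	return mapTypes[name]
}

func main() {
	registerType((*ping)(nil), "sketch.Ping")
	registerMapType(map[int32]string(nil), "sketch.Ping.LabelsEntry")

	fmt.Println(messageType("sketch.Ping"))             // *main.ping
	fmt.Println(messageType("sketch.Ping.LabelsEntry")) // map[int32]string
}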
- -It is generated from these files: - proto3_proto/proto3.proto - -It has these top-level messages: - Message - Nested - MessageWithMap - IntMap - IntMaps -*/ package proto3_proto import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import google_protobuf "github.com/golang/protobuf/ptypes/any" -import testdata "github.com/golang/protobuf/proto/testdata" +import test_proto "github.com/golang/protobuf/proto/test_proto" +import any "github.com/golang/protobuf/ptypes/any" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -59,33 +45,58 @@ var Message_Humour_value = map[string]int32{ func (x Message_Humour) String() string { return proto.EnumName(Message_Humour_name, int32(x)) } -func (Message_Humour) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } +func (Message_Humour) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_proto3_78ae00cd7e6e5e35, []int{0, 0} +} type Message struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"` - HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm,json=heightInCm" json:"height_in_cm,omitempty"` - Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` - ResultCount int64 `protobuf:"varint,7,opt,name=result_count,json=resultCount" json:"result_count,omitempty"` - TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman,json=trueScotsman" json:"true_scotsman,omitempty"` - Score float32 `protobuf:"fixed32,9,opt,name=score" json:"score,omitempty"` - Key []uint64 `protobuf:"varint,5,rep,packed,name=key" json:"key,omitempty"` - ShortKey []int32 `protobuf:"varint,19,rep,packed,name=short_key,json=shortKey" json:"short_key,omitempty"` - Nested *Nested `protobuf:"bytes,6,opt,name=nested" json:"nested,omitempty"` - RFunny []Message_Humour `protobuf:"varint,16,rep,packed,name=r_funny,json=rFunny,enum=proto3_proto.Message_Humour" json:"r_funny,omitempty"` - Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - Proto2Field *testdata.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field,json=proto2Field" json:"proto2_field,omitempty"` - Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value,json=proto2Value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - Anything *google_protobuf.Any `protobuf:"bytes,14,opt,name=anything" json:"anything,omitempty"` - ManyThings []*google_protobuf.Any `protobuf:"bytes,15,rep,name=many_things,json=manyThings" json:"many_things,omitempty"` - Submessage *Message `protobuf:"bytes,17,opt,name=submessage" json:"submessage,omitempty"` - Children []*Message `protobuf:"bytes,18,rep,name=children" json:"children,omitempty"` -} - -func (m *Message) Reset() { *m = Message{} } -func (m *Message) String() string { return proto.CompactTextString(m) } -func (*Message) ProtoMessage() {} -func (*Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,proto3,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"` + HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm,json=heightInCm,proto3" 
json:"height_in_cm,omitempty"` + Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` + ResultCount int64 `protobuf:"varint,7,opt,name=result_count,json=resultCount,proto3" json:"result_count,omitempty"` + TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman,json=trueScotsman,proto3" json:"true_scotsman,omitempty"` + Score float32 `protobuf:"fixed32,9,opt,name=score,proto3" json:"score,omitempty"` + Key []uint64 `protobuf:"varint,5,rep,packed,name=key,proto3" json:"key,omitempty"` + ShortKey []int32 `protobuf:"varint,19,rep,packed,name=short_key,json=shortKey,proto3" json:"short_key,omitempty"` + Nested *Nested `protobuf:"bytes,6,opt,name=nested,proto3" json:"nested,omitempty"` + RFunny []Message_Humour `protobuf:"varint,16,rep,packed,name=r_funny,json=rFunny,proto3,enum=proto3_proto.Message_Humour" json:"r_funny,omitempty"` + Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain,proto3" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Proto2Field *test_proto.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field,json=proto2Field,proto3" json:"proto2_field,omitempty"` + Proto2Value map[string]*test_proto.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value,json=proto2Value,proto3" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Anything *any.Any `protobuf:"bytes,14,opt,name=anything,proto3" json:"anything,omitempty"` + ManyThings []*any.Any `protobuf:"bytes,15,rep,name=many_things,json=manyThings,proto3" json:"many_things,omitempty"` + Submessage *Message `protobuf:"bytes,17,opt,name=submessage,proto3" json:"submessage,omitempty"` + Children []*Message `protobuf:"bytes,18,rep,name=children,proto3" json:"children,omitempty"` + StringMap map[string]string `protobuf:"bytes,20,rep,name=string_map,json=stringMap,proto3" json:"string_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { + return fileDescriptor_proto3_78ae00cd7e6e5e35, []int{0} +} +func (m *Message) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Message.Unmarshal(m, b) +} +func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Message.Marshal(b, m, deterministic) +} +func (dst *Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message.Merge(dst, src) +} +func (m *Message) XXX_Size() int { + return xxx_messageInfo_Message.Size(m) +} +func (m *Message) XXX_DiscardUnknown() { + xxx_messageInfo_Message.DiscardUnknown(m) +} + +var xxx_messageInfo_Message proto.InternalMessageInfo func (m *Message) GetName() string { if m != nil { @@ -171,28 +182,28 @@ func (m *Message) GetTerrain() map[string]*Nested { return nil } -func (m *Message) GetProto2Field() *testdata.SubDefaults { +func (m *Message) GetProto2Field() *test_proto.SubDefaults { if m != nil { return m.Proto2Field } return nil } -func (m *Message) GetProto2Value() map[string]*testdata.SubDefaults { +func (m *Message) GetProto2Value() map[string]*test_proto.SubDefaults { if m != nil { return m.Proto2Value } return nil } -func (m *Message) GetAnything() *google_protobuf.Any { 
+func (m *Message) GetAnything() *any.Any { if m != nil { return m.Anything } return nil } -func (m *Message) GetManyThings() []*google_protobuf.Any { +func (m *Message) GetManyThings() []*any.Any { if m != nil { return m.ManyThings } @@ -213,15 +224,44 @@ func (m *Message) GetChildren() []*Message { return nil } +func (m *Message) GetStringMap() map[string]string { + if m != nil { + return m.StringMap + } + return nil +} + type Nested struct { - Bunny string `protobuf:"bytes,1,opt,name=bunny" json:"bunny,omitempty"` - Cute bool `protobuf:"varint,2,opt,name=cute" json:"cute,omitempty"` + Bunny string `protobuf:"bytes,1,opt,name=bunny,proto3" json:"bunny,omitempty"` + Cute bool `protobuf:"varint,2,opt,name=cute,proto3" json:"cute,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Nested) Reset() { *m = Nested{} } +func (m *Nested) String() string { return proto.CompactTextString(m) } +func (*Nested) ProtoMessage() {} +func (*Nested) Descriptor() ([]byte, []int) { + return fileDescriptor_proto3_78ae00cd7e6e5e35, []int{1} +} +func (m *Nested) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Nested.Unmarshal(m, b) +} +func (m *Nested) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Nested.Marshal(b, m, deterministic) +} +func (dst *Nested) XXX_Merge(src proto.Message) { + xxx_messageInfo_Nested.Merge(dst, src) +} +func (m *Nested) XXX_Size() int { + return xxx_messageInfo_Nested.Size(m) +} +func (m *Nested) XXX_DiscardUnknown() { + xxx_messageInfo_Nested.DiscardUnknown(m) } -func (m *Nested) Reset() { *m = Nested{} } -func (m *Nested) String() string { return proto.CompactTextString(m) } -func (*Nested) ProtoMessage() {} -func (*Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +var xxx_messageInfo_Nested proto.InternalMessageInfo func (m *Nested) GetBunny() string { if m != nil { @@ -238,13 +278,35 @@ func (m *Nested) GetCute() bool { } type MessageWithMap struct { - ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping,json=byteMapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"` + ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping,json=byteMapping,proto3" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } +func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } +func (*MessageWithMap) ProtoMessage() {} +func (*MessageWithMap) Descriptor() ([]byte, []int) { + return fileDescriptor_proto3_78ae00cd7e6e5e35, []int{2} +} +func (m *MessageWithMap) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageWithMap.Unmarshal(m, b) +} +func (m *MessageWithMap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageWithMap.Marshal(b, m, deterministic) +} +func (dst *MessageWithMap) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageWithMap.Merge(dst, src) +} +func (m *MessageWithMap) XXX_Size() int { + return xxx_messageInfo_MessageWithMap.Size(m) +} +func (m *MessageWithMap) XXX_DiscardUnknown() { + xxx_messageInfo_MessageWithMap.DiscardUnknown(m) } -func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } -func (m *MessageWithMap) 
String() string { return proto.CompactTextString(m) } -func (*MessageWithMap) ProtoMessage() {} -func (*MessageWithMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +var xxx_messageInfo_MessageWithMap proto.InternalMessageInfo func (m *MessageWithMap) GetByteMapping() map[bool][]byte { if m != nil { @@ -254,13 +316,35 @@ func (m *MessageWithMap) GetByteMapping() map[bool][]byte { } type IntMap struct { - Rtt map[int32]int32 `protobuf:"bytes,1,rep,name=rtt" json:"rtt,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + Rtt map[int32]int32 `protobuf:"bytes,1,rep,name=rtt,proto3" json:"rtt,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *IntMap) Reset() { *m = IntMap{} } -func (m *IntMap) String() string { return proto.CompactTextString(m) } -func (*IntMap) ProtoMessage() {} -func (*IntMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (m *IntMap) Reset() { *m = IntMap{} } +func (m *IntMap) String() string { return proto.CompactTextString(m) } +func (*IntMap) ProtoMessage() {} +func (*IntMap) Descriptor() ([]byte, []int) { + return fileDescriptor_proto3_78ae00cd7e6e5e35, []int{3} +} +func (m *IntMap) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IntMap.Unmarshal(m, b) +} +func (m *IntMap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IntMap.Marshal(b, m, deterministic) +} +func (dst *IntMap) XXX_Merge(src proto.Message) { + xxx_messageInfo_IntMap.Merge(dst, src) +} +func (m *IntMap) XXX_Size() int { + return xxx_messageInfo_IntMap.Size(m) +} +func (m *IntMap) XXX_DiscardUnknown() { + xxx_messageInfo_IntMap.DiscardUnknown(m) +} + +var xxx_messageInfo_IntMap proto.InternalMessageInfo func (m *IntMap) GetRtt() map[int32]int32 { if m != nil { @@ -270,13 +354,35 @@ func (m *IntMap) GetRtt() map[int32]int32 { } type IntMaps struct { - Maps []*IntMap `protobuf:"bytes,1,rep,name=maps" json:"maps,omitempty"` + Maps []*IntMap `protobuf:"bytes,1,rep,name=maps,proto3" json:"maps,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IntMaps) Reset() { *m = IntMaps{} } +func (m *IntMaps) String() string { return proto.CompactTextString(m) } +func (*IntMaps) ProtoMessage() {} +func (*IntMaps) Descriptor() ([]byte, []int) { + return fileDescriptor_proto3_78ae00cd7e6e5e35, []int{4} +} +func (m *IntMaps) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IntMaps.Unmarshal(m, b) +} +func (m *IntMaps) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IntMaps.Marshal(b, m, deterministic) +} +func (dst *IntMaps) XXX_Merge(src proto.Message) { + xxx_messageInfo_IntMaps.Merge(dst, src) +} +func (m *IntMaps) XXX_Size() int { + return xxx_messageInfo_IntMaps.Size(m) +} +func (m *IntMaps) XXX_DiscardUnknown() { + xxx_messageInfo_IntMaps.DiscardUnknown(m) } -func (m *IntMaps) Reset() { *m = IntMaps{} } -func (m *IntMaps) String() string { return proto.CompactTextString(m) } -func (*IntMaps) ProtoMessage() {} -func (*IntMaps) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +var xxx_messageInfo_IntMaps proto.InternalMessageInfo func (m *IntMaps) GetMaps() []*IntMap { if m != nil { @@ -285,63 +391,221 @@ func (m *IntMaps) GetMaps() []*IntMap { return nil } +type TestUTF8 
struct { + Scalar string `protobuf:"bytes,1,opt,name=scalar,proto3" json:"scalar,omitempty"` + Vector []string `protobuf:"bytes,2,rep,name=vector,proto3" json:"vector,omitempty"` + // Types that are valid to be assigned to Oneof: + // *TestUTF8_Field + Oneof isTestUTF8_Oneof `protobuf_oneof:"oneof"` + MapKey map[string]int64 `protobuf:"bytes,4,rep,name=map_key,json=mapKey,proto3" json:"map_key,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + MapValue map[int64]string `protobuf:"bytes,5,rep,name=map_value,json=mapValue,proto3" json:"map_value,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TestUTF8) Reset() { *m = TestUTF8{} } +func (m *TestUTF8) String() string { return proto.CompactTextString(m) } +func (*TestUTF8) ProtoMessage() {} +func (*TestUTF8) Descriptor() ([]byte, []int) { + return fileDescriptor_proto3_78ae00cd7e6e5e35, []int{5} +} +func (m *TestUTF8) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TestUTF8.Unmarshal(m, b) +} +func (m *TestUTF8) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TestUTF8.Marshal(b, m, deterministic) +} +func (dst *TestUTF8) XXX_Merge(src proto.Message) { + xxx_messageInfo_TestUTF8.Merge(dst, src) +} +func (m *TestUTF8) XXX_Size() int { + return xxx_messageInfo_TestUTF8.Size(m) +} +func (m *TestUTF8) XXX_DiscardUnknown() { + xxx_messageInfo_TestUTF8.DiscardUnknown(m) +} + +var xxx_messageInfo_TestUTF8 proto.InternalMessageInfo + +type isTestUTF8_Oneof interface { + isTestUTF8_Oneof() +} + +type TestUTF8_Field struct { + Field string `protobuf:"bytes,3,opt,name=field,proto3,oneof"` +} + +func (*TestUTF8_Field) isTestUTF8_Oneof() {} + +func (m *TestUTF8) GetOneof() isTestUTF8_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *TestUTF8) GetScalar() string { + if m != nil { + return m.Scalar + } + return "" +} + +func (m *TestUTF8) GetVector() []string { + if m != nil { + return m.Vector + } + return nil +} + +func (m *TestUTF8) GetField() string { + if x, ok := m.GetOneof().(*TestUTF8_Field); ok { + return x.Field + } + return "" +} + +func (m *TestUTF8) GetMapKey() map[string]int64 { + if m != nil { + return m.MapKey + } + return nil +} + +func (m *TestUTF8) GetMapValue() map[int64]string { + if m != nil { + return m.MapValue + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
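The generated TestUTF8 code above shows the standard oneof shape: an unexported marker interface (isTestUTF8_Oneof), one exported wrapper struct per case, and a getter that type-asserts the stored wrapper. A compact copy of that shape with lowercase stand-in names:

package main

import "fmt"

// isOneof is the marker interface; each case gets its own wrapper struct,
// just like isTestUTF8_Oneof and TestUTF8_Field in the generated code.
type isOneof interface{ isOneof() }

type oneofField struct{ Field string }

func (*oneofField) isOneof() {}

type testUTF8 struct{ Oneof isOneof }

// GetField mirrors the generated getter: assert the wrapper, else zero value.
func (m *testUTF8) GetField() string {
	if x, ok := m.Oneof.(*oneofField); ok {
		return x.Field
	}
	return ""
}

func main() {
	m := &testUTF8{Oneof: &oneofField{Field: "héllo"}}
	fmt.Println(m.GetField()) // héllo
}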
+func (*TestUTF8) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _TestUTF8_OneofMarshaler, _TestUTF8_OneofUnmarshaler, _TestUTF8_OneofSizer, []interface{}{ + (*TestUTF8_Field)(nil), + } +} + +func _TestUTF8_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*TestUTF8) + // oneof + switch x := m.Oneof.(type) { + case *TestUTF8_Field: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Field) + case nil: + default: + return fmt.Errorf("TestUTF8.Oneof has unexpected type %T", x) + } + return nil +} + +func _TestUTF8_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*TestUTF8) + switch tag { + case 3: // oneof.field + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Oneof = &TestUTF8_Field{x} + return true, err + default: + return false, nil + } +} + +func _TestUTF8_OneofSizer(msg proto.Message) (n int) { + m := msg.(*TestUTF8) + // oneof + switch x := m.Oneof.(type) { + case *TestUTF8_Field: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Field))) + n += len(x.Field) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + func init() { proto.RegisterType((*Message)(nil), "proto3_proto.Message") + proto.RegisterMapType((map[string]*test_proto.SubDefaults)(nil), "proto3_proto.Message.Proto2ValueEntry") + proto.RegisterMapType((map[string]string)(nil), "proto3_proto.Message.StringMapEntry") + proto.RegisterMapType((map[string]*Nested)(nil), "proto3_proto.Message.TerrainEntry") proto.RegisterType((*Nested)(nil), "proto3_proto.Nested") proto.RegisterType((*MessageWithMap)(nil), "proto3_proto.MessageWithMap") + proto.RegisterMapType((map[bool][]byte)(nil), "proto3_proto.MessageWithMap.ByteMappingEntry") proto.RegisterType((*IntMap)(nil), "proto3_proto.IntMap") + proto.RegisterMapType((map[int32]int32)(nil), "proto3_proto.IntMap.RttEntry") proto.RegisterType((*IntMaps)(nil), "proto3_proto.IntMaps") + proto.RegisterType((*TestUTF8)(nil), "proto3_proto.TestUTF8") + proto.RegisterMapType((map[string]int64)(nil), "proto3_proto.TestUTF8.MapKeyEntry") + proto.RegisterMapType((map[int64]string)(nil), "proto3_proto.TestUTF8.MapValueEntry") proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value) } -func init() { proto.RegisterFile("proto3_proto/proto3.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 733 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x53, 0x6d, 0x6f, 0xf3, 0x34, - 0x14, 0x25, 0x4d, 0x5f, 0xd2, 0x9b, 0x74, 0x0b, 0x5e, 0x91, 0xbc, 0x02, 0x52, 0x28, 0x12, 0x8a, - 0x78, 0x49, 0xa1, 0xd3, 0xd0, 0x84, 0x10, 0x68, 0x1b, 0x9b, 0xa8, 0xd6, 0x95, 0xca, 0xdd, 0x98, - 0xf8, 0x14, 0xa5, 0xad, 0xdb, 0x46, 0x34, 0x4e, 0x49, 0x1c, 0xa4, 0xfc, 0x1d, 0xfe, 0x28, 0x8f, - 0x6c, 0xa7, 0x5d, 0x36, 0x65, 0xcf, 0xf3, 0x29, 0xf6, 0xf1, 0xb9, 0xf7, 0x9c, 0x1c, 0x5f, 0xc3, - 0xe9, 0x2e, 0x89, 0x79, 0x7c, 0xe6, 0xcb, 0xcf, 0x40, 0x6d, 0x3c, 0xf9, 0x41, 0x56, 0xf9, 0xa8, - 0x77, 0xba, 0x8e, 0xe3, 0xf5, 0x96, 0x2a, 0xca, 0x3c, 0x5b, 0x0d, 0x02, 0x96, 0x2b, 0x62, 0xef, - 0x84, 0xd3, 0x94, 0x2f, 0x03, 0x1e, 0x0c, 0xc4, 0x42, 0x81, 0xfd, 0xff, 0x5b, 0xd0, 0xba, 0xa7, - 0x69, 0x1a, 0xac, 0x29, 0x42, 0x50, 0x67, 0x41, 0x44, 0xb1, 0xe6, 0x68, 0x6e, 0x9b, 0xc8, 0x35, 
- 0xba, 0x00, 0x63, 0x13, 0x6e, 0x83, 0x24, 0xe4, 0x39, 0xae, 0x39, 0x9a, 0x7b, 0x34, 0xfc, 0xcc, - 0x2b, 0x0b, 0x7a, 0x45, 0xb1, 0xf7, 0x7b, 0x16, 0xc5, 0x59, 0x42, 0x0e, 0x6c, 0xe4, 0x80, 0xb5, - 0xa1, 0xe1, 0x7a, 0xc3, 0xfd, 0x90, 0xf9, 0x8b, 0x08, 0xeb, 0x8e, 0xe6, 0x76, 0x08, 0x28, 0x6c, - 0xc4, 0xae, 0x23, 0xa1, 0x27, 0xec, 0xe0, 0xba, 0xa3, 0xb9, 0x16, 0x91, 0x6b, 0xf4, 0x05, 0x58, - 0x09, 0x4d, 0xb3, 0x2d, 0xf7, 0x17, 0x71, 0xc6, 0x38, 0x6e, 0x39, 0x9a, 0xab, 0x13, 0x53, 0x61, - 0xd7, 0x02, 0x42, 0x5f, 0x42, 0x87, 0x27, 0x19, 0xf5, 0xd3, 0x45, 0xcc, 0xd3, 0x28, 0x60, 0xd8, - 0x70, 0x34, 0xd7, 0x20, 0x96, 0x00, 0x67, 0x05, 0x86, 0xba, 0xd0, 0x48, 0x17, 0x71, 0x42, 0x71, - 0xdb, 0xd1, 0xdc, 0x1a, 0x51, 0x1b, 0x64, 0x83, 0xfe, 0x37, 0xcd, 0x71, 0xc3, 0xd1, 0xdd, 0x3a, - 0x11, 0x4b, 0xf4, 0x29, 0xb4, 0xd3, 0x4d, 0x9c, 0x70, 0x5f, 0xe0, 0x27, 0x8e, 0xee, 0x36, 0x88, - 0x21, 0x81, 0x3b, 0x9a, 0xa3, 0x6f, 0xa1, 0xc9, 0x68, 0xca, 0xe9, 0x12, 0x37, 0x1d, 0xcd, 0x35, - 0x87, 0xdd, 0x97, 0xbf, 0x3e, 0x91, 0x67, 0xa4, 0xe0, 0xa0, 0x73, 0x68, 0x25, 0xfe, 0x2a, 0x63, - 0x2c, 0xc7, 0xb6, 0xa3, 0x7f, 0x30, 0xa9, 0x66, 0x72, 0x2b, 0xb8, 0xe8, 0x67, 0x68, 0x71, 0x9a, - 0x24, 0x41, 0xc8, 0x30, 0x38, 0xba, 0x6b, 0x0e, 0xfb, 0xd5, 0x65, 0x0f, 0x8a, 0x74, 0xc3, 0x78, - 0x92, 0x93, 0x7d, 0x09, 0xba, 0x00, 0x75, 0xff, 0x43, 0x7f, 0x15, 0xd2, 0xed, 0x12, 0x9b, 0xd2, - 0xe8, 0x27, 0xde, 0xfe, 0xae, 0xbd, 0x59, 0x36, 0xff, 0x8d, 0xae, 0x82, 0x6c, 0xcb, 0x53, 0x62, - 0x2a, 0xea, 0xad, 0x60, 0xa2, 0xd1, 0xa1, 0xf2, 0xdf, 0x60, 0x9b, 0x51, 0xdc, 0x91, 0xe2, 0x5f, - 0x55, 0x8b, 0x4f, 0x25, 0xf3, 0x4f, 0x41, 0x54, 0x06, 0x8a, 0x56, 0x12, 0x41, 0xdf, 0x83, 0x11, - 0xb0, 0x9c, 0x6f, 0x42, 0xb6, 0xc6, 0x47, 0x45, 0x52, 0x6a, 0x0e, 0xbd, 0xfd, 0x1c, 0x7a, 0x97, - 0x2c, 0x27, 0x07, 0x16, 0x3a, 0x07, 0x33, 0x0a, 0x58, 0xee, 0xcb, 0x5d, 0x8a, 0x8f, 0xa5, 0x76, - 0x75, 0x11, 0x08, 0xe2, 0x83, 0xe4, 0xa1, 0x73, 0x80, 0x34, 0x9b, 0x47, 0xca, 0x14, 0xfe, 0xb8, - 0xf8, 0xd7, 0x2a, 0xc7, 0xa4, 0x44, 0x44, 0x3f, 0x80, 0xb1, 0xd8, 0x84, 0xdb, 0x65, 0x42, 0x19, - 0x46, 0x52, 0xea, 0x8d, 0xa2, 0x03, 0xad, 0x37, 0x05, 0xab, 0x1c, 0xf8, 0x7e, 0x72, 0xd4, 0xd3, - 0x90, 0x93, 0xf3, 0x35, 0x34, 0x54, 0x70, 0xb5, 0xf7, 0xcc, 0x86, 0xa2, 0xfc, 0x54, 0xbb, 0xd0, - 0x7a, 0x8f, 0x60, 0xbf, 0x4e, 0xb1, 0xa2, 0xeb, 0x37, 0x2f, 0xbb, 0xbe, 0x71, 0x91, 0xcf, 0x6d, - 0xfb, 0xbf, 0x42, 0x53, 0x0d, 0x14, 0x32, 0xa1, 0xf5, 0x38, 0xb9, 0x9b, 0xfc, 0xf1, 0x34, 0xb1, - 0x3f, 0x42, 0x06, 0xd4, 0xa7, 0x8f, 0x93, 0x99, 0xad, 0xa1, 0x0e, 0xb4, 0x67, 0xe3, 0xcb, 0xe9, - 0xec, 0x61, 0x74, 0x7d, 0x67, 0xd7, 0xd0, 0x31, 0x98, 0x57, 0xa3, 0xf1, 0xd8, 0xbf, 0xba, 0x1c, - 0x8d, 0x6f, 0xfe, 0xb2, 0xf5, 0xfe, 0x10, 0x9a, 0xca, 0xac, 0x78, 0x33, 0x73, 0x39, 0xbe, 0xca, - 0x8f, 0xda, 0x88, 0x57, 0xba, 0xc8, 0xb8, 0x32, 0x64, 0x10, 0xb9, 0xee, 0xff, 0xa7, 0xc1, 0x51, - 0x91, 0xd9, 0x53, 0xc8, 0x37, 0xf7, 0xc1, 0x0e, 0x4d, 0xc1, 0x9a, 0xe7, 0x9c, 0xfa, 0x51, 0xb0, - 0xdb, 0x89, 0x39, 0xd0, 0x64, 0xce, 0xdf, 0x55, 0xe6, 0x5c, 0xd4, 0x78, 0x57, 0x39, 0xa7, 0xf7, - 0x8a, 0x5f, 0x4c, 0xd5, 0xfc, 0x19, 0xe9, 0xfd, 0x02, 0xf6, 0x6b, 0x42, 0x39, 0x30, 0x43, 0x05, - 0xd6, 0x2d, 0x07, 0x66, 0x95, 0x93, 0xf9, 0x07, 0x9a, 0x23, 0xc6, 0x85, 0xb7, 0x01, 0xe8, 0x09, - 0xe7, 0x85, 0xa5, 0xcf, 0x5f, 0x5a, 0x52, 0x14, 0x8f, 0x70, 0xae, 0x2c, 0x08, 0x66, 0xef, 0x47, - 0x30, 0xf6, 0x40, 0x59, 0xb2, 0x51, 0x21, 0xd9, 0x28, 0x4b, 0x9e, 0x41, 0x4b, 0xf5, 0x4b, 0x91, - 0x0b, 0xf5, 0x28, 0xd8, 0xa5, 0x85, 0x68, 0xb7, 0x4a, 0x94, 0x48, 0xc6, 0xbc, 0xa9, 0x8e, 0xde, - 0x05, 0x00, 0x00, 0xff, 
0xff, 0x75, 0x38, 0xad, 0x84, 0xe4, 0x05, 0x00, 0x00, +func init() { proto.RegisterFile("proto3_proto/proto3.proto", fileDescriptor_proto3_78ae00cd7e6e5e35) } + +var fileDescriptor_proto3_78ae00cd7e6e5e35 = []byte{ + // 896 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x54, 0x6f, 0x6f, 0xdb, 0xb6, + 0x13, 0xae, 0x2c, 0xff, 0x91, 0xcf, 0x76, 0xea, 0x1f, 0x7f, 0x6e, 0xc7, 0x7a, 0x1b, 0xa0, 0x79, + 0xc3, 0x20, 0x0c, 0xab, 0xb2, 0xb9, 0xc8, 0x90, 0xb5, 0xc5, 0x86, 0x24, 0x6b, 0x50, 0x23, 0xb1, + 0x67, 0xd0, 0xce, 0x82, 0xbd, 0x12, 0x68, 0x87, 0xb6, 0x85, 0x59, 0x94, 0x27, 0x52, 0x05, 0xf4, + 0x05, 0xf6, 0x41, 0xf6, 0x95, 0xf6, 0x85, 0x06, 0x92, 0x72, 0x2a, 0x17, 0xea, 0xf2, 0x4a, 0xbc, + 0x47, 0xcf, 0xdd, 0x73, 0xbc, 0x3b, 0x1e, 0x3c, 0xdb, 0x25, 0xb1, 0x8c, 0x5f, 0x04, 0xfa, 0x73, + 0x6c, 0x0c, 0x5f, 0x7f, 0x50, 0xbb, 0xf8, 0xab, 0xff, 0x6c, 0x1d, 0xc7, 0xeb, 0x2d, 0x33, 0x94, + 0x45, 0xba, 0x3a, 0xa6, 0x3c, 0x33, 0xc4, 0xfe, 0x13, 0xc9, 0x84, 0xcc, 0x23, 0xa8, 0xa3, 0x81, + 0x07, 0x7f, 0x35, 0xa1, 0x31, 0x66, 0x42, 0xd0, 0x35, 0x43, 0x08, 0xaa, 0x9c, 0x46, 0x0c, 0x5b, + 0xae, 0xe5, 0x35, 0x89, 0x3e, 0xa3, 0x53, 0x70, 0x36, 0xe1, 0x96, 0x26, 0xa1, 0xcc, 0x70, 0xc5, + 0xb5, 0xbc, 0xa3, 0xe1, 0x67, 0x7e, 0x51, 0xd2, 0xcf, 0x9d, 0xfd, 0xb7, 0x69, 0x14, 0xa7, 0x09, + 0xb9, 0x67, 0x23, 0x17, 0xda, 0x1b, 0x16, 0xae, 0x37, 0x32, 0x08, 0x79, 0xb0, 0x8c, 0xb0, 0xed, + 0x5a, 0x5e, 0x87, 0x80, 0xc1, 0x46, 0xfc, 0x22, 0x52, 0x7a, 0x77, 0x54, 0x52, 0x5c, 0x75, 0x2d, + 0xaf, 0x4d, 0xf4, 0x19, 0x7d, 0x01, 0xed, 0x84, 0x89, 0x74, 0x2b, 0x83, 0x65, 0x9c, 0x72, 0x89, + 0x1b, 0xae, 0xe5, 0xd9, 0xa4, 0x65, 0xb0, 0x0b, 0x05, 0xa1, 0x2f, 0xa1, 0x23, 0x93, 0x94, 0x05, + 0x62, 0x19, 0x4b, 0x11, 0x51, 0x8e, 0x1d, 0xd7, 0xf2, 0x1c, 0xd2, 0x56, 0xe0, 0x2c, 0xc7, 0x50, + 0x0f, 0x6a, 0x62, 0x19, 0x27, 0x0c, 0x37, 0x5d, 0xcb, 0xab, 0x10, 0x63, 0xa0, 0x2e, 0xd8, 0x7f, + 0xb0, 0x0c, 0xd7, 0x5c, 0xdb, 0xab, 0x12, 0x75, 0x44, 0x9f, 0x42, 0x53, 0x6c, 0xe2, 0x44, 0x06, + 0x0a, 0xff, 0xbf, 0x6b, 0x7b, 0x35, 0xe2, 0x68, 0xe0, 0x8a, 0x65, 0xe8, 0x5b, 0xa8, 0x73, 0x26, + 0x24, 0xbb, 0xc3, 0x75, 0xd7, 0xf2, 0x5a, 0xc3, 0xde, 0xe1, 0xd5, 0x27, 0xfa, 0x1f, 0xc9, 0x39, + 0xe8, 0x04, 0x1a, 0x49, 0xb0, 0x4a, 0x39, 0xcf, 0x70, 0xd7, 0xb5, 0x1f, 0xac, 0x54, 0x3d, 0xb9, + 0x54, 0x5c, 0xf4, 0x1a, 0x1a, 0x92, 0x25, 0x09, 0x0d, 0x39, 0x06, 0xd7, 0xf6, 0x5a, 0xc3, 0x41, + 0xb9, 0xdb, 0xdc, 0x90, 0xde, 0x70, 0x99, 0x64, 0x64, 0xef, 0x82, 0x5e, 0x82, 0x99, 0x80, 0x61, + 0xb0, 0x0a, 0xd9, 0xf6, 0x0e, 0xb7, 0x74, 0xa2, 0x9f, 0xf8, 0xef, 0xbb, 0xed, 0xcf, 0xd2, 0xc5, + 0x2f, 0x6c, 0x45, 0xd3, 0xad, 0x14, 0xa4, 0x65, 0xc8, 0x97, 0x8a, 0x8b, 0x46, 0xf7, 0xbe, 0xef, + 0xe8, 0x36, 0x65, 0xb8, 0xa3, 0xe5, 0xbf, 0x2e, 0x97, 0x9f, 0x6a, 0xe6, 0x6f, 0x8a, 0x68, 0x52, + 0xc8, 0x43, 0x69, 0x04, 0x7d, 0x07, 0x0e, 0xe5, 0x99, 0xdc, 0x84, 0x7c, 0x8d, 0x8f, 0xf2, 0x5a, + 0x99, 0x59, 0xf4, 0xf7, 0xb3, 0xe8, 0x9f, 0xf1, 0x8c, 0xdc, 0xb3, 0xd0, 0x09, 0xb4, 0x22, 0xca, + 0xb3, 0x40, 0x5b, 0x02, 0x3f, 0xd6, 0xda, 0xe5, 0x4e, 0xa0, 0x88, 0x73, 0xcd, 0x43, 0x27, 0x00, + 0x22, 0x5d, 0x44, 0x26, 0x29, 0xfc, 0x3f, 0x2d, 0xf5, 0xa4, 0x34, 0x63, 0x52, 0x20, 0xa2, 0xef, + 0xc1, 0x59, 0x6e, 0xc2, 0xed, 0x5d, 0xc2, 0x38, 0x46, 0x5a, 0xea, 0x23, 0x4e, 0xf7, 0x34, 0x74, + 0x01, 0x20, 0x64, 0x12, 0xf2, 0x75, 0x10, 0xd1, 0x1d, 0xee, 0x69, 0xa7, 0xaf, 0xca, 0x6b, 0x33, + 0xd3, 0xbc, 0x31, 0xdd, 0x99, 0xca, 0x34, 0xc5, 0xde, 0xee, 0x4f, 0xa1, 0x5d, 0xec, 0xdb, 0x7e, + 0x00, 0xcd, 0x0b, 0xd3, 0x03, 0xf8, 0x0d, 0xd4, 0x4c, 
0xf5, 0x2b, 0xff, 0x31, 0x62, 0x86, 0xf2, + 0xb2, 0x72, 0x6a, 0xf5, 0x6f, 0xa1, 0xfb, 0x61, 0x2b, 0x4a, 0xa2, 0x3e, 0x3f, 0x8c, 0xfa, 0xd1, + 0x79, 0x28, 0x04, 0x7e, 0x0d, 0x47, 0x87, 0xf7, 0x28, 0x09, 0xdb, 0x2b, 0x86, 0x6d, 0x16, 0xbc, + 0x07, 0x3f, 0x43, 0xdd, 0xcc, 0x35, 0x6a, 0x41, 0xe3, 0x66, 0x72, 0x35, 0xf9, 0xf5, 0x76, 0xd2, + 0x7d, 0x84, 0x1c, 0xa8, 0x4e, 0x6f, 0x26, 0xb3, 0xae, 0x85, 0x3a, 0xd0, 0x9c, 0x5d, 0x9f, 0x4d, + 0x67, 0xf3, 0xd1, 0xc5, 0x55, 0xb7, 0x82, 0x1e, 0x43, 0xeb, 0x7c, 0x74, 0x7d, 0x1d, 0x9c, 0x9f, + 0x8d, 0xae, 0xdf, 0xfc, 0xde, 0xb5, 0x07, 0x43, 0xa8, 0x9b, 0xcb, 0x2a, 0x91, 0x85, 0x7e, 0x45, + 0x46, 0xd8, 0x18, 0x6a, 0x59, 0x2c, 0x53, 0x69, 0x94, 0x1d, 0xa2, 0xcf, 0x83, 0xbf, 0x2d, 0x38, + 0xca, 0x7b, 0x70, 0x1b, 0xca, 0xcd, 0x98, 0xee, 0xd0, 0x14, 0xda, 0x8b, 0x4c, 0x32, 0xd5, 0xb3, + 0x9d, 0x1a, 0x46, 0x4b, 0xf7, 0xed, 0x79, 0x69, 0xdf, 0x72, 0x1f, 0xff, 0x3c, 0x93, 0x6c, 0x6c, + 0xf8, 0xf9, 0x68, 0x2f, 0xde, 0x23, 0xfd, 0x9f, 0xa0, 0xfb, 0x21, 0xa1, 0x58, 0x19, 0xa7, 0xa4, + 0x32, 0xed, 0x62, 0x65, 0xfe, 0x84, 0xfa, 0x88, 0x4b, 0x95, 0xdb, 0x31, 0xd8, 0x89, 0x94, 0x79, + 0x4a, 0x9f, 0x1f, 0xa6, 0x64, 0x28, 0x3e, 0x91, 0xd2, 0xa4, 0xa0, 0x98, 0xfd, 0x1f, 0xc0, 0xd9, + 0x03, 0x45, 0xc9, 0x5a, 0x89, 0x64, 0xad, 0x28, 0xf9, 0x02, 0x1a, 0x26, 0x9e, 0x40, 0x1e, 0x54, + 0x23, 0xba, 0x13, 0xb9, 0x68, 0xaf, 0x4c, 0x94, 0x68, 0xc6, 0xe0, 0x9f, 0x0a, 0x38, 0x73, 0x26, + 0xe4, 0xcd, 0xfc, 0xf2, 0x14, 0x3d, 0x85, 0xba, 0x58, 0xd2, 0x2d, 0x4d, 0xf2, 0x26, 0xe4, 0x96, + 0xc2, 0xdf, 0xb1, 0xa5, 0x8c, 0x13, 0x5c, 0x71, 0x6d, 0x85, 0x1b, 0x0b, 0x3d, 0x85, 0x9a, 0xd9, + 0x3f, 0x6a, 0xcb, 0x37, 0xdf, 0x3e, 0x22, 0xc6, 0x44, 0xaf, 0xa0, 0x11, 0xd1, 0x9d, 0x5e, 0xae, + 0xd5, 0xb2, 0xe5, 0xb6, 0x17, 0xf4, 0xc7, 0x74, 0x77, 0xc5, 0x32, 0x73, 0xf7, 0x7a, 0xa4, 0x0d, + 0x74, 0x06, 0x4d, 0xe5, 0x6c, 0x2e, 0x59, 0x2b, 0x7b, 0x80, 0x45, 0xf7, 0xc2, 0x6a, 0x72, 0xa2, + 0xdc, 0xec, 0xff, 0x08, 0xad, 0x42, 0xe4, 0x87, 0x26, 0xda, 0x2e, 0xbe, 0x87, 0x57, 0xd0, 0x39, + 0x88, 0x5a, 0x74, 0xb6, 0x1f, 0x78, 0x0e, 0xe7, 0x0d, 0xa8, 0xc5, 0x9c, 0xc5, 0xab, 0x45, 0xdd, + 0xe4, 0xfb, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xeb, 0x74, 0x17, 0x7f, 0xc3, 0x07, 0x00, 0x00, } diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto index 2048655..6adea22 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto @@ -32,7 +32,7 @@ syntax = "proto3"; import "google/protobuf/any.proto"; -import "testdata/test.proto"; +import "test_proto/test.proto"; package proto3_proto; @@ -58,14 +58,16 @@ message Message { repeated Humour r_funny = 16; map terrain = 10; - testdata.SubDefaults proto2_field = 11; - map proto2_value = 13; + test_proto.SubDefaults proto2_field = 11; + map proto2_value = 13; google.protobuf.Any anything = 14; repeated google.protobuf.Any many_things = 15; Message submessage = 17; repeated Message children = 18; + + map string_map = 20; } message Nested { @@ -85,3 +87,11 @@ message IntMap { message IntMaps { repeated IntMap maps = 1; } + +message TestUTF8 { + string scalar = 1; + repeated string vector = 2; + oneof oneof { string field = 3; } + map map_key = 4; + map map_value = 5; +} diff --git 
a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/table_marshal.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/table_marshal.go new file mode 100644 index 0000000..9eda845 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/table_marshal.go @@ -0,0 +1,2736 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// a sizer takes a pointer to a field and the size of its tag, computes the size of +// the encoded data. +type sizer func(pointer, int) int + +// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), +// marshals the field to the end of the slice, returns the slice and error (if any). +type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) + +// marshalInfo is the information used for marshaling a message. +type marshalInfo struct { + typ reflect.Type + fields []*marshalFieldInfo + unrecognized field // offset of XXX_unrecognized + extensions field // offset of XXX_InternalExtensions + v1extensions field // offset of XXX_extensions + sizecache field // offset of XXX_sizecache + initialized int32 // 0 -- only typ is set, 1 -- fully initialized + messageset bool // uses message set wire format + hasmarshaler bool // has custom marshaler + sync.RWMutex // protect extElems map, also for initialization + extElems map[int32]*marshalElemInfo // info of extension elements +} + +// marshalFieldInfo is the information used for marshaling a field of a message. 
+type marshalFieldInfo struct { + field field + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isPointer bool + required bool // field is required + name string // name of the field, for error reporting + oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements +} + +// marshalElemInfo is the information used for marshaling an extension or oneof element. +type marshalElemInfo struct { + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) +} + +var ( + marshalInfoMap = map[reflect.Type]*marshalInfo{} + marshalInfoLock sync.Mutex +) + +// getMarshalInfo returns the information to marshal a given type of message. +// The info it returns may not necessarily initialized. +// t is the type of the message (NOT the pointer to it). +func getMarshalInfo(t reflect.Type) *marshalInfo { + marshalInfoLock.Lock() + u, ok := marshalInfoMap[t] + if !ok { + u = &marshalInfo{typ: t} + marshalInfoMap[t] = u + } + marshalInfoLock.Unlock() + return u +} + +// Size is the entry point from generated code, +// and should be ONLY called by generated code. +// It computes the size of encoded data of msg. +// a is a pointer to a place to store cached marshal info. +func (a *InternalMessageInfo) Size(msg Message) int { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return 0 + } + return u.size(ptr) +} + +// Marshal is the entry point from generated code, +// and should be ONLY called by generated code. +// It marshals msg to the end of b. +// a is a pointer to a place to store cached marshal info. +func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return b, ErrNil + } + return u.marshal(b, ptr, deterministic) +} + +func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { + // u := a.marshal, but atomically. + // We use an atomic here to ensure memory consistency. + u := atomicLoadMarshalInfo(&a.marshal) + if u == nil { + // Get marshal information from type of message. + t := reflect.ValueOf(msg).Type() + if t.Kind() != reflect.Ptr { + panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) + } + u = getMarshalInfo(t.Elem()) + // Store it in the cache for later users. + // a.marshal = u, but atomically. + atomicStoreMarshalInfo(&a.marshal, u) + } + return u +} + +// size is the main function to compute the size of the encoded data of a message. +// ptr is the pointer to the message. +func (u *marshalInfo) size(ptr pointer) int { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. 
+ if u.hasmarshaler { + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b, _ := m.Marshal() + return len(b) + } + + n := 0 + for _, f := range u.fields { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + n += f.sizer(ptr.offset(f.field), f.tagsize) + } + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + n += u.sizeMessageSet(e) + } else { + n += u.sizeExtensions(e) + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + n += u.sizeV1Extensions(m) + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + n += len(s) + } + // cache the result for use in marshal + if u.sizecache.IsValid() { + atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) + } + return n +} + +// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), +// fall back to compute the size. +func (u *marshalInfo) cachedsize(ptr pointer) int { + if u.sizecache.IsValid() { + return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) + } + return u.size(ptr) +} + +// marshal is the main function to marshal a message. It takes a byte slice and appends +// the encoded data to the end of the slice, returns the slice and error (if any). +// ptr is the pointer to the message. +// If deterministic is true, map is marshaled in deterministic order. +func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if u.hasmarshaler { + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b1, err := m.Marshal() + b = append(b, b1...) + return b, err + } + + var err, errreq error + // The old marshaler encodes extensions at beginning. + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + b, err = u.appendMessageSet(b, e, deterministic) + } else { + b, err = u.appendExtensions(b, e, deterministic) + } + if err != nil { + return b, err + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + b, err = u.appendV1Extensions(b, m, deterministic) + if err != nil { + return b, err + } + } + for _, f := range u.fields { + if f.required && errreq == nil { + if ptr.offset(f.field).getPointer().isNil() { + // Required field is not set. + // We record the error but keep going, to give a complete marshaling. + errreq = &RequiredNotSetError{f.name} + continue + } + } + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) + if err != nil { + if err1, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = &RequiredNotSetError{f.name + "." + err1.field} + } + continue + } + if err == errRepeatedHasNil { + err = errors.New("proto: repeated field " + f.name + " has nil element") + } + if err == errInvalidUTF8 { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." 
+ f.name + err = fmt.Errorf("proto: string field %q contains invalid UTF-8", fullName) + } + return b, err + } + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + b = append(b, s...) + } + return b, errreq +} + +// computeMarshalInfo initializes the marshal info. +func (u *marshalInfo) computeMarshalInfo() { + u.Lock() + defer u.Unlock() + if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock + return + } + + t := u.typ + u.unrecognized = invalidField + u.extensions = invalidField + u.v1extensions = invalidField + u.sizecache = invalidField + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if reflect.PtrTo(t).Implements(marshalerType) { + u.hasmarshaler = true + atomic.StoreInt32(&u.initialized, 1) + return + } + + // get oneof implementers + var oneofImplementers []interface{} + if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + } + + n := t.NumField() + + // deal with XXX fields first + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if !strings.HasPrefix(f.Name, "XXX_") { + continue + } + switch f.Name { + case "XXX_sizecache": + u.sizecache = toField(&f) + case "XXX_unrecognized": + u.unrecognized = toField(&f) + case "XXX_InternalExtensions": + u.extensions = toField(&f) + u.messageset = f.Tag.Get("protobuf_messageset") == "1" + case "XXX_extensions": + u.v1extensions = toField(&f) + case "XXX_NoUnkeyedLiteral": + // nothing to do + default: + panic("unknown XXX field: " + f.Name) + } + n-- + } + + // normal fields + fields := make([]marshalFieldInfo, n) // batch allocation + u.fields = make([]*marshalFieldInfo, 0, n) + for i, j := 0, 0; i < t.NumField(); i++ { + f := t.Field(i) + + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + field := &fields[j] + j++ + field.name = f.Name + u.fields = append(u.fields, field) + if f.Tag.Get("protobuf_oneof") != "" { + field.computeOneofFieldInfo(&f, oneofImplementers) + continue + } + if f.Tag.Get("protobuf") == "" { + // field has no tag (not in generated message), ignore it + u.fields = u.fields[:len(u.fields)-1] + j-- + continue + } + field.computeMarshalFieldInfo(&f) + } + + // fields are marshaled in tag order on the wire. + sort.Sort(byTag(u.fields)) + + atomic.StoreInt32(&u.initialized, 1) +} + +// helper for sorting fields by tag +type byTag []*marshalFieldInfo + +func (a byTag) Len() int { return len(a) } +func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } + +// getExtElemInfo returns the information to marshal an extension element. +// The info it returns is initialized. 
+func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { + // get from cache first + u.RLock() + e, ok := u.extElems[desc.Field] + u.RUnlock() + if ok { + return e + } + + t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct + tags := strings.Split(desc.Tag, ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizer, marshaler := typeMarshaler(t, tags, false, false) + e = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizer, + marshaler: marshaler, + isptr: t.Kind() == reflect.Ptr, + } + + // update cache + u.Lock() + if u.extElems == nil { + u.extElems = make(map[int32]*marshalElemInfo) + } + u.extElems[desc.Field] = e + u.Unlock() + return e +} + +// computeMarshalFieldInfo fills up the information to marshal a field. +func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { + // parse protobuf tag of the field. + // tag has format of "bytes,49,opt,name=foo,def=hello!" + tags := strings.Split(f.Tag.Get("protobuf"), ",") + if tags[0] == "" { + return + } + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + if tags[2] == "req" { + fi.required = true + } + fi.setTag(f, tag, wt) + fi.setMarshaler(f, tags) +} + +func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { + fi.field = toField(f) + fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.isPointer = true + fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) + fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) + + ityp := f.Type // interface type + for _, o := range oneofImplementers { + t := reflect.TypeOf(o) + if !t.Implements(ityp) { + continue + } + sf := t.Elem().Field(0) // oneof implementer is a struct with a single field + tags := strings.Split(sf.Tag.Get("protobuf"), ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value + fi.oneofElems[t.Elem()] = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizer, + marshaler: marshaler, + } + } +} + +type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) +} + +// wiretype returns the wire encoding of the type. +func wiretype(encoding string) uint64 { + switch encoding { + case "fixed32": + return WireFixed32 + case "fixed64": + return WireFixed64 + case "varint", "zigzag32", "zigzag64": + return WireVarint + case "bytes": + return WireBytes + case "group": + return WireStartGroup + } + panic("unknown wire type " + encoding) +} + +// setTag fills up the tag (in wire format) and its size in the info of a field. +func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { + fi.field = toField(f) + fi.wiretag = uint64(tag)<<3 | wt + fi.tagsize = SizeVarint(uint64(tag) << 3) +} + +// setMarshaler fills up the sizer and marshaler in the info of a field. 
+func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { + switch f.Type.Kind() { + case reflect.Map: + // map field + fi.isPointer = true + fi.sizer, fi.marshaler = makeMapMarshaler(f) + return + case reflect.Ptr, reflect.Slice: + fi.isPointer = true + } + fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) +} + +// typeMarshaler returns the sizer and marshaler of a given field. +// t is the type of the field. +// tags is the generated "protobuf" tag of the field. +// If nozero is true, zero value is not marshaled to the wire. +// If oneof is true, it is a oneof field. +func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { + encoding := tags[0] + + pointer := false + slice := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + packed := false + proto3 := false + validateUTF8 := true + for i := 2; i < len(tags); i++ { + if tags[i] == "packed" { + packed = true + } + if tags[i] == "proto3" { + proto3 = true + } + } + validateUTF8 = validateUTF8 && proto3 + + switch t.Kind() { + case reflect.Bool: + if pointer { + return sizeBoolPtr, appendBoolPtr + } + if slice { + if packed { + return sizeBoolPackedSlice, appendBoolPackedSlice + } + return sizeBoolSlice, appendBoolSlice + } + if nozero { + return sizeBoolValueNoZero, appendBoolValueNoZero + } + return sizeBoolValue, appendBoolValue + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixed32Ptr, appendFixed32Ptr + } + if slice { + if packed { + return sizeFixed32PackedSlice, appendFixed32PackedSlice + } + return sizeFixed32Slice, appendFixed32Slice + } + if nozero { + return sizeFixed32ValueNoZero, appendFixed32ValueNoZero + } + return sizeFixed32Value, appendFixed32Value + case "varint": + if pointer { + return sizeVarint32Ptr, appendVarint32Ptr + } + if slice { + if packed { + return sizeVarint32PackedSlice, appendVarint32PackedSlice + } + return sizeVarint32Slice, appendVarint32Slice + } + if nozero { + return sizeVarint32ValueNoZero, appendVarint32ValueNoZero + } + return sizeVarint32Value, appendVarint32Value + } + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixedS32Ptr, appendFixedS32Ptr + } + if slice { + if packed { + return sizeFixedS32PackedSlice, appendFixedS32PackedSlice + } + return sizeFixedS32Slice, appendFixedS32Slice + } + if nozero { + return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero + } + return sizeFixedS32Value, appendFixedS32Value + case "varint": + if pointer { + return sizeVarintS32Ptr, appendVarintS32Ptr + } + if slice { + if packed { + return sizeVarintS32PackedSlice, appendVarintS32PackedSlice + } + return sizeVarintS32Slice, appendVarintS32Slice + } + if nozero { + return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero + } + return sizeVarintS32Value, appendVarintS32Value + case "zigzag32": + if pointer { + return sizeZigzag32Ptr, appendZigzag32Ptr + } + if slice { + if packed { + return sizeZigzag32PackedSlice, appendZigzag32PackedSlice + } + return sizeZigzag32Slice, appendZigzag32Slice + } + if nozero { + return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero + } + return sizeZigzag32Value, appendZigzag32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixed64Ptr, appendFixed64Ptr + } + if slice { + if packed { + return sizeFixed64PackedSlice, 
appendFixed64PackedSlice + } + return sizeFixed64Slice, appendFixed64Slice + } + if nozero { + return sizeFixed64ValueNoZero, appendFixed64ValueNoZero + } + return sizeFixed64Value, appendFixed64Value + case "varint": + if pointer { + return sizeVarint64Ptr, appendVarint64Ptr + } + if slice { + if packed { + return sizeVarint64PackedSlice, appendVarint64PackedSlice + } + return sizeVarint64Slice, appendVarint64Slice + } + if nozero { + return sizeVarint64ValueNoZero, appendVarint64ValueNoZero + } + return sizeVarint64Value, appendVarint64Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixedS64Ptr, appendFixedS64Ptr + } + if slice { + if packed { + return sizeFixedS64PackedSlice, appendFixedS64PackedSlice + } + return sizeFixedS64Slice, appendFixedS64Slice + } + if nozero { + return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero + } + return sizeFixedS64Value, appendFixedS64Value + case "varint": + if pointer { + return sizeVarintS64Ptr, appendVarintS64Ptr + } + if slice { + if packed { + return sizeVarintS64PackedSlice, appendVarintS64PackedSlice + } + return sizeVarintS64Slice, appendVarintS64Slice + } + if nozero { + return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero + } + return sizeVarintS64Value, appendVarintS64Value + case "zigzag64": + if pointer { + return sizeZigzag64Ptr, appendZigzag64Ptr + } + if slice { + if packed { + return sizeZigzag64PackedSlice, appendZigzag64PackedSlice + } + return sizeZigzag64Slice, appendZigzag64Slice + } + if nozero { + return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero + } + return sizeZigzag64Value, appendZigzag64Value + } + case reflect.Float32: + if pointer { + return sizeFloat32Ptr, appendFloat32Ptr + } + if slice { + if packed { + return sizeFloat32PackedSlice, appendFloat32PackedSlice + } + return sizeFloat32Slice, appendFloat32Slice + } + if nozero { + return sizeFloat32ValueNoZero, appendFloat32ValueNoZero + } + return sizeFloat32Value, appendFloat32Value + case reflect.Float64: + if pointer { + return sizeFloat64Ptr, appendFloat64Ptr + } + if slice { + if packed { + return sizeFloat64PackedSlice, appendFloat64PackedSlice + } + return sizeFloat64Slice, appendFloat64Slice + } + if nozero { + return sizeFloat64ValueNoZero, appendFloat64ValueNoZero + } + return sizeFloat64Value, appendFloat64Value + case reflect.String: + if validateUTF8 { + if pointer { + return sizeStringPtr, appendUTF8StringPtr + } + if slice { + return sizeStringSlice, appendUTF8StringSlice + } + if nozero { + return sizeStringValueNoZero, appendUTF8StringValueNoZero + } + return sizeStringValue, appendUTF8StringValue + } + if pointer { + return sizeStringPtr, appendStringPtr + } + if slice { + return sizeStringSlice, appendStringSlice + } + if nozero { + return sizeStringValueNoZero, appendStringValueNoZero + } + return sizeStringValue, appendStringValue + case reflect.Slice: + if slice { + return sizeBytesSlice, appendBytesSlice + } + if oneof { + // Oneof bytes field may also have "proto3" tag. + // We want to marshal it as a oneof field. Do this + // check before the proto3 check. 
+ return sizeBytesOneof, appendBytesOneof + } + if proto3 { + return sizeBytes3, appendBytes3 + } + return sizeBytes, appendBytes + case reflect.Struct: + switch encoding { + case "group": + if slice { + return makeGroupSliceMarshaler(getMarshalInfo(t)) + } + return makeGroupMarshaler(getMarshalInfo(t)) + case "bytes": + if slice { + return makeMessageSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageMarshaler(getMarshalInfo(t)) + } + } + panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) +} + +// Below are functions to size/marshal a specific type of a field. +// They are stored in the field's info, and called by function pointers. +// They have type sizer or marshaler. + +func sizeFixed32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixedS32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFloat32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + return (4 + tagsize) * len(s) +} +func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixed64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFixedS64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return 8 
+ tagsize +} +func sizeFixedS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFloat64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + return (8 + tagsize) * len(s) +} +func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeVarint32Value(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarint32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarint64Value(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + return SizeVarint(v) + tagsize +} +func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return SizeVarint(v) + tagsize +} +func sizeVarint64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return SizeVarint(*p) + tagsize +} +func sizeVarint64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(v) + tagsize + } + return n +} +func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s 
{ + n += SizeVarint(v) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize + } + return n +} +func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize + } + return n +} +func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeBoolValue(_ pointer, tagsize int) int { + return 1 + tagsize +} +func sizeBoolValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toBool() + if !v { + return 0 + } + return 1 + tagsize +} +func sizeBoolPtr(ptr pointer, tagsize int) int { + p := *ptr.toBoolPtr() + if p == nil { + return 0 + } + return 1 + tagsize +} +func sizeBoolSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + return (1 + tagsize) * len(s) +} +func sizeBoolPackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return 0 + } + return len(s) + 
SizeVarint(uint64(len(s))) + tagsize +} +func sizeStringValue(ptr pointer, tagsize int) int { + v := *ptr.toString() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toString() + if v == "" { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringPtr(ptr pointer, tagsize int) int { + p := *ptr.toStringPtr() + if p == nil { + return 0 + } + v := *p + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringSlice(ptr pointer, tagsize int) int { + s := *ptr.toStringSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} +func sizeBytes(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if v == nil { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytes3(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if len(v) == 0 { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesOneof(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesSlice(ptr pointer, tagsize int) int { + s := *ptr.toBytesSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} + +// appendFixed32 appends an encoded fixed32 to b. +func appendFixed32(b []byte, v uint32) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24)) + return b +} + +// appendFixed64 appends an encoded fixed64 to b. +func appendFixed64(b []byte, v uint64) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24), + byte(v>>32), + byte(v>>40), + byte(v>>48), + byte(v>>56)) + return b +} + +// appendVarint appends an encoded varint to b. +func appendVarint(b []byte, v uint64) []byte { + // TODO: make 1-byte (maybe 2-byte) case inline-able, once we + // have non-leaf inliner. 
+ switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte(v&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, *p) + return b, nil +} +func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(*p)) + return b, nil +} +func appendFixedS32Slice(b []byte, ptr pointer, 
wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(*p)) + return b, nil +} +func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, *p) + return b, nil +} +func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, 
nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(*p)) + return b, nil +} +func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(*p)) + return b, nil +} +func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() 
+ if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, *p) + return b, nil +} +func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + } + return b, nil +} +func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, v) + } + return b, nil +} +func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func 
appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + if !v { + return b, nil + } + b = appendVarint(b, wiretag) + b = append(b, 1) + return b, nil +} + +func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toBoolPtr() + if p == nil { + 
return b, nil + } + b = appendVarint(b, wiretag) + if *p { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(len(s))) + for _, v := range s { + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if v == "" { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toStringSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} +func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if v == "" { + return b, nil + } + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toStringSlice() + for _, v := range s { + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} +func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if v == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if len(v) == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) 
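+	// The bytes field is length-delimited: key varint (field number<<3 | WireBytes), then the
+	// payload length as a varint, then the raw bytes. E.g. field number 4 with payload "hi"
+	// encodes as 0x22 0x02 0x68 0x69.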
+ return b, nil +} +func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBytesSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} + +// makeGroupMarshaler returns the sizer and marshaler for a group. +// u is the marshal info of the underlying message. +func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + return u.size(p) + 2*tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + var err error + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, p, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + return b, err + } +} + +// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. +// u is the marshal info of the underlying message. +func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + n += u.size(v) + 2*tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err, errreq error + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, v, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, errreq + } +} + +// makeMessageMarshaler returns the sizer and marshaler for a message field. +// u is the marshal info of the message. +func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.size(p) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(p) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, p, deterministic) + } +} + +// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. +// u is the marshal info of the message. 
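+// Each element is written as its own length-delimited record (key, cached byte size, payload);
+// a nil element aborts the marshal with errRepeatedHasNil, while a missing required field in a
+// submessage is recorded and reported only after the whole slice has been encoded.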
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err, errreq error + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, errreq + } +} + +// makeMapMarshaler returns the sizer and marshaler for a map field. +// f is the pointer to the reflect data structure of the field. +func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { + // figure out key and value type + t := f.Type + keyType := t.Key() + valType := t.Elem() + keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map + valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map + keyWireTag := 1<<3 | wiretype(keyTags[0]) + valWireTag := 2<<3 | wiretype(valTags[0]) + + // We create an interface to get the addresses of the map key and value. + // If value is pointer-typed, the interface is a direct interface, the + // idata itself is the value. Otherwise, the idata is the pointer to the + // value. + // Key cannot be pointer-typed. + valIsPtr := valType.Kind() == reflect.Ptr + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(t).Elem() // the map + n := 0 + for _, k := range m.MapKeys() { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(t).Elem() // the map + var err error + keys := m.MapKeys() + if len(keys) > 1 && deterministic { + sort.Sort(mapKeys(keys)) + } + for _, k := range keys { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + b = appendVarint(b, tag) + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + b = appendVarint(b, uint64(siz)) + b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) + if err != nil { + return b, err + } + b, err = valMarshaler(b, vaddr, valWireTag, deterministic) + if err != nil && err != ErrNil { // allow nil value in map + return b, err + } + } + return b, nil + } +} + +// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. +// fi is the marshal info of the field. +// f is the pointer to the reflect data structure of the field. 
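+// The concrete wrapper struct stored in the oneof interface is discovered at marshal time via
+// reflection, and its precomputed sizer, marshaler and wiretag are looked up in fi.oneofElems.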
+func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { + // Oneof field is an interface. We need to get the actual data type on the fly. + t := f.Type + return func(ptr pointer, _ int) int { + p := ptr.getInterfacePointer() + if p.isNil() { + return 0 + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + e := fi.oneofElems[telem] + return e.sizer(p, e.tagsize) + }, + func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { + p := ptr.getInterfacePointer() + if p.isNil() { + return b, nil + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { + return b, errOneofHasNil + } + e := fi.oneofElems[telem] + return e.marshaler(b, p, e.wiretag, deterministic) + } +} + +// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. +func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + mu.Unlock() + return n +} + +// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. +func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if err != nil { + return b, err + } + } + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. 
+ + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if err != nil { + return b, err + } + } + return b, nil +} + +// message set format is: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } + +// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field +// in message set format (above). +func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for id, e := range m { + n += 2 // start group, end group. tag = 1 (size=1) + n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + siz := len(msgWithLen) + n += siz + 1 // message, tag = 3 (size=1) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, 1) // message, tag = 3 (size=1) + } + mu.Unlock() + return n +} + +// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) +// to the end of byte slice b. +func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for id, e := range m { + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + if err != nil { + return b, err + } + b = append(b, 1<<3|WireEndGroup) + } + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, id := range keys { + e := m[int32(id)] + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. 
+ + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + b = append(b, 1<<3|WireEndGroup) + if err != nil { + return b, err + } + } + return b, nil +} + +// sizeV1Extensions computes the size of encoded data for a V1-API extension field. +func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { + if m == nil { + return 0 + } + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + return n +} + +// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. +func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { + if m == nil { + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + var err error + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if err != nil { + return b, err + } + } + return b, nil +} + +// newMarshaler is the interface representing objects that can marshal themselves. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newMarshaler interface { + XXX_Size() int + XXX_Marshal(b []byte, deterministic bool) ([]byte, error) +} + +// Size returns the encoded size of a protocol buffer message. +// This is the main entry point. +func Size(pb Message) int { + if m, ok := pb.(newMarshaler); ok { + return m.XXX_Size() + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, _ := m.Marshal() + return len(b) + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return 0 + } + var info InternalMessageInfo + return info.Size(pb) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, returning the data. +// This is the main entry point. +func Marshal(pb Message) ([]byte, error) { + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + b := make([]byte, 0, siz) + return m.XXX_Marshal(b, false) + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + return m.Marshal() + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return nil, ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + b := make([]byte, 0, siz) + return info.Marshal(b, pb, false) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, writing the result to the +// Buffer. 
+// This is an alternative entry point. It is not necessary to use +// a Buffer for most applications. +func (p *Buffer) Marshal(pb Message) error { + var err error + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + p.grow(siz) // make sure buf has enough capacity + p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) + return err + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, err := m.Marshal() + p.buf = append(p.buf, b...) + return err + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + p.grow(siz) // make sure buf has enough capacity + p.buf, err = info.Marshal(p.buf, pb, p.deterministic) + return err +} + +// grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After grow(n), at least n bytes can be written to the +// buffer without another allocation. +func (p *Buffer) grow(n int) { + need := len(p.buf) + n + if need <= cap(p.buf) { + return + } + newCap := len(p.buf) * 2 + if newCap < need { + newCap = need + } + p.buf = append(make([]byte, 0, newCap), p.buf...) +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/table_merge.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/table_merge.go new file mode 100644 index 0000000..5525def --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/table_merge.go @@ -0,0 +1,654 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +// Merge merges the src message into dst. +// This assumes that dst and src of the same type and are non-nil. 
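+// The merge plan for dst's type is computed lazily, cached on the InternalMessageInfo, and
+// reused through an atomic load on later calls.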
+func (a *InternalMessageInfo) Merge(dst, src Message) { + mi := atomicLoadMergeInfo(&a.merge) + if mi == nil { + mi = getMergeInfo(reflect.TypeOf(dst).Elem()) + atomicStoreMergeInfo(&a.merge, mi) + } + mi.merge(toPointer(&dst), toPointer(&src)) +} + +type mergeInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []mergeFieldInfo + unrecognized field // Offset of XXX_unrecognized +} + +type mergeFieldInfo struct { + field field // Offset of field, guaranteed to be valid + + // isPointer reports whether the value in the field is a pointer. + // This is true for the following situations: + // * Pointer to struct + // * Pointer to basic type (proto2 only) + // * Slice (first value in slice header is a pointer) + // * String (first value in string header is a pointer) + isPointer bool + + // basicWidth reports the width of the field assuming that it is directly + // embedded in the struct (as is the case for basic types in proto3). + // The possible values are: + // 0: invalid + // 1: bool + // 4: int32, uint32, float32 + // 8: int64, uint64, float64 + basicWidth int + + // Where dst and src are pointers to the types being merged. + merge func(dst, src pointer) +} + +var ( + mergeInfoMap = map[reflect.Type]*mergeInfo{} + mergeInfoLock sync.Mutex +) + +func getMergeInfo(t reflect.Type) *mergeInfo { + mergeInfoLock.Lock() + defer mergeInfoLock.Unlock() + mi := mergeInfoMap[t] + if mi == nil { + mi = &mergeInfo{typ: t} + mergeInfoMap[t] = mi + } + return mi +} + +// merge merges src into dst assuming they are both of type *mi.typ. +func (mi *mergeInfo) merge(dst, src pointer) { + if dst.isNil() { + panic("proto: nil destination") + } + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&mi.initialized) == 0 { + mi.computeMergeInfo() + } + + for _, fi := range mi.fields { + sfp := src.offset(fi.field) + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string + continue + } + if fi.basicWidth > 0 { + switch { + case fi.basicWidth == 1 && !*sfp.toBool(): + continue + case fi.basicWidth == 4 && *sfp.toUint32() == 0: + continue + case fi.basicWidth == 8 && *sfp.toUint64() == 0: + continue + } + } + } + + dfp := dst.offset(fi.field) + fi.merge(dfp, sfp) + } + + // TODO: Make this faster? + out := dst.asPointerTo(mi.typ).Elem() + in := src.asPointerTo(mi.typ).Elem() + if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + if mi.unrecognized.IsValid() { + if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { + *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) 
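+			// Appending to a nil []byte produces a fresh copy, so dst never aliases src's unknown-field buffer.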
+ } + } +} + +func (mi *mergeInfo) computeMergeInfo() { + mi.lock.Lock() + defer mi.lock.Unlock() + if mi.initialized != 0 { + return + } + t := mi.typ + n := t.NumField() + + props := GetProperties(t) + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + mfi := mergeFieldInfo{field: toField(&f)} + tf := f.Type + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + switch tf.Kind() { + case reflect.Ptr, reflect.Slice, reflect.String: + // As a special case, we assume slices and strings are pointers + // since we know that the first field in the SliceSlice or + // StringHeader is a data pointer. + mfi.isPointer = true + case reflect.Bool: + mfi.basicWidth = 1 + case reflect.Int32, reflect.Uint32, reflect.Float32: + mfi.basicWidth = 4 + case reflect.Int64, reflect.Uint64, reflect.Float64: + mfi.basicWidth = 8 + } + } + + // Unwrap tf to get at its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + tf.Name()) + } + + switch tf.Kind() { + case reflect.Int32: + switch { + case isSlice: // E.g., []int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Slice is not defined (see pointer_reflect.go). + /* + sfsp := src.toInt32Slice() + if *sfsp != nil { + dfsp := dst.toInt32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + */ + sfs := src.getInt32Slice() + if sfs != nil { + dfs := dst.getInt32Slice() + dfs = append(dfs, sfs...) + if dfs == nil { + dfs = []int32{} + } + dst.setInt32Slice(dfs) + } + } + case isPointer: // E.g., *int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). + /* + sfpp := src.toInt32Ptr() + if *sfpp != nil { + dfpp := dst.toInt32Ptr() + if *dfpp == nil { + *dfpp = Int32(**sfpp) + } else { + **dfpp = **sfpp + } + } + */ + sfp := src.getInt32Ptr() + if sfp != nil { + dfp := dst.getInt32Ptr() + if dfp == nil { + dst.setInt32Ptr(*sfp) + } else { + *dfp = *sfp + } + } + } + default: // E.g., int32 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt32(); v != 0 { + *dst.toInt32() = v + } + } + } + case reflect.Int64: + switch { + case isSlice: // E.g., []int64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toInt64Slice() + if *sfsp != nil { + dfsp := dst.toInt64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + } + case isPointer: // E.g., *int64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toInt64Ptr() + if *sfpp != nil { + dfpp := dst.toInt64Ptr() + if *dfpp == nil { + *dfpp = Int64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., int64 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt64(); v != 0 { + *dst.toInt64() = v + } + } + } + case reflect.Uint32: + switch { + case isSlice: // E.g., []uint32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint32Slice() + if *sfsp != nil { + dfsp := dst.toUint32Slice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []uint32{} + } + } + } + case isPointer: // E.g., *uint32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint32Ptr() + if *sfpp != nil { + dfpp := dst.toUint32Ptr() + if *dfpp == nil { + *dfpp = Uint32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint32 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint32(); v != 0 { + *dst.toUint32() = v + } + } + } + case reflect.Uint64: + switch { + case isSlice: // E.g., []uint64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint64Slice() + if *sfsp != nil { + dfsp := dst.toUint64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint64{} + } + } + } + case isPointer: // E.g., *uint64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint64Ptr() + if *sfpp != nil { + dfpp := dst.toUint64Ptr() + if *dfpp == nil { + *dfpp = Uint64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint64 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint64(); v != 0 { + *dst.toUint64() = v + } + } + } + case reflect.Float32: + switch { + case isSlice: // E.g., []float32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat32Slice() + if *sfsp != nil { + dfsp := dst.toFloat32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float32{} + } + } + } + case isPointer: // E.g., *float32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat32Ptr() + if *sfpp != nil { + dfpp := dst.toFloat32Ptr() + if *dfpp == nil { + *dfpp = Float32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float32 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat32(); v != 0 { + *dst.toFloat32() = v + } + } + } + case reflect.Float64: + switch { + case isSlice: // E.g., []float64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat64Slice() + if *sfsp != nil { + dfsp := dst.toFloat64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float64{} + } + } + } + case isPointer: // E.g., *float64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat64Ptr() + if *sfpp != nil { + dfpp := dst.toFloat64Ptr() + if *dfpp == nil { + *dfpp = Float64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float64 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat64(); v != 0 { + *dst.toFloat64() = v + } + } + } + case reflect.Bool: + switch { + case isSlice: // E.g., []bool + mfi.merge = func(dst, src pointer) { + sfsp := src.toBoolSlice() + if *sfsp != nil { + dfsp := dst.toBoolSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []bool{} + } + } + } + case isPointer: // E.g., *bool + mfi.merge = func(dst, src pointer) { + sfpp := src.toBoolPtr() + if *sfpp != nil { + dfpp := dst.toBoolPtr() + if *dfpp == nil { + *dfpp = Bool(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., bool + mfi.merge = func(dst, src pointer) { + if v := *src.toBool(); v { + *dst.toBool() = v + } + } + } + case reflect.String: + switch { + case isSlice: // E.g., []string + mfi.merge = func(dst, src pointer) { + sfsp := src.toStringSlice() + if *sfsp != nil { + dfsp := dst.toStringSlice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []string{} + } + } + } + case isPointer: // E.g., *string + mfi.merge = func(dst, src pointer) { + sfpp := src.toStringPtr() + if *sfpp != nil { + dfpp := dst.toStringPtr() + if *dfpp == nil { + *dfpp = String(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., string + mfi.merge = func(dst, src pointer) { + if v := *src.toString(); v != "" { + *dst.toString() = v + } + } + } + case reflect.Slice: + isProto3 := props.Prop[i].proto3 + switch { + case isPointer: + panic("bad pointer in byte slice case in " + tf.Name()) + case tf.Elem().Kind() != reflect.Uint8: + panic("bad element kind in byte slice case in " + tf.Name()) + case isSlice: // E.g., [][]byte + mfi.merge = func(dst, src pointer) { + sbsp := src.toBytesSlice() + if *sbsp != nil { + dbsp := dst.toBytesSlice() + for _, sb := range *sbsp { + if sb == nil { + *dbsp = append(*dbsp, nil) + } else { + *dbsp = append(*dbsp, append([]byte{}, sb...)) + } + } + if *dbsp == nil { + *dbsp = [][]byte{} + } + } + } + default: // E.g., []byte + mfi.merge = func(dst, src pointer) { + sbp := src.toBytes() + if *sbp != nil { + dbp := dst.toBytes() + if !isProto3 || len(*sbp) > 0 { + *dbp = append([]byte{}, *sbp...) + } + } + } + } + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("message field %s without pointer", tf)) + case isSlice: // E.g., []*pb.T + mi := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sps := src.getPointerSlice() + if sps != nil { + dps := dst.getPointerSlice() + for _, sp := range sps { + var dp pointer + if !sp.isNil() { + dp = valToPointer(reflect.New(tf)) + mi.merge(dp, sp) + } + dps = append(dps, dp) + } + if dps == nil { + dps = []pointer{} + } + dst.setPointerSlice(dps) + } + } + default: // E.g., *pb.T + mi := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sp := src.getPointer() + if !sp.isNil() { + dp := dst.getPointer() + if dp.isNil() { + dp = valToPointer(reflect.New(tf)) + dst.setPointer(dp) + } + mi.merge(dp, sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic("bad pointer or slice in map case in " + tf.Name()) + default: // E.g., map[K]V + mfi.merge = func(dst, src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + dm := dst.asPointerTo(tf).Elem() + if dm.IsNil() { + dm.Set(reflect.MakeMap(tf)) + } + + switch tf.Elem().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(Clone(val.Interface().(Message))) + dm.SetMapIndex(key, val) + } + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + dm.SetMapIndex(key, val) + } + default: // Basic type (e.g., string) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + dm.SetMapIndex(key, val) + } + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic("bad pointer or slice in interface case in " + tf.Name()) + default: // E.g., interface{} + // TODO: Make this faster? 
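+			// A oneof is an interface holding a pointer to a generated wrapper struct. The merge
+			// allocates a wrapper of the same concrete type in dst when needed, then merges
+			// wrapper field 0 according to its kind (message pointer, bytes, or basic value).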
+ mfi.merge = func(dst, src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + du := dst.asPointerTo(tf).Elem() + typ := su.Elem().Type() + if du.IsNil() || du.Elem().Type() != typ { + du.Set(reflect.New(typ.Elem())) // Initialize interface if empty + } + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + dv := du.Elem().Elem().Field(0) + if dv.Kind() == reflect.Ptr && dv.IsNil() { + dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + Merge(dv.Interface().(Message), sv.Interface().(Message)) + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) + default: // Basic type (e.g., string) + dv.Set(sv) + } + } + } + } + default: + panic(fmt.Sprintf("merger not found for type:%s", tf)) + } + mi.fields = append(mi.fields, mfi) + } + + mi.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + mi.unrecognized = toField(&f) + } + + atomic.StoreInt32(&mi.initialized, 1) +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/table_unmarshal.go new file mode 100644 index 0000000..90ec6c2 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/table_unmarshal.go @@ -0,0 +1,2048 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// Unmarshal is the entry point from the generated .pb.go files. +// This function is not intended to be used by non-generated code. +// This function is not subject to any compatibility guarantee. +// msg contains a pointer to a protocol buffer struct. +// b is the data to be unmarshaled into the protocol buffer. +// a is a pointer to a place to store cached unmarshal information. +func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { + // Load the unmarshal information for this message type. + // The atomic load ensures memory consistency. + u := atomicLoadUnmarshalInfo(&a.unmarshal) + if u == nil { + // Slow path: find unmarshal info for msg, update a with it. + u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) + atomicStoreUnmarshalInfo(&a.unmarshal, u) + } + // Then do the unmarshaling. + err := u.unmarshal(toPointer(&msg), b) + return err +} + +type unmarshalInfo struct { + typ reflect.Type // type of the protobuf struct + + // 0 = only typ field is initialized + // 1 = completely initialized + initialized int32 + lock sync.Mutex // prevents double initialization + dense []unmarshalFieldInfo // fields indexed by tag # + sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # + reqFields []string // names of required fields + reqMask uint64 // 1< 0 { + // Read tag and wire type. + // Special case 1 and 2 byte varints. + var x uint64 + if b[0] < 128 { + x = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + x = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + x, n = decodeVarint(b) + if n == 0 { + return io.ErrUnexpectedEOF + } + b = b[n:] + } + tag := x >> 3 + wire := int(x) & 7 + + // Dispatch on the tag to one of the unmarshal* functions below. + var f unmarshalFieldInfo + if tag < uint64(len(u.dense)) { + f = u.dense[tag] + } else { + f = u.sparse[tag] + } + if fn := f.unmarshal; fn != nil { + var err error + b, err = fn(b, m.offset(f.field), wire) + if err == nil { + reqMask |= f.reqMask + continue + } + if r, ok := err.(*RequiredNotSetError); ok { + // Remember this error, but keep parsing. We need to produce + // a full parse even if a required field is missing. + rnse = r + reqMask |= f.reqMask + continue + } + if err != errInternalBadWireType { + if err == errInvalidUTF8 { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name + err = fmt.Errorf("proto: string field %q contains invalid UTF-8", fullName) + } + return err + } + // Fragments with bad wire type are treated as unknown fields. + } + + // Unknown tag. + if !u.unrecognized.IsValid() { + // Don't keep unrecognized data; just skip it. + var err error + b, err = skipField(b, wire) + if err != nil { + return err + } + continue + } + // Keep unrecognized data around. + // maybe in extensions, maybe in the unrecognized field. 
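+		// Tags that fall inside a declared extension range are accumulated as raw Extension
+		// encodings; everything else is re-encoded (key plus the skipped wire bytes) into the
+		// XXX_unrecognized buffer.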
+ z := m.offset(u.unrecognized).toBytes() + var emap map[int32]Extension + var e Extension + for _, r := range u.extensionRanges { + if uint64(r.Start) <= tag && tag <= uint64(r.End) { + if u.extensions.IsValid() { + mp := m.offset(u.extensions).toExtensions() + emap = mp.extensionsWrite() + e = emap[int32(tag)] + z = &e.enc + break + } + if u.oldExtensions.IsValid() { + p := m.offset(u.oldExtensions).toOldExtensions() + emap = *p + if emap == nil { + emap = map[int32]Extension{} + *p = emap + } + e = emap[int32(tag)] + z = &e.enc + break + } + panic("no extensions field available") + } + } + + // Use wire type to skip data. + var err error + b0 := b + b, err = skipField(b, wire) + if err != nil { + return err + } + *z = encodeVarint(*z, tag<<3|uint64(wire)) + *z = append(*z, b0[:len(b0)-len(b)]...) + + if emap != nil { + emap[int32(tag)] = e + } + } + if rnse != nil { + // A required field of a submessage/group is missing. Return that error. + return rnse + } + if reqMask != u.reqMask { + // A required field of this message is missing. + for _, n := range u.reqFields { + if reqMask&1 == 0 { + return &RequiredNotSetError{n} + } + reqMask >>= 1 + } + } + return nil +} + +// computeUnmarshalInfo fills in u with information for use +// in unmarshaling protocol buffers of type u.typ. +func (u *unmarshalInfo) computeUnmarshalInfo() { + u.lock.Lock() + defer u.lock.Unlock() + if u.initialized != 0 { + return + } + t := u.typ + n := t.NumField() + + // Set up the "not found" value for the unrecognized byte buffer. + // This is the default for proto3. + u.unrecognized = invalidField + u.extensions = invalidField + u.oldExtensions = invalidField + + // List of the generated type and offset for each oneof field. + type oneofField struct { + ityp reflect.Type // interface type of oneof field + field field // offset in containing message + } + var oneofFields []oneofField + + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Name == "XXX_unrecognized" { + // The byte slice used to hold unrecognized input is special. + if f.Type != reflect.TypeOf(([]byte)(nil)) { + panic("bad type for XXX_unrecognized field: " + f.Type.Name()) + } + u.unrecognized = toField(&f) + continue + } + if f.Name == "XXX_InternalExtensions" { + // Ditto here. + if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { + panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) + } + u.extensions = toField(&f) + if f.Tag.Get("protobuf_messageset") == "1" { + u.isMessageSet = true + } + continue + } + if f.Name == "XXX_extensions" { + // An older form of the extensions field. + if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) { + panic("bad type for XXX_extensions field: " + f.Type.Name()) + } + u.oldExtensions = toField(&f) + continue + } + if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { + continue + } + + oneof := f.Tag.Get("protobuf_oneof") + if oneof != "" { + oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) + // The rest of oneof processing happens below. + continue + } + + tags := f.Tag.Get("protobuf") + tagArray := strings.Split(tags, ",") + if len(tagArray) < 2 { + panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) + } + tag, err := strconv.Atoi(tagArray[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tagArray[1]) + } + + name := "" + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + } + + // Extract unmarshaling function from the field (its type and tags). 
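+		// The struct tag has the shape `protobuf:"<wire-encoding>,<field-number>,<opt|req|rep>,name=...,..."`,
+		// e.g. `protobuf:"varint,1,opt,name=id"`; the encoding selects the unmarshaler and the
+		// field number selects the slot in the dense/sparse dispatch tables.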
+ unmarshal := fieldUnmarshaler(&f) + + // Required field? + var reqMask uint64 + if tagArray[2] == "req" { + bit := len(u.reqFields) + u.reqFields = append(u.reqFields, name) + reqMask = uint64(1) << uint(bit) + // TODO: if we have more than 64 required fields, we end up + // not verifying that all required fields are present. + // Fix this, perhaps using a count of required fields? + } + + // Store the info in the correct slot in the message. + u.setTag(tag, toField(&f), unmarshal, reqMask, name) + } + + // Find any types associated with oneof fields. + // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it? + fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs") + if fn.IsValid() { + res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{} + for i := res.Len() - 1; i >= 0; i-- { + v := res.Index(i) // interface{} + tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X + typ := tptr.Elem() // Msg_X + + f := typ.Field(0) // oneof implementers have one field + baseUnmarshal := fieldUnmarshaler(&f) + tags := strings.Split(f.Tag.Get("protobuf"), ",") + fieldNum, err := strconv.Atoi(tags[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tags[1]) + } + var name string + for _, tag := range tags { + if strings.HasPrefix(tag, "name=") { + name = strings.TrimPrefix(tag, "name=") + break + } + } + + // Find the oneof field that this struct implements. + // Might take O(n^2) to process all of the oneofs, but who cares. + for _, of := range oneofFields { + if tptr.Implements(of.ityp) { + // We have found the corresponding interface for this struct. + // That lets us know where this struct should be stored + // when we encounter it during unmarshaling. + unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) + u.setTag(fieldNum, of.field, unmarshal, 0, name) + } + } + } + } + + // Get extension ranges, if any. + fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") + if fn.IsValid() { + if !u.extensions.IsValid() && !u.oldExtensions.IsValid() { + panic("a message with extensions, but no extensions field in " + t.Name()) + } + u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) + } + + // Explicitly disallow tag 0. This will ensure we flag an error + // when decoding a buffer of all zeros. Without this code, we + // would decode and skip an all-zero buffer of even length. + // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. + u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { + return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) + }, 0, "") + + // Set mask for required field check. + u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? + for len(u.dense) <= tag { + u.dense = append(u.dense, unmarshalFieldInfo{}) + } + u.dense[tag] = i + return + } + if u.sparse == nil { + u.sparse = map[uint64]unmarshalFieldInfo{} + } + u.sparse[uint64(tag)] = i +} + +// fieldUnmarshaler returns an unmarshaler for the given field. +func fieldUnmarshaler(f *reflect.StructField) unmarshaler { + if f.Type.Kind() == reflect.Map { + return makeUnmarshalMap(f) + } + return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) +} + +// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. 
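+// The unmarshaler is selected from the Go kind of t combined with the wire encoding named in the
+// tag (varint, zigzag32/64, fixed32/64, bytes, group), with pointer, slice and value variants;
+// the slice forms accept both packed and expanded repeated encodings.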
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { + tagArray := strings.Split(tags, ",") + encoding := tagArray[0] + name := "unknown" + proto3 := false + validateUTF8 := true + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + if tag == "proto3" { + proto3 = true + } + } + validateUTF8 = validateUTF8 && proto3 + + // Figure out packaging (pointer, slice, or both) + slice := false + pointer := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + // We'll never have both pointer and slice for basic types. + if pointer && slice && t.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + t.Name()) + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return unmarshalBoolPtr + } + if slice { + return unmarshalBoolSlice + } + return unmarshalBoolValue + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixedS32Ptr + } + if slice { + return unmarshalFixedS32Slice + } + return unmarshalFixedS32Value + case "varint": + // this could be int32 or enum + if pointer { + return unmarshalInt32Ptr + } + if slice { + return unmarshalInt32Slice + } + return unmarshalInt32Value + case "zigzag32": + if pointer { + return unmarshalSint32Ptr + } + if slice { + return unmarshalSint32Slice + } + return unmarshalSint32Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixedS64Ptr + } + if slice { + return unmarshalFixedS64Slice + } + return unmarshalFixedS64Value + case "varint": + if pointer { + return unmarshalInt64Ptr + } + if slice { + return unmarshalInt64Slice + } + return unmarshalInt64Value + case "zigzag64": + if pointer { + return unmarshalSint64Ptr + } + if slice { + return unmarshalSint64Slice + } + return unmarshalSint64Value + } + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixed32Ptr + } + if slice { + return unmarshalFixed32Slice + } + return unmarshalFixed32Value + case "varint": + if pointer { + return unmarshalUint32Ptr + } + if slice { + return unmarshalUint32Slice + } + return unmarshalUint32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixed64Ptr + } + if slice { + return unmarshalFixed64Slice + } + return unmarshalFixed64Value + case "varint": + if pointer { + return unmarshalUint64Ptr + } + if slice { + return unmarshalUint64Slice + } + return unmarshalUint64Value + } + case reflect.Float32: + if pointer { + return unmarshalFloat32Ptr + } + if slice { + return unmarshalFloat32Slice + } + return unmarshalFloat32Value + case reflect.Float64: + if pointer { + return unmarshalFloat64Ptr + } + if slice { + return unmarshalFloat64Slice + } + return unmarshalFloat64Value + case reflect.Map: + panic("map type in typeUnmarshaler in " + t.Name()) + case reflect.Slice: + if pointer { + panic("bad pointer in slice case in " + t.Name()) + } + if slice { + return unmarshalBytesSlice + } + return unmarshalBytesValue + case reflect.String: + if validateUTF8 { + if pointer { + return unmarshalUTF8StringPtr + } + if slice { + return unmarshalUTF8StringSlice + } + return unmarshalUTF8StringValue + } + if pointer { + return unmarshalStringPtr + } + if slice { + return unmarshalStringSlice + } + return unmarshalStringValue + case reflect.Struct: + // message or group field + if !pointer { + 
panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding)) + } + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) + case "group": + if slice { + return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) + } + } + panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) +} + +// Below are all the unmarshalers for individual fields of various types. + +func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64() = v + return b, nil +} + +func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64() = v + return b, nil +} + +func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64() = v + return b, nil +} + +func unmarshalUint64Ptr(b []byte, f pointer, 
w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64Ptr() = &v + return b, nil +} + +func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + *f.toInt32() = v + return b, nil +} + +func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + *f.toInt32() = v + return b, nil +} + +func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalUint32Value(b []byte, f 
pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32() = v + return b, nil +} + +func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32Ptr() = &v + return b, nil +} + +func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64() = v + return b[8:], nil +} + +func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64() = v + return b[8:], nil +} + +func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := 
int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32() = v + return b[4:], nil +} + +func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32Ptr() = &v + return b[4:], nil +} + +func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + *f.toInt32() = v + return b[4:], nil +} + +func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.setInt32Ptr(v) + return b[4:], nil +} + +func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | 
int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + return b[4:], nil +} + +func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + // Note: any length varint is allowed, even though any sane + // encoder will use one byte. + // See https://github.com/golang/protobuf/issues/76 + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + // TODO: check if x>1? Tests seem to indicate no. + v := x != 0 + *f.toBool() = v + return b[n:], nil +} + +func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + *f.toBoolPtr() = &v + return b[n:], nil +} + +func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + b = b[n:] + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + return b[n:], nil +} + +func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64() = v + return b[8:], nil +} + +func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64Ptr() = &v + return b[8:], nil +} + +func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = 
append(*s, v) + return b[8:], nil +} + +func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32() = v + return b[4:], nil +} + +func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32Ptr() = &v + return b[4:], nil +} + +func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + return b[x:], nil +} + +func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + return b[x:], nil +} + +func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + return b[x:], nil +} + +func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + *f.toString() = v + return b[x:], nil +} + +func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + *f.toStringPtr() = &v + return b[x:], nil +} + +func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) 
{ + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + s := f.toStringSlice() + *s = append(*s, v) + return b[x:], nil +} + +var emptyBuf [0]byte + +func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // The use of append here is a trick which avoids the zeroing + // that would be required if we used a make/copy pair. + // We append to emptyBuf instead of nil because we want + // a non-nil result even when the length is 0. + v := append(emptyBuf[:], b[:x]...) + *f.toBytes() = v + return b[x:], nil +} + +func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := append(emptyBuf[:], b[:x]...) + s := f.toBytesSlice() + *s = append(*s, v) + return b[x:], nil +} + +func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." 
+ r.field + } else { + return nil, err + } + } + return b[y:], err + } +} + +func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[y:], err + } +} + +func makeUnmarshalMap(f *reflect.StructField) unmarshaler { + t := f.Type + kt := t.Key() + vt := t.Elem() + unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) + unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val")) + return func(b []byte, f pointer, w int) ([]byte, error) { + // The map entry is a submessage. Figure out how big it is. + if w != WireBytes { + return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + r := b[x:] // unused data to return + b = b[:x] // data for map entry + + // Note: we could use #keys * #values ~= 200 functions + // to do map decoding without reflection. Probably not worth it. + // Maps will be somewhat slow. Oh well. + + // Read key and value from data. + k := reflect.New(kt) + v := reflect.New(vt) + for len(b) > 0 { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + wire := int(x) & 7 + b = b[n:] + + var err error + switch x >> 3 { + case 1: + b, err = unmarshalKey(b, valToPointer(k), wire) + case 2: + b, err = unmarshalVal(b, valToPointer(v), wire) + default: + err = errInternalBadWireType // skip unknown tag + } + + if err == nil { + continue + } + if err != errInternalBadWireType { + return nil, err + } + + // Skip past unknown fields. + b, err = skipField(b, wire) + if err != nil { + return nil, err + } + } + + // Get map, allocate if needed. + m := f.asPointerTo(t).Elem() // an addressable map[K]T + if m.IsNil() { + m.Set(reflect.MakeMap(t)) + } + + // Insert into map. + m.SetMapIndex(k.Elem(), v.Elem()) + + return r, nil + } +} + +// makeUnmarshalOneof makes an unmarshaler for oneof fields. +// for: +// message Msg { +// oneof F { +// int64 X = 1; +// float64 Y = 2; +// } +// } +// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). +// ityp is the interface type of the oneof field (e.g. isMsg_F). +// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). +// Note that this function will be called once for each case in the oneof. +func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { + sf := typ.Field(0) + field0 := toField(&sf) + return func(b []byte, f pointer, w int) ([]byte, error) { + // Allocate holder for value. + v := reflect.New(typ) + + // Unmarshal data into holder. + // We unmarshal into the first field of the holder object. + var err error + b, err = unmarshal(b, valToPointer(v).offset(field0), w) + if err != nil { + return nil, err + } + + // Write pointer to holder into target field. + f.asPointerTo(ityp).Elem().Set(v) + + return b, nil + } +} + +// Error used by decode internally. 
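// Illustrative sketch, not part of the vendored file: the unmarshalSint32*/unmarshalSint64*
// functions above undo protobuf zigzag encoding with expressions such as
// `int64(x>>1) ^ int64(x)<<63>>63`. A minimal standalone version of that transform
// (hypothetical names), with its inverse so the round trip is visible:
package main

import "fmt"

// zigzagDecode64 maps the wire values 0, 1, 2, 3, ... back to 0, -1, 1, -2, ...
// The `int64(x)<<63>>63` term sign-extends the low bit into a mask of all ones
// (negative result) or all zeros, exactly as in the unmarshalers above.
func zigzagDecode64(x uint64) int64 {
	return int64(x>>1) ^ int64(x)<<63>>63
}

// zigzagEncode64 is the inverse mapping used on the wire for sint fields.
func zigzagEncode64(v int64) uint64 {
	return uint64(v<<1) ^ uint64(v>>63)
}

func main() {
	for _, v := range []int64{0, -1, 1, -2, 150, -150} {
		e := zigzagEncode64(v)
		fmt.Printf("%d -> zigzag %d -> %d\n", v, e, zigzagDecode64(e))
	}
}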
+var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") + +// skipField skips past a field of type wire and returns the remaining bytes. +func skipField(b []byte, wire int) ([]byte, error) { + switch wire { + case WireVarint: + _, k := decodeVarint(b) + if k == 0 { + return b, io.ErrUnexpectedEOF + } + b = b[k:] + case WireFixed32: + if len(b) < 4 { + return b, io.ErrUnexpectedEOF + } + b = b[4:] + case WireFixed64: + if len(b) < 8 { + return b, io.ErrUnexpectedEOF + } + b = b[8:] + case WireBytes: + m, k := decodeVarint(b) + if k == 0 || uint64(len(b)-k) < m { + return b, io.ErrUnexpectedEOF + } + b = b[uint64(k)+m:] + case WireStartGroup: + _, i := findEndGroup(b) + if i == -1 { + return b, io.ErrUnexpectedEOF + } + b = b[i:] + default: + return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) + } + return b, nil +} + +// findEndGroup finds the index of the next EndGroup tag. +// Groups may be nested, so the "next" EndGroup tag is the first +// unpaired EndGroup. +// findEndGroup returns the indexes of the start and end of the EndGroup tag. +// Returns (-1,-1) if it can't find one. +func findEndGroup(b []byte) (int, int) { + depth := 1 + i := 0 + for { + x, n := decodeVarint(b[i:]) + if n == 0 { + return -1, -1 + } + j := i + i += n + switch x & 7 { + case WireVarint: + _, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + case WireFixed32: + if len(b)-4 < i { + return -1, -1 + } + i += 4 + case WireFixed64: + if len(b)-8 < i { + return -1, -1 + } + i += 8 + case WireBytes: + m, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + if uint64(len(b)-i) < m { + return -1, -1 + } + i += int(m) + case WireStartGroup: + depth++ + case WireEndGroup: + depth-- + if depth == 0 { + return j, i + } + default: + return -1, -1 + } + } +} + +// encodeVarint appends a varint-encoded integer to b and returns the result. +func encodeVarint(b []byte, x uint64) []byte { + for x >= 1<<7 { + b = append(b, byte(x&0x7f|0x80)) + x >>= 7 + } + return append(b, byte(x)) +} + +// decodeVarint reads a varint-encoded integer from b. +// Returns the decoded integer and the number of bytes read. +// If there is an error, it returns 0,0. 
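// Illustrative sketch, not part of the vendored file: the hand-unrolled decodeVarint
// that follows spells out up to ten per-byte steps for speed. A compact loop-based
// equivalent (hypothetical names, less strict about the final-byte overflow check)
// shows the base-128 format itself: every byte contributes its low 7 bits, and a set
// high bit means another byte follows.
package main

import "fmt"

// readVarint returns the decoded value and the number of bytes consumed,
// or (0, 0) for truncated or over-long input.
func readVarint(b []byte) (uint64, int) {
	var x uint64
	for i := 0; i < len(b) && i < 10; i++ {
		c := uint64(b[i])
		x |= (c & 0x7f) << uint(7*i)
		if c < 0x80 {
			return x, i + 1
		}
	}
	return 0, 0
}

// writeVarint appends the base-128 encoding of x, low 7 bits first,
// mirroring the encodeVarint helper above.
func writeVarint(b []byte, x uint64) []byte {
	for x >= 1<<7 {
		b = append(b, byte(x&0x7f|0x80))
		x >>= 7
	}
	return append(b, byte(x))
}

func main() {
	buf := writeVarint(nil, 300) // encodes as ac 02
	v, n := readVarint(buf)
	fmt.Printf("% x -> value %d, %d bytes\n", buf, v, n)
}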
+func decodeVarint(b []byte) (uint64, int) {
+	var x, y uint64
+	if len(b) <= 0 {
+		goto bad
+	}
+	x = uint64(b[0])
+	if x < 0x80 {
+		return x, 1
+	}
+	x -= 0x80
+
+	if len(b) <= 1 {
+		goto bad
+	}
+	y = uint64(b[1])
+	x += y << 7
+	if y < 0x80 {
+		return x, 2
+	}
+	x -= 0x80 << 7
+
+	if len(b) <= 2 {
+		goto bad
+	}
+	y = uint64(b[2])
+	x += y << 14
+	if y < 0x80 {
+		return x, 3
+	}
+	x -= 0x80 << 14
+
+	if len(b) <= 3 {
+		goto bad
+	}
+	y = uint64(b[3])
+	x += y << 21
+	if y < 0x80 {
+		return x, 4
+	}
+	x -= 0x80 << 21
+
+	if len(b) <= 4 {
+		goto bad
+	}
+	y = uint64(b[4])
+	x += y << 28
+	if y < 0x80 {
+		return x, 5
+	}
+	x -= 0x80 << 28
+
+	if len(b) <= 5 {
+		goto bad
+	}
+	y = uint64(b[5])
+	x += y << 35
+	if y < 0x80 {
+		return x, 6
+	}
+	x -= 0x80 << 35
+
+	if len(b) <= 6 {
+		goto bad
+	}
+	y = uint64(b[6])
+	x += y << 42
+	if y < 0x80 {
+		return x, 7
+	}
+	x -= 0x80 << 42
+
+	if len(b) <= 7 {
+		goto bad
+	}
+	y = uint64(b[7])
+	x += y << 49
+	if y < 0x80 {
+		return x, 8
+	}
+	x -= 0x80 << 49
+
+	if len(b) <= 8 {
+		goto bad
+	}
+	y = uint64(b[8])
+	x += y << 56
+	if y < 0x80 {
+		return x, 9
+	}
+	x -= 0x80 << 56
+
+	if len(b) <= 9 {
+		goto bad
+	}
+	y = uint64(b[9])
+	x += y << 63
+	if y < 2 {
+		return x, 10
+	}
+
+bad:
+	return 0, 0
+}
diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/test_proto/test.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/test_proto/test.pb.go
new file mode 100644
index 0000000..570227b
--- /dev/null
+++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/test_proto/test.pb.go
@@ -0,0 +1,5268 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: test_proto/test.proto
+
+package test_proto // import "github.com/golang/protobuf/proto/test_proto"
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type FOO int32
+
+const (
+	FOO_FOO1 FOO = 1
+)
+
+var FOO_name = map[int32]string{
+	1: "FOO1",
+}
+var FOO_value = map[string]int32{
+	"FOO1": 1,
+}
+
+func (x FOO) Enum() *FOO {
+	p := new(FOO)
+	*p = x
+	return p
+}
+func (x FOO) String() string {
+	return proto.EnumName(FOO_name, int32(x))
+}
+func (x *FOO) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO")
+	if err != nil {
+		return err
+	}
+	*x = FOO(value)
+	return nil
+}
+func (FOO) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_test_ee9f66cbbebc227c, []int{0}
+}
+
+// An enum, for completeness.
+type GoTest_KIND int32 + +const ( + GoTest_VOID GoTest_KIND = 0 + // Basic types + GoTest_BOOL GoTest_KIND = 1 + GoTest_BYTES GoTest_KIND = 2 + GoTest_FINGERPRINT GoTest_KIND = 3 + GoTest_FLOAT GoTest_KIND = 4 + GoTest_INT GoTest_KIND = 5 + GoTest_STRING GoTest_KIND = 6 + GoTest_TIME GoTest_KIND = 7 + // Groupings + GoTest_TUPLE GoTest_KIND = 8 + GoTest_ARRAY GoTest_KIND = 9 + GoTest_MAP GoTest_KIND = 10 + // Table types + GoTest_TABLE GoTest_KIND = 11 + // Functions + GoTest_FUNCTION GoTest_KIND = 12 +) + +var GoTest_KIND_name = map[int32]string{ + 0: "VOID", + 1: "BOOL", + 2: "BYTES", + 3: "FINGERPRINT", + 4: "FLOAT", + 5: "INT", + 6: "STRING", + 7: "TIME", + 8: "TUPLE", + 9: "ARRAY", + 10: "MAP", + 11: "TABLE", + 12: "FUNCTION", +} +var GoTest_KIND_value = map[string]int32{ + "VOID": 0, + "BOOL": 1, + "BYTES": 2, + "FINGERPRINT": 3, + "FLOAT": 4, + "INT": 5, + "STRING": 6, + "TIME": 7, + "TUPLE": 8, + "ARRAY": 9, + "MAP": 10, + "TABLE": 11, + "FUNCTION": 12, +} + +func (x GoTest_KIND) Enum() *GoTest_KIND { + p := new(GoTest_KIND) + *p = x + return p +} +func (x GoTest_KIND) String() string { + return proto.EnumName(GoTest_KIND_name, int32(x)) +} +func (x *GoTest_KIND) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(GoTest_KIND_value, data, "GoTest_KIND") + if err != nil { + return err + } + *x = GoTest_KIND(value) + return nil +} +func (GoTest_KIND) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{2, 0} +} + +type MyMessage_Color int32 + +const ( + MyMessage_RED MyMessage_Color = 0 + MyMessage_GREEN MyMessage_Color = 1 + MyMessage_BLUE MyMessage_Color = 2 +) + +var MyMessage_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var MyMessage_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x MyMessage_Color) Enum() *MyMessage_Color { + p := new(MyMessage_Color) + *p = x + return p +} +func (x MyMessage_Color) String() string { + return proto.EnumName(MyMessage_Color_name, int32(x)) +} +func (x *MyMessage_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MyMessage_Color_value, data, "MyMessage_Color") + if err != nil { + return err + } + *x = MyMessage_Color(value) + return nil +} +func (MyMessage_Color) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{13, 0} +} + +type DefaultsMessage_DefaultsEnum int32 + +const ( + DefaultsMessage_ZERO DefaultsMessage_DefaultsEnum = 0 + DefaultsMessage_ONE DefaultsMessage_DefaultsEnum = 1 + DefaultsMessage_TWO DefaultsMessage_DefaultsEnum = 2 +) + +var DefaultsMessage_DefaultsEnum_name = map[int32]string{ + 0: "ZERO", + 1: "ONE", + 2: "TWO", +} +var DefaultsMessage_DefaultsEnum_value = map[string]int32{ + "ZERO": 0, + "ONE": 1, + "TWO": 2, +} + +func (x DefaultsMessage_DefaultsEnum) Enum() *DefaultsMessage_DefaultsEnum { + p := new(DefaultsMessage_DefaultsEnum) + *p = x + return p +} +func (x DefaultsMessage_DefaultsEnum) String() string { + return proto.EnumName(DefaultsMessage_DefaultsEnum_name, int32(x)) +} +func (x *DefaultsMessage_DefaultsEnum) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(DefaultsMessage_DefaultsEnum_value, data, "DefaultsMessage_DefaultsEnum") + if err != nil { + return err + } + *x = DefaultsMessage_DefaultsEnum(value) + return nil +} +func (DefaultsMessage_DefaultsEnum) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{16, 0} +} + +type Defaults_Color int32 + +const ( + 
Defaults_RED Defaults_Color = 0 + Defaults_GREEN Defaults_Color = 1 + Defaults_BLUE Defaults_Color = 2 +) + +var Defaults_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var Defaults_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x Defaults_Color) Enum() *Defaults_Color { + p := new(Defaults_Color) + *p = x + return p +} +func (x Defaults_Color) String() string { + return proto.EnumName(Defaults_Color_name, int32(x)) +} +func (x *Defaults_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Defaults_Color_value, data, "Defaults_Color") + if err != nil { + return err + } + *x = Defaults_Color(value) + return nil +} +func (Defaults_Color) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{21, 0} +} + +type RepeatedEnum_Color int32 + +const ( + RepeatedEnum_RED RepeatedEnum_Color = 1 +) + +var RepeatedEnum_Color_name = map[int32]string{ + 1: "RED", +} +var RepeatedEnum_Color_value = map[string]int32{ + "RED": 1, +} + +func (x RepeatedEnum_Color) Enum() *RepeatedEnum_Color { + p := new(RepeatedEnum_Color) + *p = x + return p +} +func (x RepeatedEnum_Color) String() string { + return proto.EnumName(RepeatedEnum_Color_name, int32(x)) +} +func (x *RepeatedEnum_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RepeatedEnum_Color_value, data, "RepeatedEnum_Color") + if err != nil { + return err + } + *x = RepeatedEnum_Color(value) + return nil +} +func (RepeatedEnum_Color) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{23, 0} +} + +type GoEnum struct { + Foo *FOO `protobuf:"varint,1,req,name=foo,enum=test_proto.FOO" json:"foo,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GoEnum) Reset() { *m = GoEnum{} } +func (m *GoEnum) String() string { return proto.CompactTextString(m) } +func (*GoEnum) ProtoMessage() {} +func (*GoEnum) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{0} +} +func (m *GoEnum) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GoEnum.Unmarshal(m, b) +} +func (m *GoEnum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GoEnum.Marshal(b, m, deterministic) +} +func (dst *GoEnum) XXX_Merge(src proto.Message) { + xxx_messageInfo_GoEnum.Merge(dst, src) +} +func (m *GoEnum) XXX_Size() int { + return xxx_messageInfo_GoEnum.Size(m) +} +func (m *GoEnum) XXX_DiscardUnknown() { + xxx_messageInfo_GoEnum.DiscardUnknown(m) +} + +var xxx_messageInfo_GoEnum proto.InternalMessageInfo + +func (m *GoEnum) GetFoo() FOO { + if m != nil && m.Foo != nil { + return *m.Foo + } + return FOO_FOO1 +} + +type GoTestField struct { + Label *string `protobuf:"bytes,1,req,name=Label" json:"Label,omitempty"` + Type *string `protobuf:"bytes,2,req,name=Type" json:"Type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GoTestField) Reset() { *m = GoTestField{} } +func (m *GoTestField) String() string { return proto.CompactTextString(m) } +func (*GoTestField) ProtoMessage() {} +func (*GoTestField) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{1} +} +func (m *GoTestField) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GoTestField.Unmarshal(m, b) +} +func (m *GoTestField) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { + return xxx_messageInfo_GoTestField.Marshal(b, m, deterministic) +} +func (dst *GoTestField) XXX_Merge(src proto.Message) { + xxx_messageInfo_GoTestField.Merge(dst, src) +} +func (m *GoTestField) XXX_Size() int { + return xxx_messageInfo_GoTestField.Size(m) +} +func (m *GoTestField) XXX_DiscardUnknown() { + xxx_messageInfo_GoTestField.DiscardUnknown(m) +} + +var xxx_messageInfo_GoTestField proto.InternalMessageInfo + +func (m *GoTestField) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" +} + +func (m *GoTestField) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +type GoTest struct { + // Some typical parameters + Kind *GoTest_KIND `protobuf:"varint,1,req,name=Kind,enum=test_proto.GoTest_KIND" json:"Kind,omitempty"` + Table *string `protobuf:"bytes,2,opt,name=Table" json:"Table,omitempty"` + Param *int32 `protobuf:"varint,3,opt,name=Param" json:"Param,omitempty"` + // Required, repeated and optional foreign fields. + RequiredField *GoTestField `protobuf:"bytes,4,req,name=RequiredField" json:"RequiredField,omitempty"` + RepeatedField []*GoTestField `protobuf:"bytes,5,rep,name=RepeatedField" json:"RepeatedField,omitempty"` + OptionalField *GoTestField `protobuf:"bytes,6,opt,name=OptionalField" json:"OptionalField,omitempty"` + // Required fields of all basic types + F_BoolRequired *bool `protobuf:"varint,10,req,name=F_Bool_required,json=FBoolRequired" json:"F_Bool_required,omitempty"` + F_Int32Required *int32 `protobuf:"varint,11,req,name=F_Int32_required,json=FInt32Required" json:"F_Int32_required,omitempty"` + F_Int64Required *int64 `protobuf:"varint,12,req,name=F_Int64_required,json=FInt64Required" json:"F_Int64_required,omitempty"` + F_Fixed32Required *uint32 `protobuf:"fixed32,13,req,name=F_Fixed32_required,json=FFixed32Required" json:"F_Fixed32_required,omitempty"` + F_Fixed64Required *uint64 `protobuf:"fixed64,14,req,name=F_Fixed64_required,json=FFixed64Required" json:"F_Fixed64_required,omitempty"` + F_Uint32Required *uint32 `protobuf:"varint,15,req,name=F_Uint32_required,json=FUint32Required" json:"F_Uint32_required,omitempty"` + F_Uint64Required *uint64 `protobuf:"varint,16,req,name=F_Uint64_required,json=FUint64Required" json:"F_Uint64_required,omitempty"` + F_FloatRequired *float32 `protobuf:"fixed32,17,req,name=F_Float_required,json=FFloatRequired" json:"F_Float_required,omitempty"` + F_DoubleRequired *float64 `protobuf:"fixed64,18,req,name=F_Double_required,json=FDoubleRequired" json:"F_Double_required,omitempty"` + F_StringRequired *string `protobuf:"bytes,19,req,name=F_String_required,json=FStringRequired" json:"F_String_required,omitempty"` + F_BytesRequired []byte `protobuf:"bytes,101,req,name=F_Bytes_required,json=FBytesRequired" json:"F_Bytes_required,omitempty"` + F_Sint32Required *int32 `protobuf:"zigzag32,102,req,name=F_Sint32_required,json=FSint32Required" json:"F_Sint32_required,omitempty"` + F_Sint64Required *int64 `protobuf:"zigzag64,103,req,name=F_Sint64_required,json=FSint64Required" json:"F_Sint64_required,omitempty"` + F_Sfixed32Required *int32 `protobuf:"fixed32,104,req,name=F_Sfixed32_required,json=FSfixed32Required" json:"F_Sfixed32_required,omitempty"` + F_Sfixed64Required *int64 `protobuf:"fixed64,105,req,name=F_Sfixed64_required,json=FSfixed64Required" json:"F_Sfixed64_required,omitempty"` + // Repeated fields of all basic types + F_BoolRepeated []bool `protobuf:"varint,20,rep,name=F_Bool_repeated,json=FBoolRepeated" json:"F_Bool_repeated,omitempty"` + 
F_Int32Repeated []int32 `protobuf:"varint,21,rep,name=F_Int32_repeated,json=FInt32Repeated" json:"F_Int32_repeated,omitempty"` + F_Int64Repeated []int64 `protobuf:"varint,22,rep,name=F_Int64_repeated,json=FInt64Repeated" json:"F_Int64_repeated,omitempty"` + F_Fixed32Repeated []uint32 `protobuf:"fixed32,23,rep,name=F_Fixed32_repeated,json=FFixed32Repeated" json:"F_Fixed32_repeated,omitempty"` + F_Fixed64Repeated []uint64 `protobuf:"fixed64,24,rep,name=F_Fixed64_repeated,json=FFixed64Repeated" json:"F_Fixed64_repeated,omitempty"` + F_Uint32Repeated []uint32 `protobuf:"varint,25,rep,name=F_Uint32_repeated,json=FUint32Repeated" json:"F_Uint32_repeated,omitempty"` + F_Uint64Repeated []uint64 `protobuf:"varint,26,rep,name=F_Uint64_repeated,json=FUint64Repeated" json:"F_Uint64_repeated,omitempty"` + F_FloatRepeated []float32 `protobuf:"fixed32,27,rep,name=F_Float_repeated,json=FFloatRepeated" json:"F_Float_repeated,omitempty"` + F_DoubleRepeated []float64 `protobuf:"fixed64,28,rep,name=F_Double_repeated,json=FDoubleRepeated" json:"F_Double_repeated,omitempty"` + F_StringRepeated []string `protobuf:"bytes,29,rep,name=F_String_repeated,json=FStringRepeated" json:"F_String_repeated,omitempty"` + F_BytesRepeated [][]byte `protobuf:"bytes,201,rep,name=F_Bytes_repeated,json=FBytesRepeated" json:"F_Bytes_repeated,omitempty"` + F_Sint32Repeated []int32 `protobuf:"zigzag32,202,rep,name=F_Sint32_repeated,json=FSint32Repeated" json:"F_Sint32_repeated,omitempty"` + F_Sint64Repeated []int64 `protobuf:"zigzag64,203,rep,name=F_Sint64_repeated,json=FSint64Repeated" json:"F_Sint64_repeated,omitempty"` + F_Sfixed32Repeated []int32 `protobuf:"fixed32,204,rep,name=F_Sfixed32_repeated,json=FSfixed32Repeated" json:"F_Sfixed32_repeated,omitempty"` + F_Sfixed64Repeated []int64 `protobuf:"fixed64,205,rep,name=F_Sfixed64_repeated,json=FSfixed64Repeated" json:"F_Sfixed64_repeated,omitempty"` + // Optional fields of all basic types + F_BoolOptional *bool `protobuf:"varint,30,opt,name=F_Bool_optional,json=FBoolOptional" json:"F_Bool_optional,omitempty"` + F_Int32Optional *int32 `protobuf:"varint,31,opt,name=F_Int32_optional,json=FInt32Optional" json:"F_Int32_optional,omitempty"` + F_Int64Optional *int64 `protobuf:"varint,32,opt,name=F_Int64_optional,json=FInt64Optional" json:"F_Int64_optional,omitempty"` + F_Fixed32Optional *uint32 `protobuf:"fixed32,33,opt,name=F_Fixed32_optional,json=FFixed32Optional" json:"F_Fixed32_optional,omitempty"` + F_Fixed64Optional *uint64 `protobuf:"fixed64,34,opt,name=F_Fixed64_optional,json=FFixed64Optional" json:"F_Fixed64_optional,omitempty"` + F_Uint32Optional *uint32 `protobuf:"varint,35,opt,name=F_Uint32_optional,json=FUint32Optional" json:"F_Uint32_optional,omitempty"` + F_Uint64Optional *uint64 `protobuf:"varint,36,opt,name=F_Uint64_optional,json=FUint64Optional" json:"F_Uint64_optional,omitempty"` + F_FloatOptional *float32 `protobuf:"fixed32,37,opt,name=F_Float_optional,json=FFloatOptional" json:"F_Float_optional,omitempty"` + F_DoubleOptional *float64 `protobuf:"fixed64,38,opt,name=F_Double_optional,json=FDoubleOptional" json:"F_Double_optional,omitempty"` + F_StringOptional *string `protobuf:"bytes,39,opt,name=F_String_optional,json=FStringOptional" json:"F_String_optional,omitempty"` + F_BytesOptional []byte `protobuf:"bytes,301,opt,name=F_Bytes_optional,json=FBytesOptional" json:"F_Bytes_optional,omitempty"` + F_Sint32Optional *int32 `protobuf:"zigzag32,302,opt,name=F_Sint32_optional,json=FSint32Optional" json:"F_Sint32_optional,omitempty"` + F_Sint64Optional *int64 
`protobuf:"zigzag64,303,opt,name=F_Sint64_optional,json=FSint64Optional" json:"F_Sint64_optional,omitempty"` + F_Sfixed32Optional *int32 `protobuf:"fixed32,304,opt,name=F_Sfixed32_optional,json=FSfixed32Optional" json:"F_Sfixed32_optional,omitempty"` + F_Sfixed64Optional *int64 `protobuf:"fixed64,305,opt,name=F_Sfixed64_optional,json=FSfixed64Optional" json:"F_Sfixed64_optional,omitempty"` + // Default-valued fields of all basic types + F_BoolDefaulted *bool `protobuf:"varint,40,opt,name=F_Bool_defaulted,json=FBoolDefaulted,def=1" json:"F_Bool_defaulted,omitempty"` + F_Int32Defaulted *int32 `protobuf:"varint,41,opt,name=F_Int32_defaulted,json=FInt32Defaulted,def=32" json:"F_Int32_defaulted,omitempty"` + F_Int64Defaulted *int64 `protobuf:"varint,42,opt,name=F_Int64_defaulted,json=FInt64Defaulted,def=64" json:"F_Int64_defaulted,omitempty"` + F_Fixed32Defaulted *uint32 `protobuf:"fixed32,43,opt,name=F_Fixed32_defaulted,json=FFixed32Defaulted,def=320" json:"F_Fixed32_defaulted,omitempty"` + F_Fixed64Defaulted *uint64 `protobuf:"fixed64,44,opt,name=F_Fixed64_defaulted,json=FFixed64Defaulted,def=640" json:"F_Fixed64_defaulted,omitempty"` + F_Uint32Defaulted *uint32 `protobuf:"varint,45,opt,name=F_Uint32_defaulted,json=FUint32Defaulted,def=3200" json:"F_Uint32_defaulted,omitempty"` + F_Uint64Defaulted *uint64 `protobuf:"varint,46,opt,name=F_Uint64_defaulted,json=FUint64Defaulted,def=6400" json:"F_Uint64_defaulted,omitempty"` + F_FloatDefaulted *float32 `protobuf:"fixed32,47,opt,name=F_Float_defaulted,json=FFloatDefaulted,def=314159" json:"F_Float_defaulted,omitempty"` + F_DoubleDefaulted *float64 `protobuf:"fixed64,48,opt,name=F_Double_defaulted,json=FDoubleDefaulted,def=271828" json:"F_Double_defaulted,omitempty"` + F_StringDefaulted *string `protobuf:"bytes,49,opt,name=F_String_defaulted,json=FStringDefaulted,def=hello, \"world!\"\n" json:"F_String_defaulted,omitempty"` + F_BytesDefaulted []byte `protobuf:"bytes,401,opt,name=F_Bytes_defaulted,json=FBytesDefaulted,def=Bignose" json:"F_Bytes_defaulted,omitempty"` + F_Sint32Defaulted *int32 `protobuf:"zigzag32,402,opt,name=F_Sint32_defaulted,json=FSint32Defaulted,def=-32" json:"F_Sint32_defaulted,omitempty"` + F_Sint64Defaulted *int64 `protobuf:"zigzag64,403,opt,name=F_Sint64_defaulted,json=FSint64Defaulted,def=-64" json:"F_Sint64_defaulted,omitempty"` + F_Sfixed32Defaulted *int32 `protobuf:"fixed32,404,opt,name=F_Sfixed32_defaulted,json=FSfixed32Defaulted,def=-32" json:"F_Sfixed32_defaulted,omitempty"` + F_Sfixed64Defaulted *int64 `protobuf:"fixed64,405,opt,name=F_Sfixed64_defaulted,json=FSfixed64Defaulted,def=-64" json:"F_Sfixed64_defaulted,omitempty"` + // Packed repeated fields (no string or bytes). 
+ F_BoolRepeatedPacked []bool `protobuf:"varint,50,rep,packed,name=F_Bool_repeated_packed,json=FBoolRepeatedPacked" json:"F_Bool_repeated_packed,omitempty"` + F_Int32RepeatedPacked []int32 `protobuf:"varint,51,rep,packed,name=F_Int32_repeated_packed,json=FInt32RepeatedPacked" json:"F_Int32_repeated_packed,omitempty"` + F_Int64RepeatedPacked []int64 `protobuf:"varint,52,rep,packed,name=F_Int64_repeated_packed,json=FInt64RepeatedPacked" json:"F_Int64_repeated_packed,omitempty"` + F_Fixed32RepeatedPacked []uint32 `protobuf:"fixed32,53,rep,packed,name=F_Fixed32_repeated_packed,json=FFixed32RepeatedPacked" json:"F_Fixed32_repeated_packed,omitempty"` + F_Fixed64RepeatedPacked []uint64 `protobuf:"fixed64,54,rep,packed,name=F_Fixed64_repeated_packed,json=FFixed64RepeatedPacked" json:"F_Fixed64_repeated_packed,omitempty"` + F_Uint32RepeatedPacked []uint32 `protobuf:"varint,55,rep,packed,name=F_Uint32_repeated_packed,json=FUint32RepeatedPacked" json:"F_Uint32_repeated_packed,omitempty"` + F_Uint64RepeatedPacked []uint64 `protobuf:"varint,56,rep,packed,name=F_Uint64_repeated_packed,json=FUint64RepeatedPacked" json:"F_Uint64_repeated_packed,omitempty"` + F_FloatRepeatedPacked []float32 `protobuf:"fixed32,57,rep,packed,name=F_Float_repeated_packed,json=FFloatRepeatedPacked" json:"F_Float_repeated_packed,omitempty"` + F_DoubleRepeatedPacked []float64 `protobuf:"fixed64,58,rep,packed,name=F_Double_repeated_packed,json=FDoubleRepeatedPacked" json:"F_Double_repeated_packed,omitempty"` + F_Sint32RepeatedPacked []int32 `protobuf:"zigzag32,502,rep,packed,name=F_Sint32_repeated_packed,json=FSint32RepeatedPacked" json:"F_Sint32_repeated_packed,omitempty"` + F_Sint64RepeatedPacked []int64 `protobuf:"zigzag64,503,rep,packed,name=F_Sint64_repeated_packed,json=FSint64RepeatedPacked" json:"F_Sint64_repeated_packed,omitempty"` + F_Sfixed32RepeatedPacked []int32 `protobuf:"fixed32,504,rep,packed,name=F_Sfixed32_repeated_packed,json=FSfixed32RepeatedPacked" json:"F_Sfixed32_repeated_packed,omitempty"` + F_Sfixed64RepeatedPacked []int64 `protobuf:"fixed64,505,rep,packed,name=F_Sfixed64_repeated_packed,json=FSfixed64RepeatedPacked" json:"F_Sfixed64_repeated_packed,omitempty"` + Requiredgroup *GoTest_RequiredGroup `protobuf:"group,70,req,name=RequiredGroup,json=requiredgroup" json:"requiredgroup,omitempty"` + Repeatedgroup []*GoTest_RepeatedGroup `protobuf:"group,80,rep,name=RepeatedGroup,json=repeatedgroup" json:"repeatedgroup,omitempty"` + Optionalgroup *GoTest_OptionalGroup `protobuf:"group,90,opt,name=OptionalGroup,json=optionalgroup" json:"optionalgroup,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GoTest) Reset() { *m = GoTest{} } +func (m *GoTest) String() string { return proto.CompactTextString(m) } +func (*GoTest) ProtoMessage() {} +func (*GoTest) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{2} +} +func (m *GoTest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GoTest.Unmarshal(m, b) +} +func (m *GoTest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GoTest.Marshal(b, m, deterministic) +} +func (dst *GoTest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GoTest.Merge(dst, src) +} +func (m *GoTest) XXX_Size() int { + return xxx_messageInfo_GoTest.Size(m) +} +func (m *GoTest) XXX_DiscardUnknown() { + xxx_messageInfo_GoTest.DiscardUnknown(m) +} + +var xxx_messageInfo_GoTest proto.InternalMessageInfo + +const 
Default_GoTest_F_BoolDefaulted bool = true +const Default_GoTest_F_Int32Defaulted int32 = 32 +const Default_GoTest_F_Int64Defaulted int64 = 64 +const Default_GoTest_F_Fixed32Defaulted uint32 = 320 +const Default_GoTest_F_Fixed64Defaulted uint64 = 640 +const Default_GoTest_F_Uint32Defaulted uint32 = 3200 +const Default_GoTest_F_Uint64Defaulted uint64 = 6400 +const Default_GoTest_F_FloatDefaulted float32 = 314159 +const Default_GoTest_F_DoubleDefaulted float64 = 271828 +const Default_GoTest_F_StringDefaulted string = "hello, \"world!\"\n" + +var Default_GoTest_F_BytesDefaulted []byte = []byte("Bignose") + +const Default_GoTest_F_Sint32Defaulted int32 = -32 +const Default_GoTest_F_Sint64Defaulted int64 = -64 +const Default_GoTest_F_Sfixed32Defaulted int32 = -32 +const Default_GoTest_F_Sfixed64Defaulted int64 = -64 + +func (m *GoTest) GetKind() GoTest_KIND { + if m != nil && m.Kind != nil { + return *m.Kind + } + return GoTest_VOID +} + +func (m *GoTest) GetTable() string { + if m != nil && m.Table != nil { + return *m.Table + } + return "" +} + +func (m *GoTest) GetParam() int32 { + if m != nil && m.Param != nil { + return *m.Param + } + return 0 +} + +func (m *GoTest) GetRequiredField() *GoTestField { + if m != nil { + return m.RequiredField + } + return nil +} + +func (m *GoTest) GetRepeatedField() []*GoTestField { + if m != nil { + return m.RepeatedField + } + return nil +} + +func (m *GoTest) GetOptionalField() *GoTestField { + if m != nil { + return m.OptionalField + } + return nil +} + +func (m *GoTest) GetF_BoolRequired() bool { + if m != nil && m.F_BoolRequired != nil { + return *m.F_BoolRequired + } + return false +} + +func (m *GoTest) GetF_Int32Required() int32 { + if m != nil && m.F_Int32Required != nil { + return *m.F_Int32Required + } + return 0 +} + +func (m *GoTest) GetF_Int64Required() int64 { + if m != nil && m.F_Int64Required != nil { + return *m.F_Int64Required + } + return 0 +} + +func (m *GoTest) GetF_Fixed32Required() uint32 { + if m != nil && m.F_Fixed32Required != nil { + return *m.F_Fixed32Required + } + return 0 +} + +func (m *GoTest) GetF_Fixed64Required() uint64 { + if m != nil && m.F_Fixed64Required != nil { + return *m.F_Fixed64Required + } + return 0 +} + +func (m *GoTest) GetF_Uint32Required() uint32 { + if m != nil && m.F_Uint32Required != nil { + return *m.F_Uint32Required + } + return 0 +} + +func (m *GoTest) GetF_Uint64Required() uint64 { + if m != nil && m.F_Uint64Required != nil { + return *m.F_Uint64Required + } + return 0 +} + +func (m *GoTest) GetF_FloatRequired() float32 { + if m != nil && m.F_FloatRequired != nil { + return *m.F_FloatRequired + } + return 0 +} + +func (m *GoTest) GetF_DoubleRequired() float64 { + if m != nil && m.F_DoubleRequired != nil { + return *m.F_DoubleRequired + } + return 0 +} + +func (m *GoTest) GetF_StringRequired() string { + if m != nil && m.F_StringRequired != nil { + return *m.F_StringRequired + } + return "" +} + +func (m *GoTest) GetF_BytesRequired() []byte { + if m != nil { + return m.F_BytesRequired + } + return nil +} + +func (m *GoTest) GetF_Sint32Required() int32 { + if m != nil && m.F_Sint32Required != nil { + return *m.F_Sint32Required + } + return 0 +} + +func (m *GoTest) GetF_Sint64Required() int64 { + if m != nil && m.F_Sint64Required != nil { + return *m.F_Sint64Required + } + return 0 +} + +func (m *GoTest) GetF_Sfixed32Required() int32 { + if m != nil && m.F_Sfixed32Required != nil { + return *m.F_Sfixed32Required + } + return 0 +} + +func (m *GoTest) GetF_Sfixed64Required() int64 { + if m != nil && 
m.F_Sfixed64Required != nil { + return *m.F_Sfixed64Required + } + return 0 +} + +func (m *GoTest) GetF_BoolRepeated() []bool { + if m != nil { + return m.F_BoolRepeated + } + return nil +} + +func (m *GoTest) GetF_Int32Repeated() []int32 { + if m != nil { + return m.F_Int32Repeated + } + return nil +} + +func (m *GoTest) GetF_Int64Repeated() []int64 { + if m != nil { + return m.F_Int64Repeated + } + return nil +} + +func (m *GoTest) GetF_Fixed32Repeated() []uint32 { + if m != nil { + return m.F_Fixed32Repeated + } + return nil +} + +func (m *GoTest) GetF_Fixed64Repeated() []uint64 { + if m != nil { + return m.F_Fixed64Repeated + } + return nil +} + +func (m *GoTest) GetF_Uint32Repeated() []uint32 { + if m != nil { + return m.F_Uint32Repeated + } + return nil +} + +func (m *GoTest) GetF_Uint64Repeated() []uint64 { + if m != nil { + return m.F_Uint64Repeated + } + return nil +} + +func (m *GoTest) GetF_FloatRepeated() []float32 { + if m != nil { + return m.F_FloatRepeated + } + return nil +} + +func (m *GoTest) GetF_DoubleRepeated() []float64 { + if m != nil { + return m.F_DoubleRepeated + } + return nil +} + +func (m *GoTest) GetF_StringRepeated() []string { + if m != nil { + return m.F_StringRepeated + } + return nil +} + +func (m *GoTest) GetF_BytesRepeated() [][]byte { + if m != nil { + return m.F_BytesRepeated + } + return nil +} + +func (m *GoTest) GetF_Sint32Repeated() []int32 { + if m != nil { + return m.F_Sint32Repeated + } + return nil +} + +func (m *GoTest) GetF_Sint64Repeated() []int64 { + if m != nil { + return m.F_Sint64Repeated + } + return nil +} + +func (m *GoTest) GetF_Sfixed32Repeated() []int32 { + if m != nil { + return m.F_Sfixed32Repeated + } + return nil +} + +func (m *GoTest) GetF_Sfixed64Repeated() []int64 { + if m != nil { + return m.F_Sfixed64Repeated + } + return nil +} + +func (m *GoTest) GetF_BoolOptional() bool { + if m != nil && m.F_BoolOptional != nil { + return *m.F_BoolOptional + } + return false +} + +func (m *GoTest) GetF_Int32Optional() int32 { + if m != nil && m.F_Int32Optional != nil { + return *m.F_Int32Optional + } + return 0 +} + +func (m *GoTest) GetF_Int64Optional() int64 { + if m != nil && m.F_Int64Optional != nil { + return *m.F_Int64Optional + } + return 0 +} + +func (m *GoTest) GetF_Fixed32Optional() uint32 { + if m != nil && m.F_Fixed32Optional != nil { + return *m.F_Fixed32Optional + } + return 0 +} + +func (m *GoTest) GetF_Fixed64Optional() uint64 { + if m != nil && m.F_Fixed64Optional != nil { + return *m.F_Fixed64Optional + } + return 0 +} + +func (m *GoTest) GetF_Uint32Optional() uint32 { + if m != nil && m.F_Uint32Optional != nil { + return *m.F_Uint32Optional + } + return 0 +} + +func (m *GoTest) GetF_Uint64Optional() uint64 { + if m != nil && m.F_Uint64Optional != nil { + return *m.F_Uint64Optional + } + return 0 +} + +func (m *GoTest) GetF_FloatOptional() float32 { + if m != nil && m.F_FloatOptional != nil { + return *m.F_FloatOptional + } + return 0 +} + +func (m *GoTest) GetF_DoubleOptional() float64 { + if m != nil && m.F_DoubleOptional != nil { + return *m.F_DoubleOptional + } + return 0 +} + +func (m *GoTest) GetF_StringOptional() string { + if m != nil && m.F_StringOptional != nil { + return *m.F_StringOptional + } + return "" +} + +func (m *GoTest) GetF_BytesOptional() []byte { + if m != nil { + return m.F_BytesOptional + } + return nil +} + +func (m *GoTest) GetF_Sint32Optional() int32 { + if m != nil && m.F_Sint32Optional != nil { + return *m.F_Sint32Optional + } + return 0 +} + +func (m *GoTest) GetF_Sint64Optional() int64 
{ + if m != nil && m.F_Sint64Optional != nil { + return *m.F_Sint64Optional + } + return 0 +} + +func (m *GoTest) GetF_Sfixed32Optional() int32 { + if m != nil && m.F_Sfixed32Optional != nil { + return *m.F_Sfixed32Optional + } + return 0 +} + +func (m *GoTest) GetF_Sfixed64Optional() int64 { + if m != nil && m.F_Sfixed64Optional != nil { + return *m.F_Sfixed64Optional + } + return 0 +} + +func (m *GoTest) GetF_BoolDefaulted() bool { + if m != nil && m.F_BoolDefaulted != nil { + return *m.F_BoolDefaulted + } + return Default_GoTest_F_BoolDefaulted +} + +func (m *GoTest) GetF_Int32Defaulted() int32 { + if m != nil && m.F_Int32Defaulted != nil { + return *m.F_Int32Defaulted + } + return Default_GoTest_F_Int32Defaulted +} + +func (m *GoTest) GetF_Int64Defaulted() int64 { + if m != nil && m.F_Int64Defaulted != nil { + return *m.F_Int64Defaulted + } + return Default_GoTest_F_Int64Defaulted +} + +func (m *GoTest) GetF_Fixed32Defaulted() uint32 { + if m != nil && m.F_Fixed32Defaulted != nil { + return *m.F_Fixed32Defaulted + } + return Default_GoTest_F_Fixed32Defaulted +} + +func (m *GoTest) GetF_Fixed64Defaulted() uint64 { + if m != nil && m.F_Fixed64Defaulted != nil { + return *m.F_Fixed64Defaulted + } + return Default_GoTest_F_Fixed64Defaulted +} + +func (m *GoTest) GetF_Uint32Defaulted() uint32 { + if m != nil && m.F_Uint32Defaulted != nil { + return *m.F_Uint32Defaulted + } + return Default_GoTest_F_Uint32Defaulted +} + +func (m *GoTest) GetF_Uint64Defaulted() uint64 { + if m != nil && m.F_Uint64Defaulted != nil { + return *m.F_Uint64Defaulted + } + return Default_GoTest_F_Uint64Defaulted +} + +func (m *GoTest) GetF_FloatDefaulted() float32 { + if m != nil && m.F_FloatDefaulted != nil { + return *m.F_FloatDefaulted + } + return Default_GoTest_F_FloatDefaulted +} + +func (m *GoTest) GetF_DoubleDefaulted() float64 { + if m != nil && m.F_DoubleDefaulted != nil { + return *m.F_DoubleDefaulted + } + return Default_GoTest_F_DoubleDefaulted +} + +func (m *GoTest) GetF_StringDefaulted() string { + if m != nil && m.F_StringDefaulted != nil { + return *m.F_StringDefaulted + } + return Default_GoTest_F_StringDefaulted +} + +func (m *GoTest) GetF_BytesDefaulted() []byte { + if m != nil && m.F_BytesDefaulted != nil { + return m.F_BytesDefaulted + } + return append([]byte(nil), Default_GoTest_F_BytesDefaulted...) 
+} + +func (m *GoTest) GetF_Sint32Defaulted() int32 { + if m != nil && m.F_Sint32Defaulted != nil { + return *m.F_Sint32Defaulted + } + return Default_GoTest_F_Sint32Defaulted +} + +func (m *GoTest) GetF_Sint64Defaulted() int64 { + if m != nil && m.F_Sint64Defaulted != nil { + return *m.F_Sint64Defaulted + } + return Default_GoTest_F_Sint64Defaulted +} + +func (m *GoTest) GetF_Sfixed32Defaulted() int32 { + if m != nil && m.F_Sfixed32Defaulted != nil { + return *m.F_Sfixed32Defaulted + } + return Default_GoTest_F_Sfixed32Defaulted +} + +func (m *GoTest) GetF_Sfixed64Defaulted() int64 { + if m != nil && m.F_Sfixed64Defaulted != nil { + return *m.F_Sfixed64Defaulted + } + return Default_GoTest_F_Sfixed64Defaulted +} + +func (m *GoTest) GetF_BoolRepeatedPacked() []bool { + if m != nil { + return m.F_BoolRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Int32RepeatedPacked() []int32 { + if m != nil { + return m.F_Int32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Int64RepeatedPacked() []int64 { + if m != nil { + return m.F_Int64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Fixed32RepeatedPacked() []uint32 { + if m != nil { + return m.F_Fixed32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Fixed64RepeatedPacked() []uint64 { + if m != nil { + return m.F_Fixed64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Uint32RepeatedPacked() []uint32 { + if m != nil { + return m.F_Uint32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Uint64RepeatedPacked() []uint64 { + if m != nil { + return m.F_Uint64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_FloatRepeatedPacked() []float32 { + if m != nil { + return m.F_FloatRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_DoubleRepeatedPacked() []float64 { + if m != nil { + return m.F_DoubleRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Sint32RepeatedPacked() []int32 { + if m != nil { + return m.F_Sint32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Sint64RepeatedPacked() []int64 { + if m != nil { + return m.F_Sint64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Sfixed32RepeatedPacked() []int32 { + if m != nil { + return m.F_Sfixed32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Sfixed64RepeatedPacked() []int64 { + if m != nil { + return m.F_Sfixed64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetRequiredgroup() *GoTest_RequiredGroup { + if m != nil { + return m.Requiredgroup + } + return nil +} + +func (m *GoTest) GetRepeatedgroup() []*GoTest_RepeatedGroup { + if m != nil { + return m.Repeatedgroup + } + return nil +} + +func (m *GoTest) GetOptionalgroup() *GoTest_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil +} + +// Required, repeated, and optional groups. 
+type GoTest_RequiredGroup struct { + RequiredField *string `protobuf:"bytes,71,req,name=RequiredField" json:"RequiredField,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GoTest_RequiredGroup) Reset() { *m = GoTest_RequiredGroup{} } +func (m *GoTest_RequiredGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_RequiredGroup) ProtoMessage() {} +func (*GoTest_RequiredGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{2, 0} +} +func (m *GoTest_RequiredGroup) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GoTest_RequiredGroup.Unmarshal(m, b) +} +func (m *GoTest_RequiredGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GoTest_RequiredGroup.Marshal(b, m, deterministic) +} +func (dst *GoTest_RequiredGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_GoTest_RequiredGroup.Merge(dst, src) +} +func (m *GoTest_RequiredGroup) XXX_Size() int { + return xxx_messageInfo_GoTest_RequiredGroup.Size(m) +} +func (m *GoTest_RequiredGroup) XXX_DiscardUnknown() { + xxx_messageInfo_GoTest_RequiredGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_GoTest_RequiredGroup proto.InternalMessageInfo + +func (m *GoTest_RequiredGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +type GoTest_RepeatedGroup struct { + RequiredField *string `protobuf:"bytes,81,req,name=RequiredField" json:"RequiredField,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GoTest_RepeatedGroup) Reset() { *m = GoTest_RepeatedGroup{} } +func (m *GoTest_RepeatedGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_RepeatedGroup) ProtoMessage() {} +func (*GoTest_RepeatedGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{2, 1} +} +func (m *GoTest_RepeatedGroup) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GoTest_RepeatedGroup.Unmarshal(m, b) +} +func (m *GoTest_RepeatedGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GoTest_RepeatedGroup.Marshal(b, m, deterministic) +} +func (dst *GoTest_RepeatedGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_GoTest_RepeatedGroup.Merge(dst, src) +} +func (m *GoTest_RepeatedGroup) XXX_Size() int { + return xxx_messageInfo_GoTest_RepeatedGroup.Size(m) +} +func (m *GoTest_RepeatedGroup) XXX_DiscardUnknown() { + xxx_messageInfo_GoTest_RepeatedGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_GoTest_RepeatedGroup proto.InternalMessageInfo + +func (m *GoTest_RepeatedGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +type GoTest_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,91,req,name=RequiredField" json:"RequiredField,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GoTest_OptionalGroup) Reset() { *m = GoTest_OptionalGroup{} } +func (m *GoTest_OptionalGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_OptionalGroup) ProtoMessage() {} +func (*GoTest_OptionalGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{2, 2} +} +func (m *GoTest_OptionalGroup) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_GoTest_OptionalGroup.Unmarshal(m, b) +} +func (m *GoTest_OptionalGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GoTest_OptionalGroup.Marshal(b, m, deterministic) +} +func (dst *GoTest_OptionalGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_GoTest_OptionalGroup.Merge(dst, src) +} +func (m *GoTest_OptionalGroup) XXX_Size() int { + return xxx_messageInfo_GoTest_OptionalGroup.Size(m) +} +func (m *GoTest_OptionalGroup) XXX_DiscardUnknown() { + xxx_messageInfo_GoTest_OptionalGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_GoTest_OptionalGroup proto.InternalMessageInfo + +func (m *GoTest_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +// For testing a group containing a required field. +type GoTestRequiredGroupField struct { + Group *GoTestRequiredGroupField_Group `protobuf:"group,1,req,name=Group,json=group" json:"group,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GoTestRequiredGroupField) Reset() { *m = GoTestRequiredGroupField{} } +func (m *GoTestRequiredGroupField) String() string { return proto.CompactTextString(m) } +func (*GoTestRequiredGroupField) ProtoMessage() {} +func (*GoTestRequiredGroupField) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{3} +} +func (m *GoTestRequiredGroupField) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GoTestRequiredGroupField.Unmarshal(m, b) +} +func (m *GoTestRequiredGroupField) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GoTestRequiredGroupField.Marshal(b, m, deterministic) +} +func (dst *GoTestRequiredGroupField) XXX_Merge(src proto.Message) { + xxx_messageInfo_GoTestRequiredGroupField.Merge(dst, src) +} +func (m *GoTestRequiredGroupField) XXX_Size() int { + return xxx_messageInfo_GoTestRequiredGroupField.Size(m) +} +func (m *GoTestRequiredGroupField) XXX_DiscardUnknown() { + xxx_messageInfo_GoTestRequiredGroupField.DiscardUnknown(m) +} + +var xxx_messageInfo_GoTestRequiredGroupField proto.InternalMessageInfo + +func (m *GoTestRequiredGroupField) GetGroup() *GoTestRequiredGroupField_Group { + if m != nil { + return m.Group + } + return nil +} + +type GoTestRequiredGroupField_Group struct { + Field *int32 `protobuf:"varint,2,req,name=Field" json:"Field,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GoTestRequiredGroupField_Group) Reset() { *m = GoTestRequiredGroupField_Group{} } +func (m *GoTestRequiredGroupField_Group) String() string { return proto.CompactTextString(m) } +func (*GoTestRequiredGroupField_Group) ProtoMessage() {} +func (*GoTestRequiredGroupField_Group) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{3, 0} +} +func (m *GoTestRequiredGroupField_Group) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GoTestRequiredGroupField_Group.Unmarshal(m, b) +} +func (m *GoTestRequiredGroupField_Group) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GoTestRequiredGroupField_Group.Marshal(b, m, deterministic) +} +func (dst *GoTestRequiredGroupField_Group) XXX_Merge(src proto.Message) { + xxx_messageInfo_GoTestRequiredGroupField_Group.Merge(dst, src) +} +func (m *GoTestRequiredGroupField_Group) XXX_Size() int { + return 
xxx_messageInfo_GoTestRequiredGroupField_Group.Size(m) +} +func (m *GoTestRequiredGroupField_Group) XXX_DiscardUnknown() { + xxx_messageInfo_GoTestRequiredGroupField_Group.DiscardUnknown(m) +} + +var xxx_messageInfo_GoTestRequiredGroupField_Group proto.InternalMessageInfo + +func (m *GoTestRequiredGroupField_Group) GetField() int32 { + if m != nil && m.Field != nil { + return *m.Field + } + return 0 +} + +// For testing skipping of unrecognized fields. +// Numbers are all big, larger than tag numbers in GoTestField, +// the message used in the corresponding test. +type GoSkipTest struct { + SkipInt32 *int32 `protobuf:"varint,11,req,name=skip_int32,json=skipInt32" json:"skip_int32,omitempty"` + SkipFixed32 *uint32 `protobuf:"fixed32,12,req,name=skip_fixed32,json=skipFixed32" json:"skip_fixed32,omitempty"` + SkipFixed64 *uint64 `protobuf:"fixed64,13,req,name=skip_fixed64,json=skipFixed64" json:"skip_fixed64,omitempty"` + SkipString *string `protobuf:"bytes,14,req,name=skip_string,json=skipString" json:"skip_string,omitempty"` + Skipgroup *GoSkipTest_SkipGroup `protobuf:"group,15,req,name=SkipGroup,json=skipgroup" json:"skipgroup,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GoSkipTest) Reset() { *m = GoSkipTest{} } +func (m *GoSkipTest) String() string { return proto.CompactTextString(m) } +func (*GoSkipTest) ProtoMessage() {} +func (*GoSkipTest) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{4} +} +func (m *GoSkipTest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GoSkipTest.Unmarshal(m, b) +} +func (m *GoSkipTest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GoSkipTest.Marshal(b, m, deterministic) +} +func (dst *GoSkipTest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GoSkipTest.Merge(dst, src) +} +func (m *GoSkipTest) XXX_Size() int { + return xxx_messageInfo_GoSkipTest.Size(m) +} +func (m *GoSkipTest) XXX_DiscardUnknown() { + xxx_messageInfo_GoSkipTest.DiscardUnknown(m) +} + +var xxx_messageInfo_GoSkipTest proto.InternalMessageInfo + +func (m *GoSkipTest) GetSkipInt32() int32 { + if m != nil && m.SkipInt32 != nil { + return *m.SkipInt32 + } + return 0 +} + +func (m *GoSkipTest) GetSkipFixed32() uint32 { + if m != nil && m.SkipFixed32 != nil { + return *m.SkipFixed32 + } + return 0 +} + +func (m *GoSkipTest) GetSkipFixed64() uint64 { + if m != nil && m.SkipFixed64 != nil { + return *m.SkipFixed64 + } + return 0 +} + +func (m *GoSkipTest) GetSkipString() string { + if m != nil && m.SkipString != nil { + return *m.SkipString + } + return "" +} + +func (m *GoSkipTest) GetSkipgroup() *GoSkipTest_SkipGroup { + if m != nil { + return m.Skipgroup + } + return nil +} + +type GoSkipTest_SkipGroup struct { + GroupInt32 *int32 `protobuf:"varint,16,req,name=group_int32,json=groupInt32" json:"group_int32,omitempty"` + GroupString *string `protobuf:"bytes,17,req,name=group_string,json=groupString" json:"group_string,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GoSkipTest_SkipGroup) Reset() { *m = GoSkipTest_SkipGroup{} } +func (m *GoSkipTest_SkipGroup) String() string { return proto.CompactTextString(m) } +func (*GoSkipTest_SkipGroup) ProtoMessage() {} +func (*GoSkipTest_SkipGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{4, 0} +} +func (m *GoSkipTest_SkipGroup) XXX_Unmarshal(b 
[]byte) error { + return xxx_messageInfo_GoSkipTest_SkipGroup.Unmarshal(m, b) +} +func (m *GoSkipTest_SkipGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GoSkipTest_SkipGroup.Marshal(b, m, deterministic) +} +func (dst *GoSkipTest_SkipGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_GoSkipTest_SkipGroup.Merge(dst, src) +} +func (m *GoSkipTest_SkipGroup) XXX_Size() int { + return xxx_messageInfo_GoSkipTest_SkipGroup.Size(m) +} +func (m *GoSkipTest_SkipGroup) XXX_DiscardUnknown() { + xxx_messageInfo_GoSkipTest_SkipGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_GoSkipTest_SkipGroup proto.InternalMessageInfo + +func (m *GoSkipTest_SkipGroup) GetGroupInt32() int32 { + if m != nil && m.GroupInt32 != nil { + return *m.GroupInt32 + } + return 0 +} + +func (m *GoSkipTest_SkipGroup) GetGroupString() string { + if m != nil && m.GroupString != nil { + return *m.GroupString + } + return "" +} + +// For testing packed/non-packed decoder switching. +// A serialized instance of one should be deserializable as the other. +type NonPackedTest struct { + A []int32 `protobuf:"varint,1,rep,name=a" json:"a,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NonPackedTest) Reset() { *m = NonPackedTest{} } +func (m *NonPackedTest) String() string { return proto.CompactTextString(m) } +func (*NonPackedTest) ProtoMessage() {} +func (*NonPackedTest) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{5} +} +func (m *NonPackedTest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NonPackedTest.Unmarshal(m, b) +} +func (m *NonPackedTest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NonPackedTest.Marshal(b, m, deterministic) +} +func (dst *NonPackedTest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonPackedTest.Merge(dst, src) +} +func (m *NonPackedTest) XXX_Size() int { + return xxx_messageInfo_NonPackedTest.Size(m) +} +func (m *NonPackedTest) XXX_DiscardUnknown() { + xxx_messageInfo_NonPackedTest.DiscardUnknown(m) +} + +var xxx_messageInfo_NonPackedTest proto.InternalMessageInfo + +func (m *NonPackedTest) GetA() []int32 { + if m != nil { + return m.A + } + return nil +} + +type PackedTest struct { + B []int32 `protobuf:"varint,1,rep,packed,name=b" json:"b,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PackedTest) Reset() { *m = PackedTest{} } +func (m *PackedTest) String() string { return proto.CompactTextString(m) } +func (*PackedTest) ProtoMessage() {} +func (*PackedTest) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{6} +} +func (m *PackedTest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PackedTest.Unmarshal(m, b) +} +func (m *PackedTest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PackedTest.Marshal(b, m, deterministic) +} +func (dst *PackedTest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PackedTest.Merge(dst, src) +} +func (m *PackedTest) XXX_Size() int { + return xxx_messageInfo_PackedTest.Size(m) +} +func (m *PackedTest) XXX_DiscardUnknown() { + xxx_messageInfo_PackedTest.DiscardUnknown(m) +} + +var xxx_messageInfo_PackedTest proto.InternalMessageInfo + +func (m *PackedTest) GetB() []int32 { + if m != nil { + return m.B + } + return nil +} + +type MaxTag struct { + // Maximum possible tag number. 
+ LastField *string `protobuf:"bytes,536870911,opt,name=last_field,json=lastField" json:"last_field,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MaxTag) Reset() { *m = MaxTag{} } +func (m *MaxTag) String() string { return proto.CompactTextString(m) } +func (*MaxTag) ProtoMessage() {} +func (*MaxTag) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{7} +} +func (m *MaxTag) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MaxTag.Unmarshal(m, b) +} +func (m *MaxTag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MaxTag.Marshal(b, m, deterministic) +} +func (dst *MaxTag) XXX_Merge(src proto.Message) { + xxx_messageInfo_MaxTag.Merge(dst, src) +} +func (m *MaxTag) XXX_Size() int { + return xxx_messageInfo_MaxTag.Size(m) +} +func (m *MaxTag) XXX_DiscardUnknown() { + xxx_messageInfo_MaxTag.DiscardUnknown(m) +} + +var xxx_messageInfo_MaxTag proto.InternalMessageInfo + +func (m *MaxTag) GetLastField() string { + if m != nil && m.LastField != nil { + return *m.LastField + } + return "" +} + +type OldMessage struct { + Nested *OldMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` + Num *int32 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OldMessage) Reset() { *m = OldMessage{} } +func (m *OldMessage) String() string { return proto.CompactTextString(m) } +func (*OldMessage) ProtoMessage() {} +func (*OldMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{8} +} +func (m *OldMessage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OldMessage.Unmarshal(m, b) +} +func (m *OldMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OldMessage.Marshal(b, m, deterministic) +} +func (dst *OldMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_OldMessage.Merge(dst, src) +} +func (m *OldMessage) XXX_Size() int { + return xxx_messageInfo_OldMessage.Size(m) +} +func (m *OldMessage) XXX_DiscardUnknown() { + xxx_messageInfo_OldMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_OldMessage proto.InternalMessageInfo + +func (m *OldMessage) GetNested() *OldMessage_Nested { + if m != nil { + return m.Nested + } + return nil +} + +func (m *OldMessage) GetNum() int32 { + if m != nil && m.Num != nil { + return *m.Num + } + return 0 +} + +type OldMessage_Nested struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OldMessage_Nested) Reset() { *m = OldMessage_Nested{} } +func (m *OldMessage_Nested) String() string { return proto.CompactTextString(m) } +func (*OldMessage_Nested) ProtoMessage() {} +func (*OldMessage_Nested) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{8, 0} +} +func (m *OldMessage_Nested) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OldMessage_Nested.Unmarshal(m, b) +} +func (m *OldMessage_Nested) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OldMessage_Nested.Marshal(b, m, deterministic) +} +func (dst *OldMessage_Nested) XXX_Merge(src proto.Message) { + xxx_messageInfo_OldMessage_Nested.Merge(dst, src) +} +func (m *OldMessage_Nested) XXX_Size() 
int { + return xxx_messageInfo_OldMessage_Nested.Size(m) +} +func (m *OldMessage_Nested) XXX_DiscardUnknown() { + xxx_messageInfo_OldMessage_Nested.DiscardUnknown(m) +} + +var xxx_messageInfo_OldMessage_Nested proto.InternalMessageInfo + +func (m *OldMessage_Nested) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +// NewMessage is wire compatible with OldMessage; +// imagine it as a future version. +type NewMessage struct { + Nested *NewMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` + // This is an int32 in OldMessage. + Num *int64 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NewMessage) Reset() { *m = NewMessage{} } +func (m *NewMessage) String() string { return proto.CompactTextString(m) } +func (*NewMessage) ProtoMessage() {} +func (*NewMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{9} +} +func (m *NewMessage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NewMessage.Unmarshal(m, b) +} +func (m *NewMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NewMessage.Marshal(b, m, deterministic) +} +func (dst *NewMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_NewMessage.Merge(dst, src) +} +func (m *NewMessage) XXX_Size() int { + return xxx_messageInfo_NewMessage.Size(m) +} +func (m *NewMessage) XXX_DiscardUnknown() { + xxx_messageInfo_NewMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_NewMessage proto.InternalMessageInfo + +func (m *NewMessage) GetNested() *NewMessage_Nested { + if m != nil { + return m.Nested + } + return nil +} + +func (m *NewMessage) GetNum() int64 { + if m != nil && m.Num != nil { + return *m.Num + } + return 0 +} + +type NewMessage_Nested struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + FoodGroup *string `protobuf:"bytes,2,opt,name=food_group,json=foodGroup" json:"food_group,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NewMessage_Nested) Reset() { *m = NewMessage_Nested{} } +func (m *NewMessage_Nested) String() string { return proto.CompactTextString(m) } +func (*NewMessage_Nested) ProtoMessage() {} +func (*NewMessage_Nested) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{9, 0} +} +func (m *NewMessage_Nested) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NewMessage_Nested.Unmarshal(m, b) +} +func (m *NewMessage_Nested) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NewMessage_Nested.Marshal(b, m, deterministic) +} +func (dst *NewMessage_Nested) XXX_Merge(src proto.Message) { + xxx_messageInfo_NewMessage_Nested.Merge(dst, src) +} +func (m *NewMessage_Nested) XXX_Size() int { + return xxx_messageInfo_NewMessage_Nested.Size(m) +} +func (m *NewMessage_Nested) XXX_DiscardUnknown() { + xxx_messageInfo_NewMessage_Nested.DiscardUnknown(m) +} + +var xxx_messageInfo_NewMessage_Nested proto.InternalMessageInfo + +func (m *NewMessage_Nested) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *NewMessage_Nested) GetFoodGroup() string { + if m != nil && m.FoodGroup != nil { + return *m.FoodGroup + } + return "" +} + +type InnerMessage struct { + Host *string `protobuf:"bytes,1,req,name=host" 
json:"host,omitempty"` + Port *int32 `protobuf:"varint,2,opt,name=port,def=4000" json:"port,omitempty"` + Connected *bool `protobuf:"varint,3,opt,name=connected" json:"connected,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InnerMessage) Reset() { *m = InnerMessage{} } +func (m *InnerMessage) String() string { return proto.CompactTextString(m) } +func (*InnerMessage) ProtoMessage() {} +func (*InnerMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{10} +} +func (m *InnerMessage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InnerMessage.Unmarshal(m, b) +} +func (m *InnerMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InnerMessage.Marshal(b, m, deterministic) +} +func (dst *InnerMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_InnerMessage.Merge(dst, src) +} +func (m *InnerMessage) XXX_Size() int { + return xxx_messageInfo_InnerMessage.Size(m) +} +func (m *InnerMessage) XXX_DiscardUnknown() { + xxx_messageInfo_InnerMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_InnerMessage proto.InternalMessageInfo + +const Default_InnerMessage_Port int32 = 4000 + +func (m *InnerMessage) GetHost() string { + if m != nil && m.Host != nil { + return *m.Host + } + return "" +} + +func (m *InnerMessage) GetPort() int32 { + if m != nil && m.Port != nil { + return *m.Port + } + return Default_InnerMessage_Port +} + +func (m *InnerMessage) GetConnected() bool { + if m != nil && m.Connected != nil { + return *m.Connected + } + return false +} + +type OtherMessage struct { + Key *int64 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + Weight *float32 `protobuf:"fixed32,3,opt,name=weight" json:"weight,omitempty"` + Inner *InnerMessage `protobuf:"bytes,4,opt,name=inner" json:"inner,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OtherMessage) Reset() { *m = OtherMessage{} } +func (m *OtherMessage) String() string { return proto.CompactTextString(m) } +func (*OtherMessage) ProtoMessage() {} +func (*OtherMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{11} +} + +var extRange_OtherMessage = []proto.ExtensionRange{ + {Start: 100, End: 536870911}, +} + +func (*OtherMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OtherMessage +} +func (m *OtherMessage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OtherMessage.Unmarshal(m, b) +} +func (m *OtherMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OtherMessage.Marshal(b, m, deterministic) +} +func (dst *OtherMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_OtherMessage.Merge(dst, src) +} +func (m *OtherMessage) XXX_Size() int { + return xxx_messageInfo_OtherMessage.Size(m) +} +func (m *OtherMessage) XXX_DiscardUnknown() { + xxx_messageInfo_OtherMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_OtherMessage proto.InternalMessageInfo + +func (m *OtherMessage) GetKey() int64 { + if m != nil && m.Key != nil { + return *m.Key + } + return 0 +} + +func (m *OtherMessage) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *OtherMessage) GetWeight() float32 { + if m != nil && m.Weight != nil { + return 
*m.Weight + } + return 0 +} + +func (m *OtherMessage) GetInner() *InnerMessage { + if m != nil { + return m.Inner + } + return nil +} + +type RequiredInnerMessage struct { + LeoFinallyWonAnOscar *InnerMessage `protobuf:"bytes,1,req,name=leo_finally_won_an_oscar,json=leoFinallyWonAnOscar" json:"leo_finally_won_an_oscar,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RequiredInnerMessage) Reset() { *m = RequiredInnerMessage{} } +func (m *RequiredInnerMessage) String() string { return proto.CompactTextString(m) } +func (*RequiredInnerMessage) ProtoMessage() {} +func (*RequiredInnerMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{12} +} +func (m *RequiredInnerMessage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RequiredInnerMessage.Unmarshal(m, b) +} +func (m *RequiredInnerMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RequiredInnerMessage.Marshal(b, m, deterministic) +} +func (dst *RequiredInnerMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequiredInnerMessage.Merge(dst, src) +} +func (m *RequiredInnerMessage) XXX_Size() int { + return xxx_messageInfo_RequiredInnerMessage.Size(m) +} +func (m *RequiredInnerMessage) XXX_DiscardUnknown() { + xxx_messageInfo_RequiredInnerMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_RequiredInnerMessage proto.InternalMessageInfo + +func (m *RequiredInnerMessage) GetLeoFinallyWonAnOscar() *InnerMessage { + if m != nil { + return m.LeoFinallyWonAnOscar + } + return nil +} + +type MyMessage struct { + Count *int32 `protobuf:"varint,1,req,name=count" json:"count,omitempty"` + Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + Quote *string `protobuf:"bytes,3,opt,name=quote" json:"quote,omitempty"` + Pet []string `protobuf:"bytes,4,rep,name=pet" json:"pet,omitempty"` + Inner *InnerMessage `protobuf:"bytes,5,opt,name=inner" json:"inner,omitempty"` + Others []*OtherMessage `protobuf:"bytes,6,rep,name=others" json:"others,omitempty"` + WeMustGoDeeper *RequiredInnerMessage `protobuf:"bytes,13,opt,name=we_must_go_deeper,json=weMustGoDeeper" json:"we_must_go_deeper,omitempty"` + RepInner []*InnerMessage `protobuf:"bytes,12,rep,name=rep_inner,json=repInner" json:"rep_inner,omitempty"` + Bikeshed *MyMessage_Color `protobuf:"varint,7,opt,name=bikeshed,enum=test_proto.MyMessage_Color" json:"bikeshed,omitempty"` + Somegroup *MyMessage_SomeGroup `protobuf:"group,8,opt,name=SomeGroup,json=somegroup" json:"somegroup,omitempty"` + // This field becomes [][]byte in the generated code. 
+ RepBytes [][]byte `protobuf:"bytes,10,rep,name=rep_bytes,json=repBytes" json:"rep_bytes,omitempty"` + Bigfloat *float64 `protobuf:"fixed64,11,opt,name=bigfloat" json:"bigfloat,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MyMessage) Reset() { *m = MyMessage{} } +func (m *MyMessage) String() string { return proto.CompactTextString(m) } +func (*MyMessage) ProtoMessage() {} +func (*MyMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{13} +} + +var extRange_MyMessage = []proto.ExtensionRange{ + {Start: 100, End: 536870911}, +} + +func (*MyMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MyMessage +} +func (m *MyMessage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MyMessage.Unmarshal(m, b) +} +func (m *MyMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MyMessage.Marshal(b, m, deterministic) +} +func (dst *MyMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_MyMessage.Merge(dst, src) +} +func (m *MyMessage) XXX_Size() int { + return xxx_messageInfo_MyMessage.Size(m) +} +func (m *MyMessage) XXX_DiscardUnknown() { + xxx_messageInfo_MyMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_MyMessage proto.InternalMessageInfo + +func (m *MyMessage) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *MyMessage) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MyMessage) GetQuote() string { + if m != nil && m.Quote != nil { + return *m.Quote + } + return "" +} + +func (m *MyMessage) GetPet() []string { + if m != nil { + return m.Pet + } + return nil +} + +func (m *MyMessage) GetInner() *InnerMessage { + if m != nil { + return m.Inner + } + return nil +} + +func (m *MyMessage) GetOthers() []*OtherMessage { + if m != nil { + return m.Others + } + return nil +} + +func (m *MyMessage) GetWeMustGoDeeper() *RequiredInnerMessage { + if m != nil { + return m.WeMustGoDeeper + } + return nil +} + +func (m *MyMessage) GetRepInner() []*InnerMessage { + if m != nil { + return m.RepInner + } + return nil +} + +func (m *MyMessage) GetBikeshed() MyMessage_Color { + if m != nil && m.Bikeshed != nil { + return *m.Bikeshed + } + return MyMessage_RED +} + +func (m *MyMessage) GetSomegroup() *MyMessage_SomeGroup { + if m != nil { + return m.Somegroup + } + return nil +} + +func (m *MyMessage) GetRepBytes() [][]byte { + if m != nil { + return m.RepBytes + } + return nil +} + +func (m *MyMessage) GetBigfloat() float64 { + if m != nil && m.Bigfloat != nil { + return *m.Bigfloat + } + return 0 +} + +type MyMessage_SomeGroup struct { + GroupField *int32 `protobuf:"varint,9,opt,name=group_field,json=groupField" json:"group_field,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MyMessage_SomeGroup) Reset() { *m = MyMessage_SomeGroup{} } +func (m *MyMessage_SomeGroup) String() string { return proto.CompactTextString(m) } +func (*MyMessage_SomeGroup) ProtoMessage() {} +func (*MyMessage_SomeGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{13, 0} +} +func (m *MyMessage_SomeGroup) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MyMessage_SomeGroup.Unmarshal(m, b) +} +func (m *MyMessage_SomeGroup) XXX_Marshal(b []byte, deterministic 
bool) ([]byte, error) { + return xxx_messageInfo_MyMessage_SomeGroup.Marshal(b, m, deterministic) +} +func (dst *MyMessage_SomeGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_MyMessage_SomeGroup.Merge(dst, src) +} +func (m *MyMessage_SomeGroup) XXX_Size() int { + return xxx_messageInfo_MyMessage_SomeGroup.Size(m) +} +func (m *MyMessage_SomeGroup) XXX_DiscardUnknown() { + xxx_messageInfo_MyMessage_SomeGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_MyMessage_SomeGroup proto.InternalMessageInfo + +func (m *MyMessage_SomeGroup) GetGroupField() int32 { + if m != nil && m.GroupField != nil { + return *m.GroupField + } + return 0 +} + +type Ext struct { + Data *string `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` + MapField map[int32]int32 `protobuf:"bytes,2,rep,name=map_field,json=mapField" json:"map_field,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Ext) Reset() { *m = Ext{} } +func (m *Ext) String() string { return proto.CompactTextString(m) } +func (*Ext) ProtoMessage() {} +func (*Ext) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{14} +} +func (m *Ext) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Ext.Unmarshal(m, b) +} +func (m *Ext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Ext.Marshal(b, m, deterministic) +} +func (dst *Ext) XXX_Merge(src proto.Message) { + xxx_messageInfo_Ext.Merge(dst, src) +} +func (m *Ext) XXX_Size() int { + return xxx_messageInfo_Ext.Size(m) +} +func (m *Ext) XXX_DiscardUnknown() { + xxx_messageInfo_Ext.DiscardUnknown(m) +} + +var xxx_messageInfo_Ext proto.InternalMessageInfo + +func (m *Ext) GetData() string { + if m != nil && m.Data != nil { + return *m.Data + } + return "" +} + +func (m *Ext) GetMapField() map[int32]int32 { + if m != nil { + return m.MapField + } + return nil +} + +var E_Ext_More = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*Ext)(nil), + Field: 103, + Name: "test_proto.Ext.more", + Tag: "bytes,103,opt,name=more", + Filename: "test_proto/test.proto", +} + +var E_Ext_Text = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*string)(nil), + Field: 104, + Name: "test_proto.Ext.text", + Tag: "bytes,104,opt,name=text", + Filename: "test_proto/test.proto", +} + +var E_Ext_Number = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 105, + Name: "test_proto.Ext.number", + Tag: "varint,105,opt,name=number", + Filename: "test_proto/test.proto", +} + +type ComplexExtension struct { + First *int32 `protobuf:"varint,1,opt,name=first" json:"first,omitempty"` + Second *int32 `protobuf:"varint,2,opt,name=second" json:"second,omitempty"` + Third []int32 `protobuf:"varint,3,rep,name=third" json:"third,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ComplexExtension) Reset() { *m = ComplexExtension{} } +func (m *ComplexExtension) String() string { return proto.CompactTextString(m) } +func (*ComplexExtension) ProtoMessage() {} +func (*ComplexExtension) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{15} +} +func (m *ComplexExtension) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ComplexExtension.Unmarshal(m, b) +} +func (m *ComplexExtension) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ComplexExtension.Marshal(b, m, deterministic) +} +func (dst *ComplexExtension) XXX_Merge(src proto.Message) { + xxx_messageInfo_ComplexExtension.Merge(dst, src) +} +func (m *ComplexExtension) XXX_Size() int { + return xxx_messageInfo_ComplexExtension.Size(m) +} +func (m *ComplexExtension) XXX_DiscardUnknown() { + xxx_messageInfo_ComplexExtension.DiscardUnknown(m) +} + +var xxx_messageInfo_ComplexExtension proto.InternalMessageInfo + +func (m *ComplexExtension) GetFirst() int32 { + if m != nil && m.First != nil { + return *m.First + } + return 0 +} + +func (m *ComplexExtension) GetSecond() int32 { + if m != nil && m.Second != nil { + return *m.Second + } + return 0 +} + +func (m *ComplexExtension) GetThird() []int32 { + if m != nil { + return m.Third + } + return nil +} + +type DefaultsMessage struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DefaultsMessage) Reset() { *m = DefaultsMessage{} } +func (m *DefaultsMessage) String() string { return proto.CompactTextString(m) } +func (*DefaultsMessage) ProtoMessage() {} +func (*DefaultsMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{16} +} + +var extRange_DefaultsMessage = []proto.ExtensionRange{ + {Start: 100, End: 536870911}, +} + +func (*DefaultsMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_DefaultsMessage +} +func (m *DefaultsMessage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DefaultsMessage.Unmarshal(m, b) +} +func (m *DefaultsMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DefaultsMessage.Marshal(b, m, deterministic) +} +func (dst *DefaultsMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_DefaultsMessage.Merge(dst, src) +} +func (m *DefaultsMessage) XXX_Size() int { + return xxx_messageInfo_DefaultsMessage.Size(m) +} +func (m *DefaultsMessage) XXX_DiscardUnknown() { + xxx_messageInfo_DefaultsMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_DefaultsMessage proto.InternalMessageInfo + +type MyMessageSet struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `protobuf_messageset:"1" json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MyMessageSet) Reset() { *m = MyMessageSet{} } +func (m *MyMessageSet) String() string { return proto.CompactTextString(m) } +func (*MyMessageSet) ProtoMessage() {} +func (*MyMessageSet) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{17} +} + +func (m *MyMessageSet) MarshalJSON() ([]byte, error) { + return proto.MarshalMessageSetJSON(&m.XXX_InternalExtensions) +} +func (m *MyMessageSet) UnmarshalJSON(buf []byte) error { + return proto.UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions) +} + +var extRange_MyMessageSet = []proto.ExtensionRange{ + {Start: 100, End: 2147483646}, +} + +func (*MyMessageSet) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MyMessageSet +} +func (m *MyMessageSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MyMessageSet.Unmarshal(m, b) +} +func (m *MyMessageSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MyMessageSet.Marshal(b, m, deterministic) +} +func (dst *MyMessageSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_MyMessageSet.Merge(dst, src) +} 
+func (m *MyMessageSet) XXX_Size() int { + return xxx_messageInfo_MyMessageSet.Size(m) +} +func (m *MyMessageSet) XXX_DiscardUnknown() { + xxx_messageInfo_MyMessageSet.DiscardUnknown(m) +} + +var xxx_messageInfo_MyMessageSet proto.InternalMessageInfo + +type Empty struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{18} +} +func (m *Empty) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Empty.Unmarshal(m, b) +} +func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Empty.Marshal(b, m, deterministic) +} +func (dst *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(dst, src) +} +func (m *Empty) XXX_Size() int { + return xxx_messageInfo_Empty.Size(m) +} +func (m *Empty) XXX_DiscardUnknown() { + xxx_messageInfo_Empty.DiscardUnknown(m) +} + +var xxx_messageInfo_Empty proto.InternalMessageInfo + +type MessageList struct { + Message []*MessageList_Message `protobuf:"group,1,rep,name=Message,json=message" json:"message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MessageList) Reset() { *m = MessageList{} } +func (m *MessageList) String() string { return proto.CompactTextString(m) } +func (*MessageList) ProtoMessage() {} +func (*MessageList) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{19} +} +func (m *MessageList) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageList.Unmarshal(m, b) +} +func (m *MessageList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageList.Marshal(b, m, deterministic) +} +func (dst *MessageList) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageList.Merge(dst, src) +} +func (m *MessageList) XXX_Size() int { + return xxx_messageInfo_MessageList.Size(m) +} +func (m *MessageList) XXX_DiscardUnknown() { + xxx_messageInfo_MessageList.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageList proto.InternalMessageInfo + +func (m *MessageList) GetMessage() []*MessageList_Message { + if m != nil { + return m.Message + } + return nil +} + +type MessageList_Message struct { + Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` + Count *int32 `protobuf:"varint,3,req,name=count" json:"count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MessageList_Message) Reset() { *m = MessageList_Message{} } +func (m *MessageList_Message) String() string { return proto.CompactTextString(m) } +func (*MessageList_Message) ProtoMessage() {} +func (*MessageList_Message) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{19, 0} +} +func (m *MessageList_Message) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageList_Message.Unmarshal(m, b) +} +func (m *MessageList_Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageList_Message.Marshal(b, m, deterministic) +} +func (dst *MessageList_Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageList_Message.Merge(dst, src) +} +func (m *MessageList_Message) XXX_Size() int { + 
return xxx_messageInfo_MessageList_Message.Size(m) +} +func (m *MessageList_Message) XXX_DiscardUnknown() { + xxx_messageInfo_MessageList_Message.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageList_Message proto.InternalMessageInfo + +func (m *MessageList_Message) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MessageList_Message) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +type Strings struct { + StringField *string `protobuf:"bytes,1,opt,name=string_field,json=stringField" json:"string_field,omitempty"` + BytesField []byte `protobuf:"bytes,2,opt,name=bytes_field,json=bytesField" json:"bytes_field,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Strings) Reset() { *m = Strings{} } +func (m *Strings) String() string { return proto.CompactTextString(m) } +func (*Strings) ProtoMessage() {} +func (*Strings) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{20} +} +func (m *Strings) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Strings.Unmarshal(m, b) +} +func (m *Strings) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Strings.Marshal(b, m, deterministic) +} +func (dst *Strings) XXX_Merge(src proto.Message) { + xxx_messageInfo_Strings.Merge(dst, src) +} +func (m *Strings) XXX_Size() int { + return xxx_messageInfo_Strings.Size(m) +} +func (m *Strings) XXX_DiscardUnknown() { + xxx_messageInfo_Strings.DiscardUnknown(m) +} + +var xxx_messageInfo_Strings proto.InternalMessageInfo + +func (m *Strings) GetStringField() string { + if m != nil && m.StringField != nil { + return *m.StringField + } + return "" +} + +func (m *Strings) GetBytesField() []byte { + if m != nil { + return m.BytesField + } + return nil +} + +type Defaults struct { + // Default-valued fields of all basic types. + // Same as GoTest, but copied here to make testing easier. 
+ F_Bool *bool `protobuf:"varint,1,opt,name=F_Bool,json=FBool,def=1" json:"F_Bool,omitempty"` + F_Int32 *int32 `protobuf:"varint,2,opt,name=F_Int32,json=FInt32,def=32" json:"F_Int32,omitempty"` + F_Int64 *int64 `protobuf:"varint,3,opt,name=F_Int64,json=FInt64,def=64" json:"F_Int64,omitempty"` + F_Fixed32 *uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,json=FFixed32,def=320" json:"F_Fixed32,omitempty"` + F_Fixed64 *uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,json=FFixed64,def=640" json:"F_Fixed64,omitempty"` + F_Uint32 *uint32 `protobuf:"varint,6,opt,name=F_Uint32,json=FUint32,def=3200" json:"F_Uint32,omitempty"` + F_Uint64 *uint64 `protobuf:"varint,7,opt,name=F_Uint64,json=FUint64,def=6400" json:"F_Uint64,omitempty"` + F_Float *float32 `protobuf:"fixed32,8,opt,name=F_Float,json=FFloat,def=314159" json:"F_Float,omitempty"` + F_Double *float64 `protobuf:"fixed64,9,opt,name=F_Double,json=FDouble,def=271828" json:"F_Double,omitempty"` + F_String *string `protobuf:"bytes,10,opt,name=F_String,json=FString,def=hello, \"world!\"\n" json:"F_String,omitempty"` + F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,json=FBytes,def=Bignose" json:"F_Bytes,omitempty"` + F_Sint32 *int32 `protobuf:"zigzag32,12,opt,name=F_Sint32,json=FSint32,def=-32" json:"F_Sint32,omitempty"` + F_Sint64 *int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,json=FSint64,def=-64" json:"F_Sint64,omitempty"` + F_Enum *Defaults_Color `protobuf:"varint,14,opt,name=F_Enum,json=FEnum,enum=test_proto.Defaults_Color,def=1" json:"F_Enum,omitempty"` + // More fields with crazy defaults. + F_Pinf *float32 `protobuf:"fixed32,15,opt,name=F_Pinf,json=FPinf,def=inf" json:"F_Pinf,omitempty"` + F_Ninf *float32 `protobuf:"fixed32,16,opt,name=F_Ninf,json=FNinf,def=-inf" json:"F_Ninf,omitempty"` + F_Nan *float32 `protobuf:"fixed32,17,opt,name=F_Nan,json=FNan,def=nan" json:"F_Nan,omitempty"` + // Sub-message. + Sub *SubDefaults `protobuf:"bytes,18,opt,name=sub" json:"sub,omitempty"` + // Redundant but explicit defaults. 
+ StrZero *string `protobuf:"bytes,19,opt,name=str_zero,json=strZero,def=" json:"str_zero,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Defaults) Reset() { *m = Defaults{} } +func (m *Defaults) String() string { return proto.CompactTextString(m) } +func (*Defaults) ProtoMessage() {} +func (*Defaults) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{21} +} +func (m *Defaults) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Defaults.Unmarshal(m, b) +} +func (m *Defaults) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Defaults.Marshal(b, m, deterministic) +} +func (dst *Defaults) XXX_Merge(src proto.Message) { + xxx_messageInfo_Defaults.Merge(dst, src) +} +func (m *Defaults) XXX_Size() int { + return xxx_messageInfo_Defaults.Size(m) +} +func (m *Defaults) XXX_DiscardUnknown() { + xxx_messageInfo_Defaults.DiscardUnknown(m) +} + +var xxx_messageInfo_Defaults proto.InternalMessageInfo + +const Default_Defaults_F_Bool bool = true +const Default_Defaults_F_Int32 int32 = 32 +const Default_Defaults_F_Int64 int64 = 64 +const Default_Defaults_F_Fixed32 uint32 = 320 +const Default_Defaults_F_Fixed64 uint64 = 640 +const Default_Defaults_F_Uint32 uint32 = 3200 +const Default_Defaults_F_Uint64 uint64 = 6400 +const Default_Defaults_F_Float float32 = 314159 +const Default_Defaults_F_Double float64 = 271828 +const Default_Defaults_F_String string = "hello, \"world!\"\n" + +var Default_Defaults_F_Bytes []byte = []byte("Bignose") + +const Default_Defaults_F_Sint32 int32 = -32 +const Default_Defaults_F_Sint64 int64 = -64 +const Default_Defaults_F_Enum Defaults_Color = Defaults_GREEN + +var Default_Defaults_F_Pinf float32 = float32(math.Inf(1)) +var Default_Defaults_F_Ninf float32 = float32(math.Inf(-1)) +var Default_Defaults_F_Nan float32 = float32(math.NaN()) + +func (m *Defaults) GetF_Bool() bool { + if m != nil && m.F_Bool != nil { + return *m.F_Bool + } + return Default_Defaults_F_Bool +} + +func (m *Defaults) GetF_Int32() int32 { + if m != nil && m.F_Int32 != nil { + return *m.F_Int32 + } + return Default_Defaults_F_Int32 +} + +func (m *Defaults) GetF_Int64() int64 { + if m != nil && m.F_Int64 != nil { + return *m.F_Int64 + } + return Default_Defaults_F_Int64 +} + +func (m *Defaults) GetF_Fixed32() uint32 { + if m != nil && m.F_Fixed32 != nil { + return *m.F_Fixed32 + } + return Default_Defaults_F_Fixed32 +} + +func (m *Defaults) GetF_Fixed64() uint64 { + if m != nil && m.F_Fixed64 != nil { + return *m.F_Fixed64 + } + return Default_Defaults_F_Fixed64 +} + +func (m *Defaults) GetF_Uint32() uint32 { + if m != nil && m.F_Uint32 != nil { + return *m.F_Uint32 + } + return Default_Defaults_F_Uint32 +} + +func (m *Defaults) GetF_Uint64() uint64 { + if m != nil && m.F_Uint64 != nil { + return *m.F_Uint64 + } + return Default_Defaults_F_Uint64 +} + +func (m *Defaults) GetF_Float() float32 { + if m != nil && m.F_Float != nil { + return *m.F_Float + } + return Default_Defaults_F_Float +} + +func (m *Defaults) GetF_Double() float64 { + if m != nil && m.F_Double != nil { + return *m.F_Double + } + return Default_Defaults_F_Double +} + +func (m *Defaults) GetF_String() string { + if m != nil && m.F_String != nil { + return *m.F_String + } + return Default_Defaults_F_String +} + +func (m *Defaults) GetF_Bytes() []byte { + if m != nil && m.F_Bytes != nil { + return m.F_Bytes + } + return append([]byte(nil), Default_Defaults_F_Bytes...) 
+} + +func (m *Defaults) GetF_Sint32() int32 { + if m != nil && m.F_Sint32 != nil { + return *m.F_Sint32 + } + return Default_Defaults_F_Sint32 +} + +func (m *Defaults) GetF_Sint64() int64 { + if m != nil && m.F_Sint64 != nil { + return *m.F_Sint64 + } + return Default_Defaults_F_Sint64 +} + +func (m *Defaults) GetF_Enum() Defaults_Color { + if m != nil && m.F_Enum != nil { + return *m.F_Enum + } + return Default_Defaults_F_Enum +} + +func (m *Defaults) GetF_Pinf() float32 { + if m != nil && m.F_Pinf != nil { + return *m.F_Pinf + } + return Default_Defaults_F_Pinf +} + +func (m *Defaults) GetF_Ninf() float32 { + if m != nil && m.F_Ninf != nil { + return *m.F_Ninf + } + return Default_Defaults_F_Ninf +} + +func (m *Defaults) GetF_Nan() float32 { + if m != nil && m.F_Nan != nil { + return *m.F_Nan + } + return Default_Defaults_F_Nan +} + +func (m *Defaults) GetSub() *SubDefaults { + if m != nil { + return m.Sub + } + return nil +} + +func (m *Defaults) GetStrZero() string { + if m != nil && m.StrZero != nil { + return *m.StrZero + } + return "" +} + +type SubDefaults struct { + N *int64 `protobuf:"varint,1,opt,name=n,def=7" json:"n,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SubDefaults) Reset() { *m = SubDefaults{} } +func (m *SubDefaults) String() string { return proto.CompactTextString(m) } +func (*SubDefaults) ProtoMessage() {} +func (*SubDefaults) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{22} +} +func (m *SubDefaults) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SubDefaults.Unmarshal(m, b) +} +func (m *SubDefaults) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SubDefaults.Marshal(b, m, deterministic) +} +func (dst *SubDefaults) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubDefaults.Merge(dst, src) +} +func (m *SubDefaults) XXX_Size() int { + return xxx_messageInfo_SubDefaults.Size(m) +} +func (m *SubDefaults) XXX_DiscardUnknown() { + xxx_messageInfo_SubDefaults.DiscardUnknown(m) +} + +var xxx_messageInfo_SubDefaults proto.InternalMessageInfo + +const Default_SubDefaults_N int64 = 7 + +func (m *SubDefaults) GetN() int64 { + if m != nil && m.N != nil { + return *m.N + } + return Default_SubDefaults_N +} + +type RepeatedEnum struct { + Color []RepeatedEnum_Color `protobuf:"varint,1,rep,name=color,enum=test_proto.RepeatedEnum_Color" json:"color,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RepeatedEnum) Reset() { *m = RepeatedEnum{} } +func (m *RepeatedEnum) String() string { return proto.CompactTextString(m) } +func (*RepeatedEnum) ProtoMessage() {} +func (*RepeatedEnum) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{23} +} +func (m *RepeatedEnum) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RepeatedEnum.Unmarshal(m, b) +} +func (m *RepeatedEnum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RepeatedEnum.Marshal(b, m, deterministic) +} +func (dst *RepeatedEnum) XXX_Merge(src proto.Message) { + xxx_messageInfo_RepeatedEnum.Merge(dst, src) +} +func (m *RepeatedEnum) XXX_Size() int { + return xxx_messageInfo_RepeatedEnum.Size(m) +} +func (m *RepeatedEnum) XXX_DiscardUnknown() { + xxx_messageInfo_RepeatedEnum.DiscardUnknown(m) +} + +var xxx_messageInfo_RepeatedEnum proto.InternalMessageInfo + +func (m *RepeatedEnum) 
GetColor() []RepeatedEnum_Color { + if m != nil { + return m.Color + } + return nil +} + +type MoreRepeated struct { + Bools []bool `protobuf:"varint,1,rep,name=bools" json:"bools,omitempty"` + BoolsPacked []bool `protobuf:"varint,2,rep,packed,name=bools_packed,json=boolsPacked" json:"bools_packed,omitempty"` + Ints []int32 `protobuf:"varint,3,rep,name=ints" json:"ints,omitempty"` + IntsPacked []int32 `protobuf:"varint,4,rep,packed,name=ints_packed,json=intsPacked" json:"ints_packed,omitempty"` + Int64SPacked []int64 `protobuf:"varint,7,rep,packed,name=int64s_packed,json=int64sPacked" json:"int64s_packed,omitempty"` + Strings []string `protobuf:"bytes,5,rep,name=strings" json:"strings,omitempty"` + Fixeds []uint32 `protobuf:"fixed32,6,rep,name=fixeds" json:"fixeds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MoreRepeated) Reset() { *m = MoreRepeated{} } +func (m *MoreRepeated) String() string { return proto.CompactTextString(m) } +func (*MoreRepeated) ProtoMessage() {} +func (*MoreRepeated) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{24} +} +func (m *MoreRepeated) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MoreRepeated.Unmarshal(m, b) +} +func (m *MoreRepeated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MoreRepeated.Marshal(b, m, deterministic) +} +func (dst *MoreRepeated) XXX_Merge(src proto.Message) { + xxx_messageInfo_MoreRepeated.Merge(dst, src) +} +func (m *MoreRepeated) XXX_Size() int { + return xxx_messageInfo_MoreRepeated.Size(m) +} +func (m *MoreRepeated) XXX_DiscardUnknown() { + xxx_messageInfo_MoreRepeated.DiscardUnknown(m) +} + +var xxx_messageInfo_MoreRepeated proto.InternalMessageInfo + +func (m *MoreRepeated) GetBools() []bool { + if m != nil { + return m.Bools + } + return nil +} + +func (m *MoreRepeated) GetBoolsPacked() []bool { + if m != nil { + return m.BoolsPacked + } + return nil +} + +func (m *MoreRepeated) GetInts() []int32 { + if m != nil { + return m.Ints + } + return nil +} + +func (m *MoreRepeated) GetIntsPacked() []int32 { + if m != nil { + return m.IntsPacked + } + return nil +} + +func (m *MoreRepeated) GetInt64SPacked() []int64 { + if m != nil { + return m.Int64SPacked + } + return nil +} + +func (m *MoreRepeated) GetStrings() []string { + if m != nil { + return m.Strings + } + return nil +} + +func (m *MoreRepeated) GetFixeds() []uint32 { + if m != nil { + return m.Fixeds + } + return nil +} + +type GroupOld struct { + G *GroupOld_G `protobuf:"group,101,opt,name=G,json=g" json:"g,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GroupOld) Reset() { *m = GroupOld{} } +func (m *GroupOld) String() string { return proto.CompactTextString(m) } +func (*GroupOld) ProtoMessage() {} +func (*GroupOld) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{25} +} +func (m *GroupOld) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GroupOld.Unmarshal(m, b) +} +func (m *GroupOld) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GroupOld.Marshal(b, m, deterministic) +} +func (dst *GroupOld) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupOld.Merge(dst, src) +} +func (m *GroupOld) XXX_Size() int { + return xxx_messageInfo_GroupOld.Size(m) +} +func (m *GroupOld) XXX_DiscardUnknown() { + 
xxx_messageInfo_GroupOld.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupOld proto.InternalMessageInfo + +func (m *GroupOld) GetG() *GroupOld_G { + if m != nil { + return m.G + } + return nil +} + +type GroupOld_G struct { + X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GroupOld_G) Reset() { *m = GroupOld_G{} } +func (m *GroupOld_G) String() string { return proto.CompactTextString(m) } +func (*GroupOld_G) ProtoMessage() {} +func (*GroupOld_G) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{25, 0} +} +func (m *GroupOld_G) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GroupOld_G.Unmarshal(m, b) +} +func (m *GroupOld_G) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GroupOld_G.Marshal(b, m, deterministic) +} +func (dst *GroupOld_G) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupOld_G.Merge(dst, src) +} +func (m *GroupOld_G) XXX_Size() int { + return xxx_messageInfo_GroupOld_G.Size(m) +} +func (m *GroupOld_G) XXX_DiscardUnknown() { + xxx_messageInfo_GroupOld_G.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupOld_G proto.InternalMessageInfo + +func (m *GroupOld_G) GetX() int32 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +type GroupNew struct { + G *GroupNew_G `protobuf:"group,101,opt,name=G,json=g" json:"g,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GroupNew) Reset() { *m = GroupNew{} } +func (m *GroupNew) String() string { return proto.CompactTextString(m) } +func (*GroupNew) ProtoMessage() {} +func (*GroupNew) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{26} +} +func (m *GroupNew) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GroupNew.Unmarshal(m, b) +} +func (m *GroupNew) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GroupNew.Marshal(b, m, deterministic) +} +func (dst *GroupNew) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupNew.Merge(dst, src) +} +func (m *GroupNew) XXX_Size() int { + return xxx_messageInfo_GroupNew.Size(m) +} +func (m *GroupNew) XXX_DiscardUnknown() { + xxx_messageInfo_GroupNew.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupNew proto.InternalMessageInfo + +func (m *GroupNew) GetG() *GroupNew_G { + if m != nil { + return m.G + } + return nil +} + +type GroupNew_G struct { + X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` + Y *int32 `protobuf:"varint,3,opt,name=y" json:"y,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GroupNew_G) Reset() { *m = GroupNew_G{} } +func (m *GroupNew_G) String() string { return proto.CompactTextString(m) } +func (*GroupNew_G) ProtoMessage() {} +func (*GroupNew_G) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{26, 0} +} +func (m *GroupNew_G) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GroupNew_G.Unmarshal(m, b) +} +func (m *GroupNew_G) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GroupNew_G.Marshal(b, m, deterministic) +} +func (dst *GroupNew_G) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupNew_G.Merge(dst, src) +} +func (m *GroupNew_G) XXX_Size() int { + return xxx_messageInfo_GroupNew_G.Size(m) +} +func 
(m *GroupNew_G) XXX_DiscardUnknown() { + xxx_messageInfo_GroupNew_G.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupNew_G proto.InternalMessageInfo + +func (m *GroupNew_G) GetX() int32 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +func (m *GroupNew_G) GetY() int32 { + if m != nil && m.Y != nil { + return *m.Y + } + return 0 +} + +type FloatingPoint struct { + F *float64 `protobuf:"fixed64,1,req,name=f" json:"f,omitempty"` + Exact *bool `protobuf:"varint,2,opt,name=exact" json:"exact,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FloatingPoint) Reset() { *m = FloatingPoint{} } +func (m *FloatingPoint) String() string { return proto.CompactTextString(m) } +func (*FloatingPoint) ProtoMessage() {} +func (*FloatingPoint) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{27} +} +func (m *FloatingPoint) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FloatingPoint.Unmarshal(m, b) +} +func (m *FloatingPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FloatingPoint.Marshal(b, m, deterministic) +} +func (dst *FloatingPoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_FloatingPoint.Merge(dst, src) +} +func (m *FloatingPoint) XXX_Size() int { + return xxx_messageInfo_FloatingPoint.Size(m) +} +func (m *FloatingPoint) XXX_DiscardUnknown() { + xxx_messageInfo_FloatingPoint.DiscardUnknown(m) +} + +var xxx_messageInfo_FloatingPoint proto.InternalMessageInfo + +func (m *FloatingPoint) GetF() float64 { + if m != nil && m.F != nil { + return *m.F + } + return 0 +} + +func (m *FloatingPoint) GetExact() bool { + if m != nil && m.Exact != nil { + return *m.Exact + } + return false +} + +type MessageWithMap struct { + NameMapping map[int32]string `protobuf:"bytes,1,rep,name=name_mapping,json=nameMapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + MsgMapping map[int64]*FloatingPoint `protobuf:"bytes,2,rep,name=msg_mapping,json=msgMapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + ByteMapping map[bool][]byte `protobuf:"bytes,3,rep,name=byte_mapping,json=byteMapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + StrToStr map[string]string `protobuf:"bytes,4,rep,name=str_to_str,json=strToStr" json:"str_to_str,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } +func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } +func (*MessageWithMap) ProtoMessage() {} +func (*MessageWithMap) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{28} +} +func (m *MessageWithMap) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageWithMap.Unmarshal(m, b) +} +func (m *MessageWithMap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageWithMap.Marshal(b, m, deterministic) +} +func (dst *MessageWithMap) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageWithMap.Merge(dst, src) +} +func (m *MessageWithMap) XXX_Size() int { + return xxx_messageInfo_MessageWithMap.Size(m) +} +func (m *MessageWithMap) XXX_DiscardUnknown() { + 
xxx_messageInfo_MessageWithMap.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageWithMap proto.InternalMessageInfo + +func (m *MessageWithMap) GetNameMapping() map[int32]string { + if m != nil { + return m.NameMapping + } + return nil +} + +func (m *MessageWithMap) GetMsgMapping() map[int64]*FloatingPoint { + if m != nil { + return m.MsgMapping + } + return nil +} + +func (m *MessageWithMap) GetByteMapping() map[bool][]byte { + if m != nil { + return m.ByteMapping + } + return nil +} + +func (m *MessageWithMap) GetStrToStr() map[string]string { + if m != nil { + return m.StrToStr + } + return nil +} + +type Oneof struct { + // Types that are valid to be assigned to Union: + // *Oneof_F_Bool + // *Oneof_F_Int32 + // *Oneof_F_Int64 + // *Oneof_F_Fixed32 + // *Oneof_F_Fixed64 + // *Oneof_F_Uint32 + // *Oneof_F_Uint64 + // *Oneof_F_Float + // *Oneof_F_Double + // *Oneof_F_String + // *Oneof_F_Bytes + // *Oneof_F_Sint32 + // *Oneof_F_Sint64 + // *Oneof_F_Enum + // *Oneof_F_Message + // *Oneof_FGroup + // *Oneof_F_Largest_Tag + Union isOneof_Union `protobuf_oneof:"union"` + // Types that are valid to be assigned to Tormato: + // *Oneof_Value + Tormato isOneof_Tormato `protobuf_oneof:"tormato"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Oneof) Reset() { *m = Oneof{} } +func (m *Oneof) String() string { return proto.CompactTextString(m) } +func (*Oneof) ProtoMessage() {} +func (*Oneof) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{29} +} +func (m *Oneof) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Oneof.Unmarshal(m, b) +} +func (m *Oneof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Oneof.Marshal(b, m, deterministic) +} +func (dst *Oneof) XXX_Merge(src proto.Message) { + xxx_messageInfo_Oneof.Merge(dst, src) +} +func (m *Oneof) XXX_Size() int { + return xxx_messageInfo_Oneof.Size(m) +} +func (m *Oneof) XXX_DiscardUnknown() { + xxx_messageInfo_Oneof.DiscardUnknown(m) +} + +var xxx_messageInfo_Oneof proto.InternalMessageInfo + +type isOneof_Union interface { + isOneof_Union() +} +type isOneof_Tormato interface { + isOneof_Tormato() +} + +type Oneof_F_Bool struct { + F_Bool bool `protobuf:"varint,1,opt,name=F_Bool,json=FBool,oneof"` +} +type Oneof_F_Int32 struct { + F_Int32 int32 `protobuf:"varint,2,opt,name=F_Int32,json=FInt32,oneof"` +} +type Oneof_F_Int64 struct { + F_Int64 int64 `protobuf:"varint,3,opt,name=F_Int64,json=FInt64,oneof"` +} +type Oneof_F_Fixed32 struct { + F_Fixed32 uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,json=FFixed32,oneof"` +} +type Oneof_F_Fixed64 struct { + F_Fixed64 uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,json=FFixed64,oneof"` +} +type Oneof_F_Uint32 struct { + F_Uint32 uint32 `protobuf:"varint,6,opt,name=F_Uint32,json=FUint32,oneof"` +} +type Oneof_F_Uint64 struct { + F_Uint64 uint64 `protobuf:"varint,7,opt,name=F_Uint64,json=FUint64,oneof"` +} +type Oneof_F_Float struct { + F_Float float32 `protobuf:"fixed32,8,opt,name=F_Float,json=FFloat,oneof"` +} +type Oneof_F_Double struct { + F_Double float64 `protobuf:"fixed64,9,opt,name=F_Double,json=FDouble,oneof"` +} +type Oneof_F_String struct { + F_String string `protobuf:"bytes,10,opt,name=F_String,json=FString,oneof"` +} +type Oneof_F_Bytes struct { + F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,json=FBytes,oneof"` +} +type Oneof_F_Sint32 struct { + F_Sint32 int32 `protobuf:"zigzag32,12,opt,name=F_Sint32,json=FSint32,oneof"` +} +type 
Oneof_F_Sint64 struct { + F_Sint64 int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,json=FSint64,oneof"` +} +type Oneof_F_Enum struct { + F_Enum MyMessage_Color `protobuf:"varint,14,opt,name=F_Enum,json=FEnum,enum=test_proto.MyMessage_Color,oneof"` +} +type Oneof_F_Message struct { + F_Message *GoTestField `protobuf:"bytes,15,opt,name=F_Message,json=FMessage,oneof"` +} +type Oneof_FGroup struct { + FGroup *Oneof_F_Group `protobuf:"group,16,opt,name=F_Group,json=fGroup,oneof"` +} +type Oneof_F_Largest_Tag struct { + F_Largest_Tag int32 `protobuf:"varint,536870911,opt,name=F_Largest_Tag,json=FLargestTag,oneof"` +} +type Oneof_Value struct { + Value int32 `protobuf:"varint,100,opt,name=value,oneof"` +} + +func (*Oneof_F_Bool) isOneof_Union() {} +func (*Oneof_F_Int32) isOneof_Union() {} +func (*Oneof_F_Int64) isOneof_Union() {} +func (*Oneof_F_Fixed32) isOneof_Union() {} +func (*Oneof_F_Fixed64) isOneof_Union() {} +func (*Oneof_F_Uint32) isOneof_Union() {} +func (*Oneof_F_Uint64) isOneof_Union() {} +func (*Oneof_F_Float) isOneof_Union() {} +func (*Oneof_F_Double) isOneof_Union() {} +func (*Oneof_F_String) isOneof_Union() {} +func (*Oneof_F_Bytes) isOneof_Union() {} +func (*Oneof_F_Sint32) isOneof_Union() {} +func (*Oneof_F_Sint64) isOneof_Union() {} +func (*Oneof_F_Enum) isOneof_Union() {} +func (*Oneof_F_Message) isOneof_Union() {} +func (*Oneof_FGroup) isOneof_Union() {} +func (*Oneof_F_Largest_Tag) isOneof_Union() {} +func (*Oneof_Value) isOneof_Tormato() {} + +func (m *Oneof) GetUnion() isOneof_Union { + if m != nil { + return m.Union + } + return nil +} +func (m *Oneof) GetTormato() isOneof_Tormato { + if m != nil { + return m.Tormato + } + return nil +} + +func (m *Oneof) GetF_Bool() bool { + if x, ok := m.GetUnion().(*Oneof_F_Bool); ok { + return x.F_Bool + } + return false +} + +func (m *Oneof) GetF_Int32() int32 { + if x, ok := m.GetUnion().(*Oneof_F_Int32); ok { + return x.F_Int32 + } + return 0 +} + +func (m *Oneof) GetF_Int64() int64 { + if x, ok := m.GetUnion().(*Oneof_F_Int64); ok { + return x.F_Int64 + } + return 0 +} + +func (m *Oneof) GetF_Fixed32() uint32 { + if x, ok := m.GetUnion().(*Oneof_F_Fixed32); ok { + return x.F_Fixed32 + } + return 0 +} + +func (m *Oneof) GetF_Fixed64() uint64 { + if x, ok := m.GetUnion().(*Oneof_F_Fixed64); ok { + return x.F_Fixed64 + } + return 0 +} + +func (m *Oneof) GetF_Uint32() uint32 { + if x, ok := m.GetUnion().(*Oneof_F_Uint32); ok { + return x.F_Uint32 + } + return 0 +} + +func (m *Oneof) GetF_Uint64() uint64 { + if x, ok := m.GetUnion().(*Oneof_F_Uint64); ok { + return x.F_Uint64 + } + return 0 +} + +func (m *Oneof) GetF_Float() float32 { + if x, ok := m.GetUnion().(*Oneof_F_Float); ok { + return x.F_Float + } + return 0 +} + +func (m *Oneof) GetF_Double() float64 { + if x, ok := m.GetUnion().(*Oneof_F_Double); ok { + return x.F_Double + } + return 0 +} + +func (m *Oneof) GetF_String() string { + if x, ok := m.GetUnion().(*Oneof_F_String); ok { + return x.F_String + } + return "" +} + +func (m *Oneof) GetF_Bytes() []byte { + if x, ok := m.GetUnion().(*Oneof_F_Bytes); ok { + return x.F_Bytes + } + return nil +} + +func (m *Oneof) GetF_Sint32() int32 { + if x, ok := m.GetUnion().(*Oneof_F_Sint32); ok { + return x.F_Sint32 + } + return 0 +} + +func (m *Oneof) GetF_Sint64() int64 { + if x, ok := m.GetUnion().(*Oneof_F_Sint64); ok { + return x.F_Sint64 + } + return 0 +} + +func (m *Oneof) GetF_Enum() MyMessage_Color { + if x, ok := m.GetUnion().(*Oneof_F_Enum); ok { + return x.F_Enum + } + return MyMessage_RED +} + +func (m *Oneof) 
GetF_Message() *GoTestField { + if x, ok := m.GetUnion().(*Oneof_F_Message); ok { + return x.F_Message + } + return nil +} + +func (m *Oneof) GetFGroup() *Oneof_F_Group { + if x, ok := m.GetUnion().(*Oneof_FGroup); ok { + return x.FGroup + } + return nil +} + +func (m *Oneof) GetF_Largest_Tag() int32 { + if x, ok := m.GetUnion().(*Oneof_F_Largest_Tag); ok { + return x.F_Largest_Tag + } + return 0 +} + +func (m *Oneof) GetValue() int32 { + if x, ok := m.GetTormato().(*Oneof_Value); ok { + return x.Value + } + return 0 +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Oneof) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Oneof_OneofMarshaler, _Oneof_OneofUnmarshaler, _Oneof_OneofSizer, []interface{}{ + (*Oneof_F_Bool)(nil), + (*Oneof_F_Int32)(nil), + (*Oneof_F_Int64)(nil), + (*Oneof_F_Fixed32)(nil), + (*Oneof_F_Fixed64)(nil), + (*Oneof_F_Uint32)(nil), + (*Oneof_F_Uint64)(nil), + (*Oneof_F_Float)(nil), + (*Oneof_F_Double)(nil), + (*Oneof_F_String)(nil), + (*Oneof_F_Bytes)(nil), + (*Oneof_F_Sint32)(nil), + (*Oneof_F_Sint64)(nil), + (*Oneof_F_Enum)(nil), + (*Oneof_F_Message)(nil), + (*Oneof_FGroup)(nil), + (*Oneof_F_Largest_Tag)(nil), + (*Oneof_Value)(nil), + } +} + +func _Oneof_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Oneof) + // union + switch x := m.Union.(type) { + case *Oneof_F_Bool: + t := uint64(0) + if x.F_Bool { + t = 1 + } + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *Oneof_F_Int32: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Int32)) + case *Oneof_F_Int64: + b.EncodeVarint(3<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Int64)) + case *Oneof_F_Fixed32: + b.EncodeVarint(4<<3 | proto.WireFixed32) + b.EncodeFixed32(uint64(x.F_Fixed32)) + case *Oneof_F_Fixed64: + b.EncodeVarint(5<<3 | proto.WireFixed64) + b.EncodeFixed64(uint64(x.F_Fixed64)) + case *Oneof_F_Uint32: + b.EncodeVarint(6<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Uint32)) + case *Oneof_F_Uint64: + b.EncodeVarint(7<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Uint64)) + case *Oneof_F_Float: + b.EncodeVarint(8<<3 | proto.WireFixed32) + b.EncodeFixed32(uint64(math.Float32bits(x.F_Float))) + case *Oneof_F_Double: + b.EncodeVarint(9<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.F_Double)) + case *Oneof_F_String: + b.EncodeVarint(10<<3 | proto.WireBytes) + b.EncodeStringBytes(x.F_String) + case *Oneof_F_Bytes: + b.EncodeVarint(11<<3 | proto.WireBytes) + b.EncodeRawBytes(x.F_Bytes) + case *Oneof_F_Sint32: + b.EncodeVarint(12<<3 | proto.WireVarint) + b.EncodeZigzag32(uint64(x.F_Sint32)) + case *Oneof_F_Sint64: + b.EncodeVarint(13<<3 | proto.WireVarint) + b.EncodeZigzag64(uint64(x.F_Sint64)) + case *Oneof_F_Enum: + b.EncodeVarint(14<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Enum)) + case *Oneof_F_Message: + b.EncodeVarint(15<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.F_Message); err != nil { + return err + } + case *Oneof_FGroup: + b.EncodeVarint(16<<3 | proto.WireStartGroup) + if err := b.Marshal(x.FGroup); err != nil { + return err + } + b.EncodeVarint(16<<3 | proto.WireEndGroup) + case *Oneof_F_Largest_Tag: + b.EncodeVarint(536870911<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Largest_Tag)) + case nil: + default: + return fmt.Errorf("Oneof.Union has unexpected type %T", x) + } + // tormato + switch x := m.Tormato.(type) 
{ + case *Oneof_Value: + b.EncodeVarint(100<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Value)) + case nil: + default: + return fmt.Errorf("Oneof.Tormato has unexpected type %T", x) + } + return nil +} + +func _Oneof_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Oneof) + switch tag { + case 1: // union.F_Bool + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Bool{x != 0} + return true, err + case 2: // union.F_Int32 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Int32{int32(x)} + return true, err + case 3: // union.F_Int64 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Int64{int64(x)} + return true, err + case 4: // union.F_Fixed32 + if wire != proto.WireFixed32 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed32() + m.Union = &Oneof_F_Fixed32{uint32(x)} + return true, err + case 5: // union.F_Fixed64 + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Union = &Oneof_F_Fixed64{x} + return true, err + case 6: // union.F_Uint32 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Uint32{uint32(x)} + return true, err + case 7: // union.F_Uint64 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Uint64{x} + return true, err + case 8: // union.F_Float + if wire != proto.WireFixed32 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed32() + m.Union = &Oneof_F_Float{math.Float32frombits(uint32(x))} + return true, err + case 9: // union.F_Double + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Union = &Oneof_F_Double{math.Float64frombits(x)} + return true, err + case 10: // union.F_String + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &Oneof_F_String{x} + return true, err + case 11: // union.F_Bytes + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Union = &Oneof_F_Bytes{x} + return true, err + case 12: // union.F_Sint32 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeZigzag32() + m.Union = &Oneof_F_Sint32{int32(x)} + return true, err + case 13: // union.F_Sint64 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeZigzag64() + m.Union = &Oneof_F_Sint64{int64(x)} + return true, err + case 14: // union.F_Enum + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Enum{MyMessage_Color(x)} + return true, err + case 15: // union.F_Message + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GoTestField) + err := b.DecodeMessage(msg) + m.Union = &Oneof_F_Message{msg} + return true, err + case 16: // union.f_group + if wire != proto.WireStartGroup { + return true, proto.ErrInternalBadWireType + } + msg := new(Oneof_F_Group) + err := b.DecodeGroup(msg) + m.Union = &Oneof_FGroup{msg} + return true, err + case 536870911: // 
union.F_Largest_Tag + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Largest_Tag{int32(x)} + return true, err + case 100: // tormato.value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Tormato = &Oneof_Value{int32(x)} + return true, err + default: + return false, nil + } +} + +func _Oneof_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Oneof) + // union + switch x := m.Union.(type) { + case *Oneof_F_Bool: + n += 1 // tag and wire + n += 1 + case *Oneof_F_Int32: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.F_Int32)) + case *Oneof_F_Int64: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.F_Int64)) + case *Oneof_F_Fixed32: + n += 1 // tag and wire + n += 4 + case *Oneof_F_Fixed64: + n += 1 // tag and wire + n += 8 + case *Oneof_F_Uint32: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.F_Uint32)) + case *Oneof_F_Uint64: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.F_Uint64)) + case *Oneof_F_Float: + n += 1 // tag and wire + n += 4 + case *Oneof_F_Double: + n += 1 // tag and wire + n += 8 + case *Oneof_F_String: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.F_String))) + n += len(x.F_String) + case *Oneof_F_Bytes: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.F_Bytes))) + n += len(x.F_Bytes) + case *Oneof_F_Sint32: + n += 1 // tag and wire + n += proto.SizeVarint(uint64((uint32(x.F_Sint32) << 1) ^ uint32((int32(x.F_Sint32) >> 31)))) + case *Oneof_F_Sint64: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(uint64(x.F_Sint64<<1) ^ uint64((int64(x.F_Sint64) >> 63)))) + case *Oneof_F_Enum: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.F_Enum)) + case *Oneof_F_Message: + s := proto.Size(x.F_Message) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Oneof_FGroup: + n += 2 // tag and wire + n += proto.Size(x.FGroup) + n += 2 // tag and wire + case *Oneof_F_Largest_Tag: + n += 10 // tag and wire + n += proto.SizeVarint(uint64(x.F_Largest_Tag)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + // tormato + switch x := m.Tormato.(type) { + case *Oneof_Value: + n += 2 // tag and wire + n += proto.SizeVarint(uint64(x.Value)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Oneof_F_Group struct { + X *int32 `protobuf:"varint,17,opt,name=x" json:"x,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Oneof_F_Group) Reset() { *m = Oneof_F_Group{} } +func (m *Oneof_F_Group) String() string { return proto.CompactTextString(m) } +func (*Oneof_F_Group) ProtoMessage() {} +func (*Oneof_F_Group) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{29, 0} +} +func (m *Oneof_F_Group) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Oneof_F_Group.Unmarshal(m, b) +} +func (m *Oneof_F_Group) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Oneof_F_Group.Marshal(b, m, deterministic) +} +func (dst *Oneof_F_Group) XXX_Merge(src proto.Message) { + xxx_messageInfo_Oneof_F_Group.Merge(dst, src) +} +func (m *Oneof_F_Group) XXX_Size() int { + return xxx_messageInfo_Oneof_F_Group.Size(m) +} +func (m *Oneof_F_Group) XXX_DiscardUnknown() { + xxx_messageInfo_Oneof_F_Group.DiscardUnknown(m) +} + +var 
xxx_messageInfo_Oneof_F_Group proto.InternalMessageInfo + +func (m *Oneof_F_Group) GetX() int32 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +type Communique struct { + MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry,json=makeMeCry" json:"make_me_cry,omitempty"` + // This is a oneof, called "union". + // + // Types that are valid to be assigned to Union: + // *Communique_Number + // *Communique_Name + // *Communique_Data + // *Communique_TempC + // *Communique_Col + // *Communique_Msg + Union isCommunique_Union `protobuf_oneof:"union"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Communique) Reset() { *m = Communique{} } +func (m *Communique) String() string { return proto.CompactTextString(m) } +func (*Communique) ProtoMessage() {} +func (*Communique) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{30} +} +func (m *Communique) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Communique.Unmarshal(m, b) +} +func (m *Communique) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Communique.Marshal(b, m, deterministic) +} +func (dst *Communique) XXX_Merge(src proto.Message) { + xxx_messageInfo_Communique.Merge(dst, src) +} +func (m *Communique) XXX_Size() int { + return xxx_messageInfo_Communique.Size(m) +} +func (m *Communique) XXX_DiscardUnknown() { + xxx_messageInfo_Communique.DiscardUnknown(m) +} + +var xxx_messageInfo_Communique proto.InternalMessageInfo + +type isCommunique_Union interface { + isCommunique_Union() +} + +type Communique_Number struct { + Number int32 `protobuf:"varint,5,opt,name=number,oneof"` +} +type Communique_Name struct { + Name string `protobuf:"bytes,6,opt,name=name,oneof"` +} +type Communique_Data struct { + Data []byte `protobuf:"bytes,7,opt,name=data,oneof"` +} +type Communique_TempC struct { + TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,json=tempC,oneof"` +} +type Communique_Col struct { + Col MyMessage_Color `protobuf:"varint,9,opt,name=col,enum=test_proto.MyMessage_Color,oneof"` +} +type Communique_Msg struct { + Msg *Strings `protobuf:"bytes,10,opt,name=msg,oneof"` +} + +func (*Communique_Number) isCommunique_Union() {} +func (*Communique_Name) isCommunique_Union() {} +func (*Communique_Data) isCommunique_Union() {} +func (*Communique_TempC) isCommunique_Union() {} +func (*Communique_Col) isCommunique_Union() {} +func (*Communique_Msg) isCommunique_Union() {} + +func (m *Communique) GetUnion() isCommunique_Union { + if m != nil { + return m.Union + } + return nil +} + +func (m *Communique) GetMakeMeCry() bool { + if m != nil && m.MakeMeCry != nil { + return *m.MakeMeCry + } + return false +} + +func (m *Communique) GetNumber() int32 { + if x, ok := m.GetUnion().(*Communique_Number); ok { + return x.Number + } + return 0 +} + +func (m *Communique) GetName() string { + if x, ok := m.GetUnion().(*Communique_Name); ok { + return x.Name + } + return "" +} + +func (m *Communique) GetData() []byte { + if x, ok := m.GetUnion().(*Communique_Data); ok { + return x.Data + } + return nil +} + +func (m *Communique) GetTempC() float64 { + if x, ok := m.GetUnion().(*Communique_TempC); ok { + return x.TempC + } + return 0 +} + +func (m *Communique) GetCol() MyMessage_Color { + if x, ok := m.GetUnion().(*Communique_Col); ok { + return x.Col + } + return MyMessage_RED +} + +func (m *Communique) GetMsg() *Strings { + if x, ok := m.GetUnion().(*Communique_Msg); ok { + return x.Msg + 
} + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Communique) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Communique_OneofMarshaler, _Communique_OneofUnmarshaler, _Communique_OneofSizer, []interface{}{ + (*Communique_Number)(nil), + (*Communique_Name)(nil), + (*Communique_Data)(nil), + (*Communique_TempC)(nil), + (*Communique_Col)(nil), + (*Communique_Msg)(nil), + } +} + +func _Communique_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Communique) + // union + switch x := m.Union.(type) { + case *Communique_Number: + b.EncodeVarint(5<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Number)) + case *Communique_Name: + b.EncodeVarint(6<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Name) + case *Communique_Data: + b.EncodeVarint(7<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Data) + case *Communique_TempC: + b.EncodeVarint(8<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.TempC)) + case *Communique_Col: + b.EncodeVarint(9<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Col)) + case *Communique_Msg: + b.EncodeVarint(10<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Msg); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Communique.Union has unexpected type %T", x) + } + return nil +} + +func _Communique_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Communique) + switch tag { + case 5: // union.number + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Number{int32(x)} + return true, err + case 6: // union.name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &Communique_Name{x} + return true, err + case 7: // union.data + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Union = &Communique_Data{x} + return true, err + case 8: // union.temp_c + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Union = &Communique_TempC{math.Float64frombits(x)} + return true, err + case 9: // union.col + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Col{MyMessage_Color(x)} + return true, err + case 10: // union.msg + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Strings) + err := b.DecodeMessage(msg) + m.Union = &Communique_Msg{msg} + return true, err + default: + return false, nil + } +} + +func _Communique_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Communique) + // union + switch x := m.Union.(type) { + case *Communique_Number: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.Number)) + case *Communique_Name: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Name))) + n += len(x.Name) + case *Communique_Data: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Data))) + n += len(x.Data) + case *Communique_TempC: + n += 1 // tag and wire + n += 8 + case *Communique_Col: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.Col)) + case *Communique_Msg: + s := proto.Size(x.Msg) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case 
nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type TestUTF8 struct { + Scalar *string `protobuf:"bytes,1,opt,name=scalar" json:"scalar,omitempty"` + Vector []string `protobuf:"bytes,2,rep,name=vector" json:"vector,omitempty"` + // Types that are valid to be assigned to Oneof: + // *TestUTF8_Field + Oneof isTestUTF8_Oneof `protobuf_oneof:"oneof"` + MapKey map[string]int64 `protobuf:"bytes,4,rep,name=map_key,json=mapKey" json:"map_key,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + MapValue map[int64]string `protobuf:"bytes,5,rep,name=map_value,json=mapValue" json:"map_value,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TestUTF8) Reset() { *m = TestUTF8{} } +func (m *TestUTF8) String() string { return proto.CompactTextString(m) } +func (*TestUTF8) ProtoMessage() {} +func (*TestUTF8) Descriptor() ([]byte, []int) { + return fileDescriptor_test_ee9f66cbbebc227c, []int{31} +} +func (m *TestUTF8) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TestUTF8.Unmarshal(m, b) +} +func (m *TestUTF8) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TestUTF8.Marshal(b, m, deterministic) +} +func (dst *TestUTF8) XXX_Merge(src proto.Message) { + xxx_messageInfo_TestUTF8.Merge(dst, src) +} +func (m *TestUTF8) XXX_Size() int { + return xxx_messageInfo_TestUTF8.Size(m) +} +func (m *TestUTF8) XXX_DiscardUnknown() { + xxx_messageInfo_TestUTF8.DiscardUnknown(m) +} + +var xxx_messageInfo_TestUTF8 proto.InternalMessageInfo + +type isTestUTF8_Oneof interface { + isTestUTF8_Oneof() +} + +type TestUTF8_Field struct { + Field string `protobuf:"bytes,3,opt,name=field,oneof"` +} + +func (*TestUTF8_Field) isTestUTF8_Oneof() {} + +func (m *TestUTF8) GetOneof() isTestUTF8_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *TestUTF8) GetScalar() string { + if m != nil && m.Scalar != nil { + return *m.Scalar + } + return "" +} + +func (m *TestUTF8) GetVector() []string { + if m != nil { + return m.Vector + } + return nil +} + +func (m *TestUTF8) GetField() string { + if x, ok := m.GetOneof().(*TestUTF8_Field); ok { + return x.Field + } + return "" +} + +func (m *TestUTF8) GetMapKey() map[string]int64 { + if m != nil { + return m.MapKey + } + return nil +} + +func (m *TestUTF8) GetMapValue() map[int64]string { + if m != nil { + return m.MapValue + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*TestUTF8) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _TestUTF8_OneofMarshaler, _TestUTF8_OneofUnmarshaler, _TestUTF8_OneofSizer, []interface{}{ + (*TestUTF8_Field)(nil), + } +} + +func _TestUTF8_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*TestUTF8) + // oneof + switch x := m.Oneof.(type) { + case *TestUTF8_Field: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Field) + case nil: + default: + return fmt.Errorf("TestUTF8.Oneof has unexpected type %T", x) + } + return nil +} + +func _TestUTF8_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*TestUTF8) + switch tag { + case 3: // oneof.field + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Oneof = &TestUTF8_Field{x} + return true, err + default: + return false, nil + } +} + +func _TestUTF8_OneofSizer(msg proto.Message) (n int) { + m := msg.(*TestUTF8) + // oneof + switch x := m.Oneof.(type) { + case *TestUTF8_Field: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Field))) + n += len(x.Field) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +var E_Greeting = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: ([]string)(nil), + Field: 106, + Name: "test_proto.greeting", + Tag: "bytes,106,rep,name=greeting", + Filename: "test_proto/test.proto", +} + +var E_Complex = &proto.ExtensionDesc{ + ExtendedType: (*OtherMessage)(nil), + ExtensionType: (*ComplexExtension)(nil), + Field: 200, + Name: "test_proto.complex", + Tag: "bytes,200,opt,name=complex", + Filename: "test_proto/test.proto", +} + +var E_RComplex = &proto.ExtensionDesc{ + ExtendedType: (*OtherMessage)(nil), + ExtensionType: ([]*ComplexExtension)(nil), + Field: 201, + Name: "test_proto.r_complex", + Tag: "bytes,201,rep,name=r_complex,json=rComplex", + Filename: "test_proto/test.proto", +} + +var E_NoDefaultDouble = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float64)(nil), + Field: 101, + Name: "test_proto.no_default_double", + Tag: "fixed64,101,opt,name=no_default_double,json=noDefaultDouble", + Filename: "test_proto/test.proto", +} + +var E_NoDefaultFloat = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float32)(nil), + Field: 102, + Name: "test_proto.no_default_float", + Tag: "fixed32,102,opt,name=no_default_float,json=noDefaultFloat", + Filename: "test_proto/test.proto", +} + +var E_NoDefaultInt32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 103, + Name: "test_proto.no_default_int32", + Tag: "varint,103,opt,name=no_default_int32,json=noDefaultInt32", + Filename: "test_proto/test.proto", +} + +var E_NoDefaultInt64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 104, + Name: "test_proto.no_default_int64", + Tag: "varint,104,opt,name=no_default_int64,json=noDefaultInt64", + Filename: "test_proto/test.proto", +} + +var E_NoDefaultUint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 105, + Name: "test_proto.no_default_uint32", + Tag: "varint,105,opt,name=no_default_uint32,json=noDefaultUint32", + Filename: "test_proto/test.proto", +} + 
+var E_NoDefaultUint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 106, + Name: "test_proto.no_default_uint64", + Tag: "varint,106,opt,name=no_default_uint64,json=noDefaultUint64", + Filename: "test_proto/test.proto", +} + +var E_NoDefaultSint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 107, + Name: "test_proto.no_default_sint32", + Tag: "zigzag32,107,opt,name=no_default_sint32,json=noDefaultSint32", + Filename: "test_proto/test.proto", +} + +var E_NoDefaultSint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 108, + Name: "test_proto.no_default_sint64", + Tag: "zigzag64,108,opt,name=no_default_sint64,json=noDefaultSint64", + Filename: "test_proto/test.proto", +} + +var E_NoDefaultFixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 109, + Name: "test_proto.no_default_fixed32", + Tag: "fixed32,109,opt,name=no_default_fixed32,json=noDefaultFixed32", + Filename: "test_proto/test.proto", +} + +var E_NoDefaultFixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 110, + Name: "test_proto.no_default_fixed64", + Tag: "fixed64,110,opt,name=no_default_fixed64,json=noDefaultFixed64", + Filename: "test_proto/test.proto", +} + +var E_NoDefaultSfixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 111, + Name: "test_proto.no_default_sfixed32", + Tag: "fixed32,111,opt,name=no_default_sfixed32,json=noDefaultSfixed32", + Filename: "test_proto/test.proto", +} + +var E_NoDefaultSfixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 112, + Name: "test_proto.no_default_sfixed64", + Tag: "fixed64,112,opt,name=no_default_sfixed64,json=noDefaultSfixed64", + Filename: "test_proto/test.proto", +} + +var E_NoDefaultBool = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 113, + Name: "test_proto.no_default_bool", + Tag: "varint,113,opt,name=no_default_bool,json=noDefaultBool", + Filename: "test_proto/test.proto", +} + +var E_NoDefaultString = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*string)(nil), + Field: 114, + Name: "test_proto.no_default_string", + Tag: "bytes,114,opt,name=no_default_string,json=noDefaultString", + Filename: "test_proto/test.proto", +} + +var E_NoDefaultBytes = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: ([]byte)(nil), + Field: 115, + Name: "test_proto.no_default_bytes", + Tag: "bytes,115,opt,name=no_default_bytes,json=noDefaultBytes", + Filename: "test_proto/test.proto", +} + +var E_NoDefaultEnum = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), + Field: 116, + Name: "test_proto.no_default_enum", + Tag: "varint,116,opt,name=no_default_enum,json=noDefaultEnum,enum=test_proto.DefaultsMessage_DefaultsEnum", + Filename: "test_proto/test.proto", +} + +var E_DefaultDouble = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float64)(nil), + Field: 201, + Name: "test_proto.default_double", + Tag: "fixed64,201,opt,name=default_double,json=defaultDouble,def=3.1415", + Filename: "test_proto/test.proto", +} + +var E_DefaultFloat = &proto.ExtensionDesc{ + 
ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float32)(nil), + Field: 202, + Name: "test_proto.default_float", + Tag: "fixed32,202,opt,name=default_float,json=defaultFloat,def=3.14", + Filename: "test_proto/test.proto", +} + +var E_DefaultInt32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 203, + Name: "test_proto.default_int32", + Tag: "varint,203,opt,name=default_int32,json=defaultInt32,def=42", + Filename: "test_proto/test.proto", +} + +var E_DefaultInt64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 204, + Name: "test_proto.default_int64", + Tag: "varint,204,opt,name=default_int64,json=defaultInt64,def=43", + Filename: "test_proto/test.proto", +} + +var E_DefaultUint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 205, + Name: "test_proto.default_uint32", + Tag: "varint,205,opt,name=default_uint32,json=defaultUint32,def=44", + Filename: "test_proto/test.proto", +} + +var E_DefaultUint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 206, + Name: "test_proto.default_uint64", + Tag: "varint,206,opt,name=default_uint64,json=defaultUint64,def=45", + Filename: "test_proto/test.proto", +} + +var E_DefaultSint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 207, + Name: "test_proto.default_sint32", + Tag: "zigzag32,207,opt,name=default_sint32,json=defaultSint32,def=46", + Filename: "test_proto/test.proto", +} + +var E_DefaultSint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 208, + Name: "test_proto.default_sint64", + Tag: "zigzag64,208,opt,name=default_sint64,json=defaultSint64,def=47", + Filename: "test_proto/test.proto", +} + +var E_DefaultFixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 209, + Name: "test_proto.default_fixed32", + Tag: "fixed32,209,opt,name=default_fixed32,json=defaultFixed32,def=48", + Filename: "test_proto/test.proto", +} + +var E_DefaultFixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 210, + Name: "test_proto.default_fixed64", + Tag: "fixed64,210,opt,name=default_fixed64,json=defaultFixed64,def=49", + Filename: "test_proto/test.proto", +} + +var E_DefaultSfixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 211, + Name: "test_proto.default_sfixed32", + Tag: "fixed32,211,opt,name=default_sfixed32,json=defaultSfixed32,def=50", + Filename: "test_proto/test.proto", +} + +var E_DefaultSfixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 212, + Name: "test_proto.default_sfixed64", + Tag: "fixed64,212,opt,name=default_sfixed64,json=defaultSfixed64,def=51", + Filename: "test_proto/test.proto", +} + +var E_DefaultBool = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 213, + Name: "test_proto.default_bool", + Tag: "varint,213,opt,name=default_bool,json=defaultBool,def=1", + Filename: "test_proto/test.proto", +} + +var E_DefaultString = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*string)(nil), + Field: 214, + Name: "test_proto.default_string", + Tag: 
"bytes,214,opt,name=default_string,json=defaultString,def=Hello, string,def=foo", + Filename: "test_proto/test.proto", +} + +var E_DefaultBytes = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: ([]byte)(nil), + Field: 215, + Name: "test_proto.default_bytes", + Tag: "bytes,215,opt,name=default_bytes,json=defaultBytes,def=Hello, bytes", + Filename: "test_proto/test.proto", +} + +var E_DefaultEnum = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), + Field: 216, + Name: "test_proto.default_enum", + Tag: "varint,216,opt,name=default_enum,json=defaultEnum,enum=test_proto.DefaultsMessage_DefaultsEnum,def=1", + Filename: "test_proto/test.proto", +} + +var E_X201 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 201, + Name: "test_proto.x201", + Tag: "bytes,201,opt,name=x201", + Filename: "test_proto/test.proto", +} + +var E_X202 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 202, + Name: "test_proto.x202", + Tag: "bytes,202,opt,name=x202", + Filename: "test_proto/test.proto", +} + +var E_X203 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 203, + Name: "test_proto.x203", + Tag: "bytes,203,opt,name=x203", + Filename: "test_proto/test.proto", +} + +var E_X204 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 204, + Name: "test_proto.x204", + Tag: "bytes,204,opt,name=x204", + Filename: "test_proto/test.proto", +} + +var E_X205 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 205, + Name: "test_proto.x205", + Tag: "bytes,205,opt,name=x205", + Filename: "test_proto/test.proto", +} + +var E_X206 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 206, + Name: "test_proto.x206", + Tag: "bytes,206,opt,name=x206", + Filename: "test_proto/test.proto", +} + +var E_X207 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 207, + Name: "test_proto.x207", + Tag: "bytes,207,opt,name=x207", + Filename: "test_proto/test.proto", +} + +var E_X208 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 208, + Name: "test_proto.x208", + Tag: "bytes,208,opt,name=x208", + Filename: "test_proto/test.proto", +} + +var E_X209 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 209, + Name: "test_proto.x209", + Tag: "bytes,209,opt,name=x209", + Filename: "test_proto/test.proto", +} + +var E_X210 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 210, + Name: "test_proto.x210", + Tag: "bytes,210,opt,name=x210", + Filename: "test_proto/test.proto", +} + +var E_X211 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 211, + Name: "test_proto.x211", + Tag: "bytes,211,opt,name=x211", + Filename: "test_proto/test.proto", +} + +var E_X212 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 212, + Name: "test_proto.x212", + Tag: "bytes,212,opt,name=x212", + Filename: "test_proto/test.proto", +} + +var E_X213 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 213, + 
Name: "test_proto.x213", + Tag: "bytes,213,opt,name=x213", + Filename: "test_proto/test.proto", +} + +var E_X214 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 214, + Name: "test_proto.x214", + Tag: "bytes,214,opt,name=x214", + Filename: "test_proto/test.proto", +} + +var E_X215 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 215, + Name: "test_proto.x215", + Tag: "bytes,215,opt,name=x215", + Filename: "test_proto/test.proto", +} + +var E_X216 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 216, + Name: "test_proto.x216", + Tag: "bytes,216,opt,name=x216", + Filename: "test_proto/test.proto", +} + +var E_X217 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 217, + Name: "test_proto.x217", + Tag: "bytes,217,opt,name=x217", + Filename: "test_proto/test.proto", +} + +var E_X218 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 218, + Name: "test_proto.x218", + Tag: "bytes,218,opt,name=x218", + Filename: "test_proto/test.proto", +} + +var E_X219 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 219, + Name: "test_proto.x219", + Tag: "bytes,219,opt,name=x219", + Filename: "test_proto/test.proto", +} + +var E_X220 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 220, + Name: "test_proto.x220", + Tag: "bytes,220,opt,name=x220", + Filename: "test_proto/test.proto", +} + +var E_X221 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 221, + Name: "test_proto.x221", + Tag: "bytes,221,opt,name=x221", + Filename: "test_proto/test.proto", +} + +var E_X222 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 222, + Name: "test_proto.x222", + Tag: "bytes,222,opt,name=x222", + Filename: "test_proto/test.proto", +} + +var E_X223 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 223, + Name: "test_proto.x223", + Tag: "bytes,223,opt,name=x223", + Filename: "test_proto/test.proto", +} + +var E_X224 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 224, + Name: "test_proto.x224", + Tag: "bytes,224,opt,name=x224", + Filename: "test_proto/test.proto", +} + +var E_X225 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 225, + Name: "test_proto.x225", + Tag: "bytes,225,opt,name=x225", + Filename: "test_proto/test.proto", +} + +var E_X226 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 226, + Name: "test_proto.x226", + Tag: "bytes,226,opt,name=x226", + Filename: "test_proto/test.proto", +} + +var E_X227 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 227, + Name: "test_proto.x227", + Tag: "bytes,227,opt,name=x227", + Filename: "test_proto/test.proto", +} + +var E_X228 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 228, + Name: "test_proto.x228", + Tag: "bytes,228,opt,name=x228", + Filename: "test_proto/test.proto", +} + +var E_X229 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + 
Field: 229, + Name: "test_proto.x229", + Tag: "bytes,229,opt,name=x229", + Filename: "test_proto/test.proto", +} + +var E_X230 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 230, + Name: "test_proto.x230", + Tag: "bytes,230,opt,name=x230", + Filename: "test_proto/test.proto", +} + +var E_X231 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 231, + Name: "test_proto.x231", + Tag: "bytes,231,opt,name=x231", + Filename: "test_proto/test.proto", +} + +var E_X232 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 232, + Name: "test_proto.x232", + Tag: "bytes,232,opt,name=x232", + Filename: "test_proto/test.proto", +} + +var E_X233 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 233, + Name: "test_proto.x233", + Tag: "bytes,233,opt,name=x233", + Filename: "test_proto/test.proto", +} + +var E_X234 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 234, + Name: "test_proto.x234", + Tag: "bytes,234,opt,name=x234", + Filename: "test_proto/test.proto", +} + +var E_X235 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 235, + Name: "test_proto.x235", + Tag: "bytes,235,opt,name=x235", + Filename: "test_proto/test.proto", +} + +var E_X236 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 236, + Name: "test_proto.x236", + Tag: "bytes,236,opt,name=x236", + Filename: "test_proto/test.proto", +} + +var E_X237 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 237, + Name: "test_proto.x237", + Tag: "bytes,237,opt,name=x237", + Filename: "test_proto/test.proto", +} + +var E_X238 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 238, + Name: "test_proto.x238", + Tag: "bytes,238,opt,name=x238", + Filename: "test_proto/test.proto", +} + +var E_X239 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 239, + Name: "test_proto.x239", + Tag: "bytes,239,opt,name=x239", + Filename: "test_proto/test.proto", +} + +var E_X240 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 240, + Name: "test_proto.x240", + Tag: "bytes,240,opt,name=x240", + Filename: "test_proto/test.proto", +} + +var E_X241 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 241, + Name: "test_proto.x241", + Tag: "bytes,241,opt,name=x241", + Filename: "test_proto/test.proto", +} + +var E_X242 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 242, + Name: "test_proto.x242", + Tag: "bytes,242,opt,name=x242", + Filename: "test_proto/test.proto", +} + +var E_X243 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 243, + Name: "test_proto.x243", + Tag: "bytes,243,opt,name=x243", + Filename: "test_proto/test.proto", +} + +var E_X244 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 244, + Name: "test_proto.x244", + Tag: "bytes,244,opt,name=x244", + Filename: "test_proto/test.proto", +} + +var E_X245 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: 
(*Empty)(nil), + Field: 245, + Name: "test_proto.x245", + Tag: "bytes,245,opt,name=x245", + Filename: "test_proto/test.proto", +} + +var E_X246 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 246, + Name: "test_proto.x246", + Tag: "bytes,246,opt,name=x246", + Filename: "test_proto/test.proto", +} + +var E_X247 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 247, + Name: "test_proto.x247", + Tag: "bytes,247,opt,name=x247", + Filename: "test_proto/test.proto", +} + +var E_X248 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 248, + Name: "test_proto.x248", + Tag: "bytes,248,opt,name=x248", + Filename: "test_proto/test.proto", +} + +var E_X249 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 249, + Name: "test_proto.x249", + Tag: "bytes,249,opt,name=x249", + Filename: "test_proto/test.proto", +} + +var E_X250 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 250, + Name: "test_proto.x250", + Tag: "bytes,250,opt,name=x250", + Filename: "test_proto/test.proto", +} + +func init() { + proto.RegisterType((*GoEnum)(nil), "test_proto.GoEnum") + proto.RegisterType((*GoTestField)(nil), "test_proto.GoTestField") + proto.RegisterType((*GoTest)(nil), "test_proto.GoTest") + proto.RegisterType((*GoTest_RequiredGroup)(nil), "test_proto.GoTest.RequiredGroup") + proto.RegisterType((*GoTest_RepeatedGroup)(nil), "test_proto.GoTest.RepeatedGroup") + proto.RegisterType((*GoTest_OptionalGroup)(nil), "test_proto.GoTest.OptionalGroup") + proto.RegisterType((*GoTestRequiredGroupField)(nil), "test_proto.GoTestRequiredGroupField") + proto.RegisterType((*GoTestRequiredGroupField_Group)(nil), "test_proto.GoTestRequiredGroupField.Group") + proto.RegisterType((*GoSkipTest)(nil), "test_proto.GoSkipTest") + proto.RegisterType((*GoSkipTest_SkipGroup)(nil), "test_proto.GoSkipTest.SkipGroup") + proto.RegisterType((*NonPackedTest)(nil), "test_proto.NonPackedTest") + proto.RegisterType((*PackedTest)(nil), "test_proto.PackedTest") + proto.RegisterType((*MaxTag)(nil), "test_proto.MaxTag") + proto.RegisterType((*OldMessage)(nil), "test_proto.OldMessage") + proto.RegisterType((*OldMessage_Nested)(nil), "test_proto.OldMessage.Nested") + proto.RegisterType((*NewMessage)(nil), "test_proto.NewMessage") + proto.RegisterType((*NewMessage_Nested)(nil), "test_proto.NewMessage.Nested") + proto.RegisterType((*InnerMessage)(nil), "test_proto.InnerMessage") + proto.RegisterType((*OtherMessage)(nil), "test_proto.OtherMessage") + proto.RegisterType((*RequiredInnerMessage)(nil), "test_proto.RequiredInnerMessage") + proto.RegisterType((*MyMessage)(nil), "test_proto.MyMessage") + proto.RegisterType((*MyMessage_SomeGroup)(nil), "test_proto.MyMessage.SomeGroup") + proto.RegisterType((*Ext)(nil), "test_proto.Ext") + proto.RegisterMapType((map[int32]int32)(nil), "test_proto.Ext.MapFieldEntry") + proto.RegisterType((*ComplexExtension)(nil), "test_proto.ComplexExtension") + proto.RegisterType((*DefaultsMessage)(nil), "test_proto.DefaultsMessage") + proto.RegisterType((*MyMessageSet)(nil), "test_proto.MyMessageSet") + proto.RegisterType((*Empty)(nil), "test_proto.Empty") + proto.RegisterType((*MessageList)(nil), "test_proto.MessageList") + proto.RegisterType((*MessageList_Message)(nil), "test_proto.MessageList.Message") + proto.RegisterType((*Strings)(nil), "test_proto.Strings") + 
proto.RegisterType((*Defaults)(nil), "test_proto.Defaults") + proto.RegisterType((*SubDefaults)(nil), "test_proto.SubDefaults") + proto.RegisterType((*RepeatedEnum)(nil), "test_proto.RepeatedEnum") + proto.RegisterType((*MoreRepeated)(nil), "test_proto.MoreRepeated") + proto.RegisterType((*GroupOld)(nil), "test_proto.GroupOld") + proto.RegisterType((*GroupOld_G)(nil), "test_proto.GroupOld.G") + proto.RegisterType((*GroupNew)(nil), "test_proto.GroupNew") + proto.RegisterType((*GroupNew_G)(nil), "test_proto.GroupNew.G") + proto.RegisterType((*FloatingPoint)(nil), "test_proto.FloatingPoint") + proto.RegisterType((*MessageWithMap)(nil), "test_proto.MessageWithMap") + proto.RegisterMapType((map[bool][]byte)(nil), "test_proto.MessageWithMap.ByteMappingEntry") + proto.RegisterMapType((map[int64]*FloatingPoint)(nil), "test_proto.MessageWithMap.MsgMappingEntry") + proto.RegisterMapType((map[int32]string)(nil), "test_proto.MessageWithMap.NameMappingEntry") + proto.RegisterMapType((map[string]string)(nil), "test_proto.MessageWithMap.StrToStrEntry") + proto.RegisterType((*Oneof)(nil), "test_proto.Oneof") + proto.RegisterType((*Oneof_F_Group)(nil), "test_proto.Oneof.F_Group") + proto.RegisterType((*Communique)(nil), "test_proto.Communique") + proto.RegisterType((*TestUTF8)(nil), "test_proto.TestUTF8") + proto.RegisterMapType((map[string]int64)(nil), "test_proto.TestUTF8.MapKeyEntry") + proto.RegisterMapType((map[int64]string)(nil), "test_proto.TestUTF8.MapValueEntry") + proto.RegisterEnum("test_proto.FOO", FOO_name, FOO_value) + proto.RegisterEnum("test_proto.GoTest_KIND", GoTest_KIND_name, GoTest_KIND_value) + proto.RegisterEnum("test_proto.MyMessage_Color", MyMessage_Color_name, MyMessage_Color_value) + proto.RegisterEnum("test_proto.DefaultsMessage_DefaultsEnum", DefaultsMessage_DefaultsEnum_name, DefaultsMessage_DefaultsEnum_value) + proto.RegisterEnum("test_proto.Defaults_Color", Defaults_Color_name, Defaults_Color_value) + proto.RegisterEnum("test_proto.RepeatedEnum_Color", RepeatedEnum_Color_name, RepeatedEnum_Color_value) + proto.RegisterExtension(E_Ext_More) + proto.RegisterExtension(E_Ext_Text) + proto.RegisterExtension(E_Ext_Number) + proto.RegisterExtension(E_Greeting) + proto.RegisterExtension(E_Complex) + proto.RegisterExtension(E_RComplex) + proto.RegisterExtension(E_NoDefaultDouble) + proto.RegisterExtension(E_NoDefaultFloat) + proto.RegisterExtension(E_NoDefaultInt32) + proto.RegisterExtension(E_NoDefaultInt64) + proto.RegisterExtension(E_NoDefaultUint32) + proto.RegisterExtension(E_NoDefaultUint64) + proto.RegisterExtension(E_NoDefaultSint32) + proto.RegisterExtension(E_NoDefaultSint64) + proto.RegisterExtension(E_NoDefaultFixed32) + proto.RegisterExtension(E_NoDefaultFixed64) + proto.RegisterExtension(E_NoDefaultSfixed32) + proto.RegisterExtension(E_NoDefaultSfixed64) + proto.RegisterExtension(E_NoDefaultBool) + proto.RegisterExtension(E_NoDefaultString) + proto.RegisterExtension(E_NoDefaultBytes) + proto.RegisterExtension(E_NoDefaultEnum) + proto.RegisterExtension(E_DefaultDouble) + proto.RegisterExtension(E_DefaultFloat) + proto.RegisterExtension(E_DefaultInt32) + proto.RegisterExtension(E_DefaultInt64) + proto.RegisterExtension(E_DefaultUint32) + proto.RegisterExtension(E_DefaultUint64) + proto.RegisterExtension(E_DefaultSint32) + proto.RegisterExtension(E_DefaultSint64) + proto.RegisterExtension(E_DefaultFixed32) + proto.RegisterExtension(E_DefaultFixed64) + proto.RegisterExtension(E_DefaultSfixed32) + proto.RegisterExtension(E_DefaultSfixed64) + 
proto.RegisterExtension(E_DefaultBool) + proto.RegisterExtension(E_DefaultString) + proto.RegisterExtension(E_DefaultBytes) + proto.RegisterExtension(E_DefaultEnum) + proto.RegisterExtension(E_X201) + proto.RegisterExtension(E_X202) + proto.RegisterExtension(E_X203) + proto.RegisterExtension(E_X204) + proto.RegisterExtension(E_X205) + proto.RegisterExtension(E_X206) + proto.RegisterExtension(E_X207) + proto.RegisterExtension(E_X208) + proto.RegisterExtension(E_X209) + proto.RegisterExtension(E_X210) + proto.RegisterExtension(E_X211) + proto.RegisterExtension(E_X212) + proto.RegisterExtension(E_X213) + proto.RegisterExtension(E_X214) + proto.RegisterExtension(E_X215) + proto.RegisterExtension(E_X216) + proto.RegisterExtension(E_X217) + proto.RegisterExtension(E_X218) + proto.RegisterExtension(E_X219) + proto.RegisterExtension(E_X220) + proto.RegisterExtension(E_X221) + proto.RegisterExtension(E_X222) + proto.RegisterExtension(E_X223) + proto.RegisterExtension(E_X224) + proto.RegisterExtension(E_X225) + proto.RegisterExtension(E_X226) + proto.RegisterExtension(E_X227) + proto.RegisterExtension(E_X228) + proto.RegisterExtension(E_X229) + proto.RegisterExtension(E_X230) + proto.RegisterExtension(E_X231) + proto.RegisterExtension(E_X232) + proto.RegisterExtension(E_X233) + proto.RegisterExtension(E_X234) + proto.RegisterExtension(E_X235) + proto.RegisterExtension(E_X236) + proto.RegisterExtension(E_X237) + proto.RegisterExtension(E_X238) + proto.RegisterExtension(E_X239) + proto.RegisterExtension(E_X240) + proto.RegisterExtension(E_X241) + proto.RegisterExtension(E_X242) + proto.RegisterExtension(E_X243) + proto.RegisterExtension(E_X244) + proto.RegisterExtension(E_X245) + proto.RegisterExtension(E_X246) + proto.RegisterExtension(E_X247) + proto.RegisterExtension(E_X248) + proto.RegisterExtension(E_X249) + proto.RegisterExtension(E_X250) +} + +func init() { proto.RegisterFile("test_proto/test.proto", fileDescriptor_test_ee9f66cbbebc227c) } + +var fileDescriptor_test_ee9f66cbbebc227c = []byte{ + // 4795 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x5b, 0xd9, 0x73, 0x1b, 0x47, + 0x7a, 0xd7, 0x0c, 0xee, 0x0f, 0x20, 0x31, 0x6c, 0xc9, 0x12, 0x44, 0x59, 0xd2, 0x08, 0x6b, 0xaf, + 0x61, 0xc9, 0xa2, 0x48, 0x60, 0x08, 0x49, 0x70, 0xec, 0x58, 0x07, 0x41, 0xb3, 0x24, 0x12, 0xf2, + 0x90, 0xb6, 0xb3, 0xca, 0x03, 0x0a, 0x24, 0x06, 0x20, 0x56, 0xc0, 0x0c, 0x0c, 0x0c, 0x56, 0x64, + 0x52, 0xa9, 0xf2, 0x63, 0xaa, 0xf2, 0x94, 0x4d, 0x52, 0x95, 0xf7, 0xbc, 0xe4, 0x25, 0xd7, 0x43, + 0xf2, 0x37, 0xc4, 0xd7, 0x7a, 0x77, 0xbd, 0x57, 0x92, 0x4d, 0x36, 0xf7, 0x9d, 0xcd, 0xbd, 0x47, + 0x5e, 0x9c, 0xea, 0xaf, 0x7b, 0x66, 0x7a, 0x06, 0x50, 0x93, 0x7c, 0xe2, 0x74, 0xf7, 0xef, 0xfb, + 0xf5, 0xf5, 0x9b, 0xef, 0xfb, 0xba, 0x31, 0x84, 0xe7, 0x5c, 0x6b, 0xec, 0x36, 0x87, 0x23, 0xc7, + 0x75, 0x6e, 0xd0, 0xc7, 0x25, 0x7c, 0x24, 0x10, 0x54, 0x17, 0xaf, 0x41, 0x72, 0xdd, 0x59, 0xb3, + 0x27, 0x03, 0x72, 0x05, 0x62, 0x1d, 0xc7, 0x29, 0x28, 0xba, 0x5a, 0x9a, 0x2f, 0xe7, 0x97, 0x02, + 0xcc, 0x52, 0xbd, 0xd1, 0x30, 0x69, 0x5b, 0xf1, 0x26, 0x64, 0xd7, 0x9d, 0x1d, 0x6b, 0xec, 0xd6, + 0x7b, 0x56, 0xbf, 0x4d, 0xce, 0x40, 0xe2, 0x61, 0x6b, 0xd7, 0xea, 0xa3, 0x4d, 0xc6, 0x64, 0x05, + 0x42, 0x20, 0xbe, 0x73, 0x38, 0xb4, 0x0a, 0x2a, 0x56, 0xe2, 0x73, 0xf1, 0x0f, 0x8b, 0xb4, 0x1b, + 0x6a, 0x49, 0xae, 0x41, 0xfc, 0x41, 0xcf, 0x6e, 0xf3, 0x7e, 0xce, 0x89, 0xfd, 0x30, 0xc4, 0xd2, + 0x83, 0x8d, 0xad, 0xfb, 0x26, 0x82, 0x68, 0x0f, 0x3b, 0xad, 0xdd, 0x3e, 0x25, 0x53, 0x68, 0x0f, + 0x58, 
0xa0, 0xb5, 0x8f, 0x5a, 0xa3, 0xd6, 0xa0, 0x10, 0xd3, 0x95, 0x52, 0xc2, 0x64, 0x05, 0xf2, + 0x1a, 0xcc, 0x99, 0xd6, 0x7b, 0x93, 0xde, 0xc8, 0x6a, 0xe3, 0xf0, 0x0a, 0x71, 0x5d, 0x2d, 0x65, + 0x67, 0xf5, 0x80, 0xcd, 0x66, 0x18, 0xcd, 0xcc, 0x87, 0x56, 0xcb, 0xf5, 0xcc, 0x13, 0x7a, 0xec, + 0x08, 0x73, 0x01, 0x4d, 0xcd, 0x1b, 0x43, 0xb7, 0xe7, 0xd8, 0xad, 0x3e, 0x33, 0x4f, 0xea, 0x8a, + 0xd4, 0x3c, 0x84, 0x26, 0x5f, 0x84, 0x7c, 0xbd, 0x79, 0xd7, 0x71, 0xfa, 0xcd, 0x11, 0x1f, 0x55, + 0x01, 0x74, 0xb5, 0x94, 0x36, 0xe7, 0xea, 0xb4, 0xd6, 0x1b, 0x2a, 0x29, 0x81, 0x56, 0x6f, 0x6e, + 0xd8, 0x6e, 0xa5, 0x1c, 0x00, 0xb3, 0xba, 0x5a, 0x4a, 0x98, 0xf3, 0x75, 0xac, 0x9e, 0x42, 0x56, + 0x8d, 0x00, 0x99, 0xd3, 0xd5, 0x52, 0x8c, 0x21, 0xab, 0x86, 0x8f, 0x7c, 0x05, 0x48, 0xbd, 0x59, + 0xef, 0x1d, 0x58, 0x6d, 0x91, 0x75, 0x4e, 0x57, 0x4b, 0x29, 0x53, 0xab, 0xf3, 0x86, 0x19, 0x68, + 0x91, 0x79, 0x5e, 0x57, 0x4b, 0x49, 0x0f, 0x2d, 0x70, 0x5f, 0x85, 0x85, 0x7a, 0xf3, 0xed, 0x5e, + 0x78, 0xc0, 0x79, 0x5d, 0x2d, 0xcd, 0x99, 0xf9, 0x3a, 0xab, 0x9f, 0xc6, 0x8a, 0xc4, 0x9a, 0xae, + 0x96, 0xe2, 0x1c, 0x2b, 0xf0, 0xe2, 0xec, 0xea, 0x7d, 0xa7, 0xe5, 0x06, 0xd0, 0x05, 0x5d, 0x2d, + 0xa9, 0xe6, 0x7c, 0x1d, 0xab, 0xc3, 0xac, 0xf7, 0x9d, 0xc9, 0x6e, 0xdf, 0x0a, 0xa0, 0x44, 0x57, + 0x4b, 0x8a, 0x99, 0xaf, 0xb3, 0xfa, 0x30, 0x76, 0xdb, 0x1d, 0xf5, 0xec, 0x6e, 0x80, 0x3d, 0x8d, + 0x3a, 0xce, 0xd7, 0x59, 0x7d, 0x78, 0x04, 0x77, 0x0f, 0x5d, 0x6b, 0x1c, 0x40, 0x2d, 0x5d, 0x2d, + 0xe5, 0xcc, 0xf9, 0x3a, 0x56, 0x47, 0x58, 0x23, 0x6b, 0xd0, 0xd1, 0xd5, 0xd2, 0x02, 0x65, 0x9d, + 0xb1, 0x06, 0xdb, 0x91, 0x35, 0xe8, 0xea, 0x6a, 0x89, 0x70, 0xac, 0xb0, 0x06, 0x4b, 0x70, 0xba, + 0xde, 0xdc, 0xee, 0x44, 0x37, 0x6e, 0x5f, 0x57, 0x4b, 0x79, 0x73, 0xa1, 0xee, 0xb5, 0xcc, 0xc2, + 0x8b, 0xec, 0x3d, 0x5d, 0x2d, 0x69, 0x3e, 0x5e, 0xe0, 0x17, 0x35, 0xc9, 0xa4, 0x5e, 0x38, 0xa3, + 0xc7, 0x04, 0x4d, 0xb2, 0xca, 0xb0, 0x26, 0x39, 0xf0, 0x39, 0x3d, 0x26, 0x6a, 0x32, 0x82, 0xc4, + 0xee, 0x39, 0xf2, 0xac, 0x1e, 0x13, 0x35, 0xc9, 0x91, 0x11, 0x4d, 0x72, 0xec, 0x39, 0x3d, 0x16, + 0xd6, 0xe4, 0x14, 0x5a, 0x64, 0x2e, 0xe8, 0xb1, 0xb0, 0x26, 0x39, 0x3a, 0xac, 0x49, 0x0e, 0x3e, + 0xaf, 0xc7, 0x42, 0x9a, 0x8c, 0x62, 0x45, 0xe2, 0x45, 0x3d, 0x16, 0xd2, 0xa4, 0x38, 0x3b, 0x4f, + 0x93, 0x1c, 0x7a, 0x41, 0x8f, 0x89, 0x9a, 0x14, 0x59, 0x7d, 0x4d, 0x72, 0xe8, 0xf3, 0x7a, 0x2c, + 0xa4, 0x49, 0x11, 0xeb, 0x6b, 0x92, 0x63, 0x2f, 0xea, 0xb1, 0x90, 0x26, 0x39, 0xf6, 0x65, 0x51, + 0x93, 0x1c, 0xfa, 0x81, 0xa2, 0xc7, 0x44, 0x51, 0x72, 0xe8, 0xb5, 0x90, 0x28, 0x39, 0xf6, 0x43, + 0x8a, 0x15, 0x55, 0x19, 0x05, 0x8b, 0xab, 0xf0, 0x11, 0x05, 0x8b, 0xb2, 0xe4, 0xe0, 0x1b, 0x11, + 0x59, 0x72, 0xf8, 0xc7, 0x14, 0x1e, 0xd6, 0xe5, 0xb4, 0x81, 0xc8, 0xff, 0x09, 0x35, 0x08, 0x0b, + 0x93, 0x1b, 0x04, 0xc2, 0x74, 0xb8, 0x13, 0x2d, 0x5c, 0xd2, 0x15, 0x5f, 0x98, 0x9e, 0x67, 0x15, + 0x85, 0xe9, 0x03, 0x2f, 0x63, 0xc8, 0xe0, 0xc2, 0x9c, 0x42, 0x56, 0x8d, 0x00, 0xa9, 0xeb, 0x4a, + 0x20, 0x4c, 0x1f, 0x19, 0x12, 0xa6, 0x8f, 0xbd, 0xa2, 0x2b, 0xa2, 0x30, 0x67, 0xa0, 0x45, 0xe6, + 0xa2, 0xae, 0x88, 0xc2, 0xf4, 0xd1, 0xa2, 0x30, 0x7d, 0xf0, 0x17, 0x74, 0x45, 0x10, 0xe6, 0x34, + 0x56, 0x24, 0x7e, 0x41, 0x57, 0x04, 0x61, 0x86, 0x67, 0xc7, 0x84, 0xe9, 0x43, 0x5f, 0xd4, 0x95, + 0x40, 0x98, 0x61, 0x56, 0x2e, 0x4c, 0x1f, 0xfa, 0x45, 0x5d, 0x11, 0x84, 0x19, 0xc6, 0x72, 0x61, + 0xfa, 0xd8, 0x97, 0x30, 0x4e, 0x7b, 0xc2, 0xf4, 0xb1, 0x82, 0x30, 0x7d, 0xe8, 0xef, 0xd0, 0x98, + 0xee, 0x0b, 0xd3, 0x87, 0x8a, 0xc2, 0xf4, 0xb1, 0xbf, 0x4b, 0xb1, 0x81, 0x30, 0xa7, 0xc1, 0xe2, + 0x2a, 0xfc, 0x1e, 0x05, 0x07, 
0xc2, 0xf4, 0xc1, 0x61, 0x61, 0xfa, 0xf0, 0xdf, 0xa7, 0x70, 0x51, + 0x98, 0xb3, 0x0c, 0x44, 0xfe, 0x3f, 0xa0, 0x06, 0xa2, 0x30, 0x7d, 0x83, 0x25, 0x9c, 0x26, 0x15, + 0x66, 0xdb, 0xea, 0xb4, 0x26, 0x7d, 0x2a, 0xe3, 0x12, 0x55, 0x66, 0x2d, 0xee, 0x8e, 0x26, 0x16, + 0x9d, 0xab, 0xe3, 0xf4, 0xef, 0x7b, 0x6d, 0x64, 0x89, 0x0e, 0x9f, 0x09, 0x34, 0x30, 0x78, 0x99, + 0x2a, 0xb4, 0xa6, 0x56, 0xca, 0x66, 0x9e, 0xa9, 0x74, 0x1a, 0x5f, 0x35, 0x04, 0xfc, 0x55, 0xaa, + 0xd3, 0x9a, 0x5a, 0x35, 0x18, 0xbe, 0x6a, 0x04, 0xf8, 0x0a, 0x9d, 0x80, 0x27, 0xd6, 0xc0, 0xe2, + 0x1a, 0x55, 0x6b, 0x2d, 0x56, 0x29, 0x2f, 0x9b, 0x0b, 0x9e, 0x64, 0x67, 0x19, 0x85, 0xba, 0x79, + 0x85, 0x8a, 0xb6, 0x16, 0xab, 0x1a, 0xbe, 0x91, 0xd8, 0x53, 0x99, 0x0a, 0x9d, 0x4b, 0x37, 0xb0, + 0xb9, 0x4e, 0xb5, 0x5b, 0x8b, 0x57, 0xca, 0xcb, 0xcb, 0xa6, 0xc6, 0x15, 0x3c, 0xc3, 0x26, 0xd4, + 0xcf, 0x12, 0xd5, 0x70, 0x2d, 0x5e, 0x35, 0x7c, 0x9b, 0x70, 0x3f, 0x0b, 0x9e, 0x94, 0x03, 0x93, + 0x1b, 0x54, 0xcb, 0xb5, 0x64, 0x65, 0xc5, 0x58, 0x59, 0xbd, 0x6d, 0xe6, 0x99, 0xa6, 0x03, 0x1b, + 0x83, 0xf6, 0xc3, 0x45, 0x1d, 0x18, 0x2d, 0x53, 0x55, 0xd7, 0x92, 0xe5, 0x9b, 0x2b, 0xb7, 0xca, + 0xb7, 0x4c, 0x8d, 0xab, 0x3b, 0xb0, 0x7a, 0x9d, 0x5a, 0x71, 0x79, 0x07, 0x56, 0x2b, 0x54, 0xdf, + 0x35, 0x6d, 0xdf, 0xea, 0xf7, 0x9d, 0x57, 0xf4, 0xe2, 0x53, 0x67, 0xd4, 0x6f, 0x5f, 0x29, 0x82, + 0xa9, 0x71, 0xc5, 0x8b, 0xbd, 0x2e, 0x78, 0x92, 0x0f, 0xcc, 0x7f, 0x95, 0x66, 0xac, 0xb9, 0x5a, + 0xea, 0x6e, 0xaf, 0x6b, 0x3b, 0x63, 0xcb, 0xcc, 0x33, 0xf1, 0x47, 0xd6, 0x64, 0x3b, 0xba, 0x8e, + 0x5f, 0xa5, 0x66, 0x0b, 0xb5, 0xd8, 0xf5, 0x4a, 0x99, 0xf6, 0x34, 0x6b, 0x1d, 0xb7, 0xa3, 0xeb, + 0xf8, 0x6b, 0xd4, 0x86, 0xd4, 0x62, 0xd7, 0xab, 0x06, 0xb7, 0x11, 0xd7, 0xb1, 0x0a, 0x67, 0x84, + 0x77, 0x21, 0xb0, 0xfa, 0x75, 0x6a, 0x95, 0x67, 0x3d, 0x11, 0xff, 0x8d, 0x98, 0x69, 0x17, 0xea, + 0xed, 0x37, 0xa8, 0x9d, 0xc6, 0x7a, 0x23, 0xfe, 0x8b, 0x11, 0xd8, 0xdd, 0x84, 0xb3, 0x91, 0x5c, + 0xa2, 0x39, 0x6c, 0xed, 0x3d, 0xb1, 0xda, 0x85, 0x32, 0x4d, 0x29, 0xee, 0xaa, 0x9a, 0x62, 0x9e, + 0x0e, 0xa5, 0x15, 0x8f, 0xb0, 0x99, 0xdc, 0x86, 0x73, 0xd1, 0xe4, 0xc2, 0xb3, 0xac, 0xd0, 0x1c, + 0x03, 0x2d, 0xcf, 0x84, 0xf3, 0x8c, 0x88, 0xa9, 0x10, 0x54, 0x3c, 0x53, 0x83, 0x26, 0x1d, 0x81, + 0x69, 0x10, 0x5b, 0xb8, 0xe9, 0x6b, 0x70, 0x7e, 0x3a, 0xfd, 0xf0, 0x8c, 0x57, 0x69, 0x16, 0x82, + 0xc6, 0x67, 0xa3, 0x99, 0xc8, 0x94, 0xf9, 0x8c, 0xbe, 0xab, 0x34, 0x2d, 0x11, 0xcd, 0xa7, 0x7a, + 0x7f, 0x15, 0x0a, 0x53, 0x09, 0x8a, 0x67, 0x7d, 0x93, 0xe6, 0x29, 0x68, 0xfd, 0x5c, 0x24, 0x57, + 0x89, 0x1a, 0xcf, 0xe8, 0xfa, 0x16, 0x4d, 0x5c, 0x04, 0xe3, 0xa9, 0x9e, 0x71, 0xc9, 0xc2, 0x29, + 0x8c, 0x67, 0x7b, 0x9b, 0x66, 0x32, 0x7c, 0xc9, 0x42, 0xd9, 0x8c, 0xd8, 0x6f, 0x24, 0xa7, 0xf1, + 0x6c, 0x6b, 0x34, 0xb5, 0xe1, 0xfd, 0x86, 0xd3, 0x1b, 0x6e, 0xfc, 0x33, 0xd4, 0x78, 0x7b, 0xf6, + 0x8c, 0x7f, 0x14, 0xa3, 0x49, 0x09, 0xb7, 0xde, 0x9e, 0x35, 0x65, 0xdf, 0x7a, 0xc6, 0x94, 0x7f, + 0x4c, 0xad, 0x89, 0x60, 0x3d, 0x35, 0xe7, 0x37, 0x60, 0x71, 0x46, 0xbe, 0xe2, 0xd9, 0xff, 0x84, + 0xda, 0xe7, 0xd1, 0xfe, 0xdc, 0x54, 0xea, 0x32, 0xcd, 0x30, 0x63, 0x04, 0x3f, 0xa5, 0x0c, 0x5a, + 0x88, 0x61, 0x6a, 0x0c, 0x75, 0x98, 0xf3, 0xf2, 0xf1, 0xee, 0xc8, 0x99, 0x0c, 0x0b, 0x75, 0x5d, + 0x2d, 0x41, 0x59, 0x9f, 0x71, 0x3a, 0xf6, 0xd2, 0xf3, 0x75, 0x8a, 0x33, 0xc3, 0x66, 0x8c, 0x87, + 0x31, 0x33, 0x9e, 0x47, 0x7a, 0xec, 0x99, 0x3c, 0x0c, 0xe7, 0xf3, 0x08, 0x66, 0x94, 0xc7, 0x0b, + 0x77, 0x8c, 0xe7, 0xb1, 0xae, 0x3c, 0x83, 0xc7, 0x0b, 0x7e, 0x9c, 0x27, 0x64, 0xb6, 0xb8, 0x1a, + 0x9c, 0xc9, 0xb1, 0x9d, 0xbc, 0x10, 0x3d, 0xa4, 0xaf, 
0xe3, 0xe9, 0x2a, 0x5c, 0xc9, 0xcc, 0x84, + 0xe1, 0x4d, 0x9b, 0xbd, 0xf5, 0x0c, 0xb3, 0xd0, 0x68, 0xa6, 0xcd, 0x7e, 0x7e, 0x86, 0x59, 0xf1, + 0x37, 0x15, 0x88, 0x3f, 0xd8, 0xd8, 0xba, 0x4f, 0xd2, 0x10, 0x7f, 0xa7, 0xb1, 0x71, 0x5f, 0x3b, + 0x45, 0x9f, 0xee, 0x36, 0x1a, 0x0f, 0x35, 0x85, 0x64, 0x20, 0x71, 0xf7, 0x4b, 0x3b, 0x6b, 0xdb, + 0x9a, 0x4a, 0xf2, 0x90, 0xad, 0x6f, 0x6c, 0xad, 0xaf, 0x99, 0x8f, 0xcc, 0x8d, 0xad, 0x1d, 0x2d, + 0x46, 0xdb, 0xea, 0x0f, 0x1b, 0x77, 0x76, 0xb4, 0x38, 0x49, 0x41, 0x8c, 0xd6, 0x25, 0x08, 0x40, + 0x72, 0x7b, 0xc7, 0xdc, 0xd8, 0x5a, 0xd7, 0x92, 0x94, 0x65, 0x67, 0x63, 0x73, 0x4d, 0x4b, 0x51, + 0xe4, 0xce, 0xdb, 0x8f, 0x1e, 0xae, 0x69, 0x69, 0xfa, 0x78, 0xc7, 0x34, 0xef, 0x7c, 0x49, 0xcb, + 0x50, 0xa3, 0xcd, 0x3b, 0x8f, 0x34, 0xc0, 0xe6, 0x3b, 0x77, 0x1f, 0xae, 0x69, 0x59, 0x92, 0x83, + 0x74, 0xfd, 0xed, 0xad, 0x7b, 0x3b, 0x1b, 0x8d, 0x2d, 0x2d, 0x57, 0xfc, 0x45, 0x28, 0xb0, 0x65, + 0x0e, 0xad, 0x22, 0xbb, 0x32, 0x78, 0x03, 0x12, 0x6c, 0x6f, 0x14, 0xd4, 0xca, 0xd5, 0xe9, 0xbd, + 0x99, 0x36, 0x5a, 0x62, 0xbb, 0xc4, 0x0c, 0x17, 0x2f, 0x42, 0x82, 0xad, 0xd3, 0x19, 0x48, 0xb0, + 0xf5, 0x51, 0xf1, 0x2a, 0x81, 0x15, 0x8a, 0xbf, 0xa5, 0x02, 0xac, 0x3b, 0xdb, 0x4f, 0x7a, 0x43, + 0xbc, 0xb8, 0xb9, 0x08, 0x30, 0x7e, 0xd2, 0x1b, 0x36, 0xf1, 0x0d, 0xe4, 0x97, 0x0e, 0x19, 0x5a, + 0x83, 0xbe, 0x97, 0x5c, 0x81, 0x1c, 0x36, 0xf3, 0x57, 0x04, 0xef, 0x1a, 0x52, 0x66, 0x96, 0xd6, + 0x71, 0x27, 0x19, 0x86, 0x54, 0x0d, 0xbc, 0x62, 0x48, 0x0a, 0x90, 0xaa, 0x41, 0x2e, 0x03, 0x16, + 0x9b, 0x63, 0x8c, 0xa6, 0x78, 0xad, 0x90, 0x31, 0xb1, 0x5f, 0x16, 0x5f, 0xc9, 0xeb, 0x80, 0x7d, + 0xb2, 0x99, 0xe7, 0x67, 0xbd, 0x25, 0xde, 0x80, 0x97, 0xe8, 0x03, 0x9b, 0x6f, 0x60, 0xb2, 0xd8, + 0x80, 0x8c, 0x5f, 0x4f, 0x7b, 0xc3, 0x5a, 0x3e, 0x27, 0x0d, 0xe7, 0x04, 0x58, 0xe5, 0x4f, 0x8a, + 0x01, 0xf8, 0x78, 0x16, 0x70, 0x3c, 0xcc, 0x88, 0x0d, 0xa8, 0x78, 0x11, 0xe6, 0xb6, 0x1c, 0x9b, + 0xbd, 0xc7, 0xb8, 0x4e, 0x39, 0x50, 0x5a, 0x05, 0x05, 0xcf, 0xbf, 0x4a, 0xab, 0x78, 0x09, 0x40, + 0x68, 0xd3, 0x40, 0xd9, 0x65, 0x6d, 0xe8, 0x0f, 0x94, 0xdd, 0xe2, 0x35, 0x48, 0x6e, 0xb6, 0x0e, + 0x76, 0x5a, 0x5d, 0x72, 0x05, 0xa0, 0xdf, 0x1a, 0xbb, 0xcd, 0x0e, 0xee, 0xc4, 0xe7, 0x9f, 0x7f, + 0xfe, 0xb9, 0x82, 0xc9, 0x74, 0x86, 0xd6, 0xb2, 0x1d, 0x19, 0x03, 0x34, 0xfa, 0xed, 0x4d, 0x6b, + 0x3c, 0x6e, 0x75, 0x2d, 0xb2, 0x0a, 0x49, 0xdb, 0x1a, 0xd3, 0xe8, 0xab, 0xe0, 0x5d, 0xd3, 0x45, + 0x71, 0x1d, 0x02, 0xdc, 0xd2, 0x16, 0x82, 0x4c, 0x0e, 0x26, 0x1a, 0xc4, 0xec, 0xc9, 0x00, 0x6f, + 0xd4, 0x12, 0x26, 0x7d, 0x5c, 0x7c, 0x1e, 0x92, 0x0c, 0x43, 0x08, 0xc4, 0xed, 0xd6, 0xc0, 0x2a, + 0xb0, 0x9e, 0xf1, 0xb9, 0xf8, 0x55, 0x05, 0x60, 0xcb, 0x7a, 0x7a, 0xac, 0x5e, 0x03, 0x9c, 0xa4, + 0xd7, 0x18, 0xeb, 0xf5, 0x55, 0x59, 0xaf, 0x54, 0x6d, 0x1d, 0xc7, 0x69, 0x37, 0xd9, 0x46, 0xb3, + 0xeb, 0xbf, 0x0c, 0xad, 0xc1, 0x9d, 0x2b, 0x3e, 0x86, 0xdc, 0x86, 0x6d, 0x5b, 0x23, 0x6f, 0x54, + 0x04, 0xe2, 0xfb, 0xce, 0xd8, 0xe5, 0x37, 0x91, 0xf8, 0x4c, 0x0a, 0x10, 0x1f, 0x3a, 0x23, 0x97, + 0xcd, 0xb4, 0x16, 0x37, 0x96, 0x97, 0x97, 0x4d, 0xac, 0x21, 0xcf, 0x43, 0x66, 0xcf, 0xb1, 0x6d, + 0x6b, 0x8f, 0x4e, 0x23, 0x86, 0x47, 0xc7, 0xa0, 0xa2, 0xf8, 0xcb, 0x0a, 0xe4, 0x1a, 0xee, 0x7e, + 0x40, 0xae, 0x41, 0xec, 0x89, 0x75, 0x88, 0xc3, 0x8b, 0x99, 0xf4, 0x91, 0xbe, 0x30, 0x5f, 0x69, + 0xf5, 0x27, 0xec, 0x5e, 0x32, 0x67, 0xb2, 0x02, 0x39, 0x0b, 0xc9, 0xa7, 0x56, 0xaf, 0xbb, 0xef, + 0x22, 0xa7, 0x6a, 0xf2, 0x12, 0x59, 0x82, 0x44, 0x8f, 0x0e, 0xb6, 0x10, 0xc7, 0x15, 0x2b, 0x88, + 0x2b, 0x26, 0xce, 0xc2, 0x64, 0xb0, 0xab, 0xe9, 0x74, 0x5b, 0x7b, 0xff, 0xfd, 
0xf7, 0xdf, 0x57, + 0x8b, 0xfb, 0x70, 0xc6, 0x7b, 0x89, 0x43, 0xd3, 0x7d, 0x04, 0x85, 0xbe, 0xe5, 0x34, 0x3b, 0x3d, + 0xbb, 0xd5, 0xef, 0x1f, 0x36, 0x9f, 0x3a, 0x76, 0xb3, 0x65, 0x37, 0x9d, 0xf1, 0x5e, 0x6b, 0x84, + 0x4b, 0x20, 0xeb, 0xe4, 0x4c, 0xdf, 0x72, 0xea, 0xcc, 0xf0, 0x5d, 0xc7, 0xbe, 0x63, 0x37, 0xa8, + 0x55, 0xf1, 0xb3, 0x38, 0x64, 0x36, 0x0f, 0x3d, 0xfe, 0x33, 0x90, 0xd8, 0x73, 0x26, 0x36, 0x5b, + 0xcf, 0x84, 0xc9, 0x0a, 0xfe, 0x3e, 0xa9, 0xc2, 0x3e, 0x9d, 0x81, 0xc4, 0x7b, 0x13, 0xc7, 0xb5, + 0x70, 0xca, 0x19, 0x93, 0x15, 0xe8, 0x8a, 0x0d, 0x2d, 0xb7, 0x10, 0xc7, 0x6b, 0x0a, 0xfa, 0x18, + 0xac, 0x41, 0xe2, 0x58, 0x6b, 0x40, 0x96, 0x21, 0xe9, 0xd0, 0x3d, 0x18, 0x17, 0x92, 0x78, 0x0f, + 0x1b, 0x32, 0x10, 0x77, 0xc7, 0xe4, 0x38, 0xf2, 0x00, 0x16, 0x9e, 0x5a, 0xcd, 0xc1, 0x64, 0xec, + 0x36, 0xbb, 0x4e, 0xb3, 0x6d, 0x59, 0x43, 0x6b, 0x54, 0x98, 0xc3, 0xde, 0x42, 0x1e, 0x62, 0xd6, + 0x82, 0x9a, 0xf3, 0x4f, 0xad, 0xcd, 0xc9, 0xd8, 0x5d, 0x77, 0xee, 0xa3, 0x1d, 0x59, 0x85, 0xcc, + 0xc8, 0xa2, 0x7e, 0x81, 0x0e, 0x39, 0x37, 0x3d, 0x82, 0x90, 0x71, 0x7a, 0x64, 0x0d, 0xb1, 0x82, + 0xdc, 0x84, 0xf4, 0x6e, 0xef, 0x89, 0x35, 0xde, 0xb7, 0xda, 0x85, 0x94, 0xae, 0x94, 0xe6, 0xcb, + 0x17, 0x44, 0x2b, 0x7f, 0x81, 0x97, 0xee, 0x39, 0x7d, 0x67, 0x64, 0xfa, 0x60, 0xf2, 0x1a, 0x64, + 0xc6, 0xce, 0xc0, 0x62, 0x6a, 0x4f, 0x63, 0xb0, 0xbd, 0x3c, 0xdb, 0x72, 0xdb, 0x19, 0x58, 0x9e, + 0x57, 0xf3, 0x2c, 0xc8, 0x05, 0x36, 0xdc, 0x5d, 0x7a, 0x98, 0x28, 0x00, 0x5e, 0xf8, 0xd0, 0x41, + 0xe1, 0xe1, 0x82, 0x2c, 0xd2, 0x41, 0x75, 0x3b, 0x34, 0x67, 0x2b, 0x64, 0xf1, 0x2c, 0xef, 0x97, + 0x17, 0x5f, 0x81, 0x8c, 0x4f, 0x18, 0xb8, 0x43, 0xe6, 0x82, 0x32, 0xe8, 0x21, 0x98, 0x3b, 0x64, + 0xfe, 0xe7, 0x45, 0x48, 0xe0, 0xc0, 0x69, 0xe4, 0x32, 0xd7, 0x68, 0xa0, 0xcc, 0x40, 0x62, 0xdd, + 0x5c, 0x5b, 0xdb, 0xd2, 0x14, 0x8c, 0x99, 0x0f, 0xdf, 0x5e, 0xd3, 0x54, 0x41, 0xbf, 0xbf, 0xad, + 0x42, 0x6c, 0xed, 0x00, 0x95, 0xd3, 0x6e, 0xb9, 0x2d, 0xef, 0x0d, 0xa7, 0xcf, 0xa4, 0x06, 0x99, + 0x41, 0xcb, 0xeb, 0x4b, 0xc5, 0x25, 0x0e, 0xf9, 0x92, 0xb5, 0x03, 0x77, 0x69, 0xb3, 0xc5, 0x7a, + 0x5e, 0xb3, 0xdd, 0xd1, 0xa1, 0x99, 0x1e, 0xf0, 0xe2, 0xe2, 0xab, 0x30, 0x17, 0x6a, 0x12, 0x5f, + 0xd1, 0xc4, 0x8c, 0x57, 0x34, 0xc1, 0x5f, 0xd1, 0x9a, 0x7a, 0x4b, 0x29, 0xd7, 0x20, 0x3e, 0x70, + 0x46, 0x16, 0x79, 0x6e, 0xe6, 0x02, 0x17, 0xba, 0x28, 0x99, 0x7c, 0x64, 0x28, 0x26, 0xda, 0x94, + 0x5f, 0x86, 0xb8, 0x6b, 0x1d, 0xb8, 0xcf, 0xb2, 0xdd, 0x67, 0xf3, 0xa3, 0x90, 0xf2, 0x75, 0x48, + 0xda, 0x93, 0xc1, 0xae, 0x35, 0x7a, 0x16, 0xb8, 0x87, 0x03, 0xe3, 0xa0, 0xe2, 0x3b, 0xa0, 0xdd, + 0x73, 0x06, 0xc3, 0xbe, 0x75, 0xb0, 0x76, 0xe0, 0x5a, 0xf6, 0xb8, 0xe7, 0xd8, 0x74, 0x0e, 0x9d, + 0xde, 0x08, 0xdd, 0x1a, 0xce, 0x01, 0x0b, 0xd4, 0xcd, 0x8c, 0xad, 0x3d, 0xc7, 0x6e, 0xf3, 0xa9, + 0xf1, 0x12, 0x45, 0xbb, 0xfb, 0xbd, 0x11, 0xf5, 0x68, 0x34, 0xf8, 0xb0, 0x42, 0x71, 0x1d, 0xf2, + 0xfc, 0x18, 0x36, 0xe6, 0x1d, 0x17, 0xaf, 0x42, 0xce, 0xab, 0xc2, 0x5f, 0x7e, 0xd2, 0x10, 0x7f, + 0xbc, 0x66, 0x36, 0xb4, 0x53, 0x74, 0x5f, 0x1b, 0x5b, 0x6b, 0x9a, 0x42, 0x1f, 0x76, 0xde, 0x6d, + 0x84, 0xf6, 0xf2, 0x79, 0xc8, 0xf9, 0x63, 0xdf, 0xb6, 0x5c, 0x6c, 0xa1, 0x51, 0x2a, 0x55, 0x53, + 0xd3, 0x4a, 0x31, 0x05, 0x89, 0xb5, 0xc1, 0xd0, 0x3d, 0x2c, 0xfe, 0x12, 0x64, 0x39, 0xe8, 0x61, + 0x6f, 0xec, 0x92, 0xdb, 0x90, 0x1a, 0xf0, 0xf9, 0x2a, 0x98, 0x8b, 0x86, 0x65, 0x1d, 0x20, 0xbd, + 0x67, 0xd3, 0xc3, 0x2f, 0x56, 0x20, 0x25, 0xb8, 0x77, 0xee, 0x79, 0x54, 0xd1, 0xf3, 0x30, 0x1f, + 0x15, 0x13, 0x7c, 0x54, 0x71, 0x13, 0x52, 0x2c, 0x30, 0x8f, 0x31, 0xdd, 0x60, 0xe7, 0x77, 0xa6, + 0x31, 
0x26, 0xbe, 0x2c, 0xab, 0x63, 0x39, 0xd4, 0x65, 0xc8, 0xe2, 0x3b, 0xe3, 0xab, 0x90, 0x7a, + 0x73, 0xc0, 0x2a, 0xa6, 0xf8, 0x3f, 0x4a, 0x40, 0xda, 0x5b, 0x2b, 0x72, 0x01, 0x92, 0xec, 0x10, + 0x8b, 0x54, 0xde, 0xa5, 0x4e, 0x02, 0x8f, 0xad, 0xe4, 0x02, 0xa4, 0xf8, 0x41, 0x95, 0x07, 0x1c, + 0xb5, 0x52, 0x36, 0x93, 0xec, 0x60, 0xea, 0x37, 0x56, 0x0d, 0xf4, 0x93, 0xec, 0xba, 0x26, 0xc9, + 0x8e, 0x9e, 0x44, 0x87, 0x8c, 0x7f, 0xd8, 0xc4, 0x10, 0xc1, 0xef, 0x66, 0xd2, 0xde, 0xe9, 0x52, + 0x40, 0x54, 0x0d, 0x74, 0xa0, 0xfc, 0x22, 0x26, 0x5d, 0x0f, 0xf2, 0xa6, 0xb4, 0x77, 0x64, 0xc4, + 0x5f, 0x9e, 0xbc, 0x5b, 0x97, 0x14, 0x3f, 0x24, 0x06, 0x80, 0xaa, 0x81, 0x9e, 0xc9, 0xbb, 0x62, + 0x49, 0xf1, 0x83, 0x20, 0xb9, 0x4c, 0x87, 0x88, 0x07, 0x3b, 0xf4, 0x3f, 0xc1, 0x7d, 0x4a, 0x92, + 0x1d, 0xf7, 0xc8, 0x15, 0xca, 0xc0, 0x4e, 0x6f, 0xe8, 0x1a, 0x82, 0xcb, 0x93, 0x14, 0x3f, 0xd4, + 0x91, 0x6b, 0x14, 0xc2, 0x96, 0xbf, 0x00, 0xcf, 0xb8, 0x29, 0x49, 0xf1, 0x9b, 0x12, 0xa2, 0xd3, + 0x0e, 0xd1, 0x43, 0xa1, 0x57, 0x12, 0x6e, 0x45, 0x92, 0xec, 0x56, 0x84, 0x5c, 0x42, 0x3a, 0x36, + 0xa9, 0x5c, 0x70, 0x03, 0x92, 0xe2, 0xa7, 0xc0, 0xa0, 0x1d, 0x73, 0x49, 0xff, 0xb6, 0x23, 0xc5, + 0xcf, 0x79, 0xe4, 0x16, 0xdd, 0x2f, 0xaa, 0xf0, 0xc2, 0x3c, 0xfa, 0xe2, 0x45, 0x51, 0x7a, 0xde, + 0xae, 0x32, 0x57, 0x5c, 0x63, 0x6e, 0xcc, 0x4c, 0xd4, 0xf1, 0x8d, 0x58, 0xa4, 0x96, 0x8f, 0x7a, + 0x76, 0xa7, 0x90, 0xc7, 0xb5, 0x88, 0xf5, 0xec, 0x8e, 0x99, 0xa8, 0xd3, 0x1a, 0xa6, 0x82, 0x2d, + 0xda, 0xa6, 0x61, 0x5b, 0xfc, 0x3a, 0x6b, 0xa4, 0x55, 0xa4, 0x00, 0x89, 0x7a, 0x73, 0xab, 0x65, + 0x17, 0x16, 0x98, 0x9d, 0xdd, 0xb2, 0xcd, 0x78, 0x7d, 0xab, 0x65, 0x93, 0x97, 0x21, 0x36, 0x9e, + 0xec, 0x16, 0xc8, 0xf4, 0xcf, 0x82, 0xdb, 0x93, 0x5d, 0x6f, 0x30, 0x26, 0xc5, 0x90, 0x0b, 0x90, + 0x1e, 0xbb, 0xa3, 0xe6, 0x2f, 0x58, 0x23, 0xa7, 0x70, 0x1a, 0x97, 0xf1, 0x94, 0x99, 0x1a, 0xbb, + 0xa3, 0xc7, 0xd6, 0xc8, 0x39, 0xa6, 0x0f, 0x2e, 0x5e, 0x82, 0xac, 0xc0, 0x4b, 0xf2, 0xa0, 0xd8, + 0x2c, 0x81, 0xa9, 0x29, 0x37, 0x4d, 0xc5, 0x2e, 0xbe, 0x03, 0x39, 0xef, 0x88, 0x85, 0x33, 0x36, + 0xe8, 0xdb, 0xd4, 0x77, 0x46, 0xf8, 0x96, 0xce, 0x97, 0x2f, 0x85, 0x23, 0x66, 0x00, 0xe4, 0x91, + 0x8b, 0x81, 0x8b, 0x5a, 0x64, 0x30, 0x4a, 0xf1, 0x07, 0x0a, 0xe4, 0x36, 0x9d, 0x51, 0xf0, 0xfb, + 0xc5, 0x19, 0x48, 0xec, 0x3a, 0x4e, 0x7f, 0x8c, 0xc4, 0x69, 0x93, 0x15, 0xc8, 0x8b, 0x90, 0xc3, + 0x07, 0xef, 0x90, 0xac, 0xfa, 0xb7, 0x40, 0x59, 0xac, 0xe7, 0xe7, 0x62, 0x02, 0xf1, 0x9e, 0xed, + 0x8e, 0xb9, 0x47, 0xc3, 0x67, 0xf2, 0x05, 0xc8, 0xd2, 0xbf, 0x9e, 0x65, 0xdc, 0xcf, 0xa6, 0x81, + 0x56, 0x73, 0xc3, 0x97, 0x60, 0x0e, 0x35, 0xe0, 0xc3, 0x52, 0xfe, 0x8d, 0x4f, 0x8e, 0x35, 0x70, + 0x60, 0x01, 0x52, 0xcc, 0x21, 0x8c, 0xf1, 0x07, 0xdf, 0x8c, 0xe9, 0x15, 0xa9, 0x9b, 0xc5, 0x83, + 0x0a, 0xcb, 0x40, 0x52, 0x26, 0x2f, 0x15, 0xef, 0x41, 0x1a, 0xc3, 0x65, 0xa3, 0xdf, 0x26, 0x2f, + 0x80, 0xd2, 0x2d, 0x58, 0x18, 0xae, 0xcf, 0x86, 0x4e, 0x21, 0x1c, 0xb0, 0xb4, 0x6e, 0x2a, 0xdd, + 0xc5, 0x05, 0x50, 0xd6, 0xe9, 0xb1, 0xe0, 0x80, 0x3b, 0x6c, 0xe5, 0xa0, 0xf8, 0x16, 0x27, 0xd9, + 0xb2, 0x9e, 0xca, 0x49, 0xb6, 0xac, 0xa7, 0x8c, 0xe4, 0xf2, 0x14, 0x09, 0x2d, 0x1d, 0xf2, 0xdf, + 0xc0, 0x95, 0xc3, 0x62, 0x05, 0xe6, 0xf0, 0x45, 0xed, 0xd9, 0xdd, 0x47, 0x4e, 0xcf, 0xc6, 0x83, + 0x48, 0x07, 0x13, 0x38, 0xc5, 0x54, 0x3a, 0x74, 0x1f, 0xac, 0x83, 0xd6, 0x1e, 0x4b, 0x87, 0xd3, + 0x26, 0x2b, 0x14, 0xbf, 0x1f, 0x87, 0x79, 0xee, 0x64, 0xdf, 0xed, 0xb9, 0xfb, 0x9b, 0xad, 0x21, + 0xd9, 0x82, 0x1c, 0xf5, 0xaf, 0xcd, 0x41, 0x6b, 0x38, 0xa4, 0x2f, 0xb2, 0x82, 0xa1, 0xf9, 0xda, + 0x0c, 0xb7, 0xcd, 0x2d, 0x96, 
0xb6, 0x5a, 0x03, 0x6b, 0x93, 0xa1, 0x59, 0xa0, 0xce, 0xda, 0x41, + 0x0d, 0x79, 0x00, 0xd9, 0xc1, 0xb8, 0xeb, 0xd3, 0xb1, 0x48, 0x7f, 0x55, 0x42, 0xb7, 0x39, 0xee, + 0x86, 0xd8, 0x60, 0xe0, 0x57, 0xd0, 0xc1, 0x51, 0xef, 0xec, 0xb3, 0xc5, 0x8e, 0x1c, 0x1c, 0x75, + 0x25, 0xe1, 0xc1, 0xed, 0x06, 0x35, 0xa4, 0x0e, 0x40, 0x5f, 0x35, 0xd7, 0xa1, 0x27, 0x3c, 0xd4, + 0x52, 0xb6, 0x5c, 0x92, 0xb0, 0x6d, 0xbb, 0xa3, 0x1d, 0x67, 0xdb, 0x1d, 0xf1, 0x84, 0x64, 0xcc, + 0x8b, 0x8b, 0xaf, 0x83, 0x16, 0x5d, 0x85, 0xa3, 0x72, 0x92, 0x8c, 0x90, 0x93, 0x2c, 0xfe, 0x1c, + 0xe4, 0x23, 0xd3, 0x16, 0xcd, 0x09, 0x33, 0xbf, 0x21, 0x9a, 0x67, 0xcb, 0xe7, 0x43, 0xdf, 0x68, + 0x88, 0x5b, 0x2f, 0x32, 0xbf, 0x0e, 0x5a, 0x74, 0x09, 0x44, 0xea, 0xb4, 0xe4, 0x40, 0x83, 0xf6, + 0xaf, 0xc2, 0x5c, 0x68, 0xd2, 0xa2, 0x71, 0xe6, 0x88, 0x69, 0x15, 0x7f, 0x25, 0x01, 0x89, 0x86, + 0x6d, 0x39, 0x1d, 0x72, 0x2e, 0x1c, 0x3b, 0xdf, 0x3c, 0xe5, 0xc5, 0xcd, 0xf3, 0x91, 0xb8, 0xf9, + 0xe6, 0x29, 0x3f, 0x6a, 0x9e, 0x8f, 0x44, 0x4d, 0xaf, 0xa9, 0x6a, 0x90, 0x8b, 0x53, 0x31, 0xf3, + 0xcd, 0x53, 0x42, 0xc0, 0xbc, 0x38, 0x15, 0x30, 0x83, 0xe6, 0xaa, 0x41, 0x1d, 0x6c, 0x38, 0x5a, + 0xbe, 0x79, 0x2a, 0x88, 0x94, 0x17, 0xa2, 0x91, 0xd2, 0x6f, 0xac, 0x1a, 0x6c, 0x48, 0x42, 0x94, + 0xc4, 0x21, 0xb1, 0xf8, 0x78, 0x21, 0x1a, 0x1f, 0xd1, 0x8e, 0x47, 0xc6, 0x0b, 0xd1, 0xc8, 0x88, + 0x8d, 0x3c, 0x12, 0x9e, 0x8f, 0x44, 0x42, 0x24, 0x65, 0x21, 0xf0, 0x42, 0x34, 0x04, 0x32, 0x3b, + 0x61, 0xa4, 0x62, 0xfc, 0xf3, 0x1b, 0xab, 0x06, 0x31, 0x22, 0xc1, 0x4f, 0x76, 0x10, 0xc1, 0xdd, + 0xc0, 0x30, 0x50, 0xa5, 0x0b, 0xe7, 0x25, 0xa8, 0x79, 0xe9, 0x27, 0x2c, 0xb8, 0xa2, 0x5e, 0x82, + 0x66, 0x40, 0xaa, 0xc3, 0xcf, 0xea, 0x1a, 0x7a, 0xb2, 0x90, 0x38, 0x51, 0x02, 0x4b, 0xf5, 0x26, + 0x7a, 0x34, 0x3a, 0xbb, 0x0e, 0x3b, 0x70, 0x94, 0x60, 0xae, 0xde, 0x7c, 0xd8, 0x1a, 0x75, 0x29, + 0x74, 0xa7, 0xd5, 0xf5, 0x6f, 0x3d, 0xa8, 0x0a, 0xb2, 0x75, 0xde, 0xb2, 0xd3, 0xea, 0x92, 0xb3, + 0x9e, 0xc4, 0xda, 0xd8, 0xaa, 0x70, 0x91, 0x2d, 0x9e, 0xa3, 0x4b, 0xc7, 0xc8, 0xd0, 0x37, 0x2e, + 0x70, 0xdf, 0x78, 0x37, 0x05, 0x89, 0x89, 0xdd, 0x73, 0xec, 0xbb, 0x19, 0x48, 0xb9, 0xce, 0x68, + 0xd0, 0x72, 0x9d, 0xe2, 0x0f, 0x15, 0x80, 0x7b, 0xce, 0x60, 0x30, 0xb1, 0x7b, 0xef, 0x4d, 0x2c, + 0x72, 0x09, 0xb2, 0x83, 0xd6, 0x13, 0xab, 0x39, 0xb0, 0x9a, 0x7b, 0x23, 0xef, 0x6d, 0xc8, 0xd0, + 0xaa, 0x4d, 0xeb, 0xde, 0xe8, 0x90, 0x14, 0xbc, 0x04, 0x1e, 0x15, 0x84, 0xc2, 0xe4, 0x09, 0xfd, + 0x19, 0x9e, 0x8e, 0x26, 0xf9, 0x4e, 0x7a, 0x09, 0x29, 0x3b, 0xe4, 0xa4, 0xf8, 0x1e, 0xb2, 0x63, + 0xce, 0x39, 0x48, 0xba, 0xd6, 0x60, 0xd8, 0xdc, 0x43, 0xc1, 0x50, 0x51, 0x24, 0x68, 0xf9, 0x1e, + 0xb9, 0x01, 0xb1, 0x3d, 0xa7, 0x8f, 0x52, 0x39, 0x72, 0x77, 0x28, 0x92, 0xbc, 0x04, 0xb1, 0xc1, + 0x98, 0xc9, 0x27, 0x5b, 0x3e, 0x1d, 0xca, 0x20, 0x58, 0xc8, 0xa2, 0xc0, 0xc1, 0xb8, 0xeb, 0xcf, + 0xbd, 0xf8, 0xa9, 0x0a, 0x69, 0xba, 0x5f, 0x6f, 0xef, 0xd4, 0x6f, 0xe1, 0xb1, 0x61, 0xaf, 0xd5, + 0xc7, 0x1b, 0x02, 0xfa, 0x9a, 0xf2, 0x12, 0xad, 0xff, 0x8a, 0xb5, 0xe7, 0x3a, 0x23, 0x74, 0xcd, + 0x19, 0x93, 0x97, 0xe8, 0x92, 0xb3, 0xac, 0x38, 0xc6, 0x67, 0xc9, 0x8a, 0x98, 0xd1, 0xb7, 0x86, + 0x4d, 0xea, 0x03, 0x98, 0xbf, 0x0c, 0x9d, 0xae, 0xbd, 0xee, 0xe8, 0xd1, 0xed, 0x81, 0x75, 0xc8, + 0xfc, 0x64, 0x72, 0x80, 0x05, 0xf2, 0xb3, 0xec, 0xc8, 0xc7, 0x76, 0x92, 0x7d, 0x5f, 0x55, 0x7c, + 0x96, 0xf1, 0x3b, 0x14, 0x14, 0x9c, 0xfb, 0xb0, 0xb8, 0x78, 0x1b, 0xb2, 0x02, 0xef, 0x51, 0xae, + 0x28, 0x16, 0xf1, 0x63, 0x21, 0xd6, 0xa3, 0x6e, 0x75, 0x44, 0x3f, 0x46, 0x57, 0xd4, 0xa1, 0x1a, + 0xbe, 0x9a, 0x87, 0x58, 0xbd, 0xd1, 0xa0, 0x79, 0x56, 
0xbd, 0xd1, 0x58, 0xd1, 0x94, 0xda, 0x0a, + 0xa4, 0xbb, 0x23, 0xcb, 0xa2, 0xae, 0xf7, 0x59, 0xe7, 0xbc, 0x2f, 0xe3, 0xb2, 0xfa, 0xb0, 0xda, + 0x5b, 0x90, 0xda, 0x63, 0x27, 0x3d, 0xf2, 0xcc, 0x5b, 0x8d, 0xc2, 0x1f, 0xb3, 0xdb, 0xb5, 0xe7, + 0x45, 0x40, 0xf4, 0x7c, 0x68, 0x7a, 0x3c, 0xb5, 0x1d, 0xc8, 0x8c, 0x9a, 0x47, 0x93, 0x7e, 0xc0, + 0x62, 0xb9, 0x9c, 0x34, 0x3d, 0xe2, 0x55, 0xb5, 0x75, 0x58, 0xb0, 0x1d, 0xef, 0x47, 0xbe, 0x66, + 0x9b, 0x7b, 0xb2, 0x59, 0x49, 0xb4, 0xd7, 0x81, 0xc5, 0x3e, 0x15, 0xb0, 0x1d, 0xde, 0xc0, 0xbc, + 0x5f, 0x6d, 0x0d, 0x34, 0x81, 0xa8, 0xc3, 0xdc, 0xa5, 0x8c, 0xa7, 0xc3, 0xbe, 0x4e, 0xf0, 0x79, + 0xd0, 0xc3, 0x46, 0x68, 0xb8, 0x0f, 0x94, 0xd1, 0x74, 0xd9, 0xc7, 0x1e, 0x3e, 0x0d, 0x86, 0x95, + 0x69, 0x1a, 0x1a, 0x11, 0x64, 0x34, 0xfb, 0xec, 0x4b, 0x10, 0x91, 0xa6, 0x6a, 0x44, 0x56, 0x67, + 0x72, 0x8c, 0xe1, 0xf4, 0xd8, 0xa7, 0x1c, 0x3e, 0x0f, 0x0b, 0x38, 0x33, 0x88, 0x8e, 0x1a, 0xd0, + 0x97, 0xd9, 0x77, 0x1e, 0x21, 0xa2, 0xa9, 0x11, 0x8d, 0x8f, 0x31, 0xa2, 0x27, 0xec, 0xb3, 0x0a, + 0x9f, 0x68, 0x7b, 0xd6, 0x88, 0xc6, 0xc7, 0x18, 0x51, 0x9f, 0x7d, 0x72, 0x11, 0x22, 0xaa, 0x1a, + 0xb5, 0x0d, 0x20, 0xe2, 0xc6, 0xf3, 0xe8, 0x2c, 0x65, 0x1a, 0xb0, 0x4f, 0x69, 0x82, 0xad, 0x67, + 0x46, 0xb3, 0xa8, 0x8e, 0x1a, 0x94, 0xcd, 0xbe, 0xb3, 0x09, 0x53, 0x55, 0x8d, 0xda, 0x03, 0x38, + 0x2d, 0x4e, 0xef, 0x58, 0xc3, 0x72, 0xd8, 0x47, 0x22, 0xc1, 0x04, 0xb9, 0xd5, 0x4c, 0xb2, 0xa3, + 0x06, 0x36, 0x64, 0x1f, 0x90, 0x44, 0xc8, 0xaa, 0x46, 0xed, 0x1e, 0xe4, 0x05, 0xb2, 0x5d, 0xbc, + 0x57, 0x90, 0x11, 0xbd, 0xc7, 0x3e, 0x7b, 0xf2, 0x89, 0x68, 0x46, 0x15, 0xdd, 0x3d, 0x96, 0x63, + 0x48, 0x69, 0x46, 0xec, 0xab, 0x9d, 0x60, 0x3c, 0x68, 0x13, 0x79, 0x51, 0x76, 0x59, 0x42, 0x22, + 0xe3, 0x19, 0xb3, 0x2f, 0x7a, 0x82, 0xe1, 0x50, 0x93, 0xda, 0x20, 0x34, 0x29, 0x8b, 0xa6, 0x19, + 0x52, 0x16, 0x17, 0x23, 0x62, 0x49, 0x02, 0x59, 0x12, 0xaf, 0xaf, 0x84, 0xe9, 0xd3, 0x62, 0xed, + 0x01, 0xcc, 0x9f, 0xc4, 0x65, 0x7d, 0xa0, 0xb0, 0xbb, 0x8c, 0xca, 0xd2, 0x8a, 0xb1, 0xb2, 0x6a, + 0xce, 0xb5, 0x43, 0x9e, 0x6b, 0x1d, 0xe6, 0x4e, 0xe0, 0xb6, 0x3e, 0x54, 0xd8, 0x8d, 0x00, 0xe5, + 0x32, 0x73, 0xed, 0xb0, 0xef, 0x9a, 0x3b, 0x81, 0xe3, 0xfa, 0x48, 0x61, 0x57, 0x48, 0x46, 0xd9, + 0xa7, 0xf1, 0x7c, 0xd7, 0xdc, 0x09, 0x1c, 0xd7, 0xc7, 0xec, 0xc4, 0xaf, 0x1a, 0x15, 0x91, 0x06, + 0x3d, 0xc5, 0xfc, 0x49, 0x1c, 0xd7, 0x27, 0x0a, 0x5e, 0x29, 0xa9, 0x86, 0xe1, 0xaf, 0x8f, 0xef, + 0xbb, 0xe6, 0x4f, 0xe2, 0xb8, 0xbe, 0xa6, 0xe0, 0xd5, 0x93, 0x6a, 0xac, 0x86, 0x88, 0xc2, 0x23, + 0x3a, 0x8e, 0xe3, 0xfa, 0x54, 0xc1, 0xfb, 0x20, 0xd5, 0xa8, 0xfa, 0x44, 0xdb, 0x53, 0x23, 0x3a, + 0x8e, 0xe3, 0xfa, 0x3a, 0x9e, 0xaf, 0x6a, 0xaa, 0x71, 0x33, 0x44, 0x84, 0xbe, 0x2b, 0x7f, 0x22, + 0xc7, 0xf5, 0x0d, 0x05, 0xaf, 0xee, 0x54, 0xe3, 0x96, 0xe9, 0x8d, 0x20, 0xf0, 0x5d, 0xf9, 0x13, + 0x39, 0xae, 0x6f, 0x2a, 0x78, 0xc7, 0xa7, 0x1a, 0xb7, 0xc3, 0x54, 0xe8, 0xbb, 0xb4, 0x93, 0x39, + 0xae, 0xcf, 0x14, 0xfc, 0xa2, 0x47, 0x5d, 0x5d, 0x36, 0xbd, 0x41, 0x08, 0xbe, 0x4b, 0x3b, 0x99, + 0xe3, 0xfa, 0x96, 0x82, 0x9f, 0xf9, 0xa8, 0xab, 0x2b, 0x11, 0xb2, 0xaa, 0x51, 0x5b, 0x83, 0xdc, + 0xf1, 0x1d, 0xd7, 0xb7, 0xc5, 0x1b, 0xd4, 0x6c, 0x5b, 0xf0, 0x5e, 0x8f, 0x85, 0xfd, 0x3b, 0x86, + 0xeb, 0xfa, 0x0e, 0x26, 0x7f, 0xb5, 0xe7, 0xde, 0x64, 0xf7, 0x8c, 0xcc, 0xe4, 0x95, 0xb6, 0xd5, + 0x79, 0xad, 0xe3, 0x38, 0xc1, 0x96, 0x32, 0x87, 0xd6, 0x08, 0xde, 0x9e, 0x63, 0x78, 0xb3, 0xef, + 0x2a, 0x78, 0x2d, 0x99, 0xe3, 0xd4, 0x68, 0xe1, 0xbf, 0x47, 0xcc, 0xb5, 0xd9, 0xc1, 0x9c, 0x8f, + 0xf6, 0x6b, 0xdf, 0x53, 0x4e, 0xe6, 0xd8, 0x6a, 0xb1, 0xc6, 0xd6, 0x9a, 0xbf, 
0x38, 0x58, 0xf3, + 0x06, 0xc4, 0x0f, 0xca, 0xcb, 0x2b, 0xe1, 0x14, 0x4f, 0xbc, 0x95, 0x67, 0xee, 0x2c, 0x5b, 0x5e, + 0x08, 0xfd, 0x7c, 0x31, 0x18, 0xba, 0x87, 0x26, 0x5a, 0x72, 0x86, 0xb2, 0x84, 0xe1, 0x43, 0x29, + 0x43, 0x99, 0x33, 0x54, 0x24, 0x0c, 0x1f, 0x49, 0x19, 0x2a, 0x9c, 0xc1, 0x90, 0x30, 0x7c, 0x2c, + 0x65, 0x30, 0x38, 0xc3, 0xaa, 0x84, 0xe1, 0x13, 0x29, 0xc3, 0x2a, 0x67, 0xa8, 0x4a, 0x18, 0xbe, + 0x26, 0x65, 0xa8, 0x72, 0x86, 0x9b, 0x12, 0x86, 0x4f, 0xa5, 0x0c, 0x37, 0x39, 0xc3, 0x2d, 0x09, + 0xc3, 0xd7, 0xa5, 0x0c, 0xb7, 0x38, 0xc3, 0x6d, 0x09, 0xc3, 0x37, 0xa4, 0x0c, 0xb7, 0x19, 0xc3, + 0xca, 0xb2, 0x84, 0xe1, 0x9b, 0x32, 0x86, 0x95, 0x65, 0xce, 0x20, 0xd3, 0xe4, 0x67, 0x52, 0x06, + 0xae, 0xc9, 0x15, 0x99, 0x26, 0xbf, 0x25, 0x65, 0xe0, 0x9a, 0x5c, 0x91, 0x69, 0xf2, 0xdb, 0x52, + 0x06, 0xae, 0xc9, 0x15, 0x99, 0x26, 0xbf, 0x23, 0x65, 0xe0, 0x9a, 0x5c, 0x91, 0x69, 0xf2, 0xbb, + 0x52, 0x06, 0xae, 0xc9, 0x15, 0x99, 0x26, 0xbf, 0x27, 0x65, 0xe0, 0x9a, 0x5c, 0x91, 0x69, 0xf2, + 0x4f, 0xa4, 0x0c, 0x5c, 0x93, 0x2b, 0x32, 0x4d, 0xfe, 0xa9, 0x94, 0x81, 0x6b, 0x72, 0x45, 0xa6, + 0xc9, 0x3f, 0x93, 0x32, 0x70, 0x4d, 0x96, 0x65, 0x9a, 0xfc, 0xbe, 0x8c, 0xa1, 0xcc, 0x35, 0x59, + 0x96, 0x69, 0xf2, 0xcf, 0xa5, 0x0c, 0x5c, 0x93, 0x65, 0x99, 0x26, 0xff, 0x42, 0xca, 0xc0, 0x35, + 0x59, 0x96, 0x69, 0xf2, 0x07, 0x52, 0x06, 0xae, 0xc9, 0xb2, 0x4c, 0x93, 0x7f, 0x29, 0x65, 0xe0, + 0x9a, 0x2c, 0xcb, 0x34, 0xf9, 0x57, 0x52, 0x06, 0xae, 0xc9, 0xb2, 0x4c, 0x93, 0x7f, 0x2d, 0x65, + 0xe0, 0x9a, 0x2c, 0xcb, 0x34, 0xf9, 0x37, 0x52, 0x06, 0xae, 0xc9, 0xb2, 0x4c, 0x93, 0x7f, 0x2b, + 0x65, 0xe0, 0x9a, 0x2c, 0xcb, 0x34, 0xf9, 0x77, 0x52, 0x06, 0xae, 0xc9, 0x8a, 0x4c, 0x93, 0x7f, + 0x2f, 0x63, 0xa8, 0x70, 0x4d, 0x56, 0x64, 0x9a, 0xfc, 0x07, 0x29, 0x03, 0xd7, 0x64, 0x45, 0xa6, + 0xc9, 0x7f, 0x94, 0x32, 0x70, 0x4d, 0x56, 0x64, 0x9a, 0xfc, 0x27, 0x29, 0x03, 0xd7, 0x64, 0x45, + 0xa6, 0xc9, 0x7f, 0x96, 0x32, 0x70, 0x4d, 0x56, 0x64, 0x9a, 0xfc, 0x17, 0x29, 0x03, 0xd7, 0x64, + 0x45, 0xa6, 0xc9, 0x7f, 0x95, 0x32, 0x70, 0x4d, 0x56, 0x64, 0x9a, 0xfc, 0x37, 0x29, 0x03, 0xd7, + 0x64, 0x45, 0xa6, 0xc9, 0x1f, 0x4a, 0x19, 0xb8, 0x26, 0x2b, 0x32, 0x4d, 0xfe, 0xbb, 0x94, 0x81, + 0x6b, 0xd2, 0x90, 0x69, 0xf2, 0x3f, 0x64, 0x0c, 0x06, 0xd7, 0xa4, 0x21, 0xd3, 0xe4, 0x7f, 0x4a, + 0x19, 0xb8, 0x26, 0x0d, 0x99, 0x26, 0xff, 0x4b, 0xca, 0xc0, 0x35, 0x69, 0xc8, 0x34, 0xf9, 0xdf, + 0x52, 0x06, 0xae, 0x49, 0x43, 0xa6, 0xc9, 0xff, 0x91, 0x32, 0x70, 0x4d, 0x1a, 0x32, 0x4d, 0xfe, + 0xaf, 0x94, 0x81, 0x6b, 0xd2, 0x90, 0x69, 0xf2, 0x47, 0x52, 0x06, 0xae, 0x49, 0x43, 0xa6, 0xc9, + 0x1f, 0x4b, 0x19, 0xb8, 0x26, 0x0d, 0x99, 0x26, 0x7f, 0x22, 0x65, 0xe0, 0x9a, 0x34, 0x64, 0x9a, + 0xfc, 0xa9, 0x94, 0x81, 0x6b, 0x72, 0x55, 0xa6, 0xc9, 0xff, 0x93, 0x31, 0xac, 0x2e, 0xdf, 0xbd, + 0xfe, 0xf8, 0x5a, 0xb7, 0xe7, 0xee, 0x4f, 0x76, 0x97, 0xf6, 0x9c, 0xc1, 0x8d, 0xae, 0xd3, 0x6f, + 0xd9, 0xdd, 0x1b, 0x08, 0xdb, 0x9d, 0x74, 0x6e, 0x04, 0xff, 0xcc, 0xce, 0x4c, 0xff, 0x3f, 0x00, + 0x00, 0xff, 0xff, 0x8e, 0xb4, 0x0c, 0xbd, 0xe4, 0x3e, 0x00, 0x00, +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/test_proto/test.proto b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/test_proto/test.proto new file mode 100644 index 0000000..f339e05 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/test_proto/test.proto @@ -0,0 +1,570 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// 
Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// A feature-rich test file for the protocol compiler and libraries. + +syntax = "proto2"; + +option go_package = "github.com/golang/protobuf/proto/test_proto"; + +package test_proto; + +enum FOO { FOO1 = 1; }; + +message GoEnum { + required FOO foo = 1; +} + +message GoTestField { + required string Label = 1; + required string Type = 2; +} + +message GoTest { + // An enum, for completeness. + enum KIND { + VOID = 0; + + // Basic types + BOOL = 1; + BYTES = 2; + FINGERPRINT = 3; + FLOAT = 4; + INT = 5; + STRING = 6; + TIME = 7; + + // Groupings + TUPLE = 8; + ARRAY = 9; + MAP = 10; + + // Table types + TABLE = 11; + + // Functions + FUNCTION = 12; // last tag + }; + + // Some typical parameters + required KIND Kind = 1; + optional string Table = 2; + optional int32 Param = 3; + + // Required, repeated and optional foreign fields. 
+ required GoTestField RequiredField = 4; + repeated GoTestField RepeatedField = 5; + optional GoTestField OptionalField = 6; + + // Required fields of all basic types + required bool F_Bool_required = 10; + required int32 F_Int32_required = 11; + required int64 F_Int64_required = 12; + required fixed32 F_Fixed32_required = 13; + required fixed64 F_Fixed64_required = 14; + required uint32 F_Uint32_required = 15; + required uint64 F_Uint64_required = 16; + required float F_Float_required = 17; + required double F_Double_required = 18; + required string F_String_required = 19; + required bytes F_Bytes_required = 101; + required sint32 F_Sint32_required = 102; + required sint64 F_Sint64_required = 103; + required sfixed32 F_Sfixed32_required = 104; + required sfixed64 F_Sfixed64_required = 105; + + // Repeated fields of all basic types + repeated bool F_Bool_repeated = 20; + repeated int32 F_Int32_repeated = 21; + repeated int64 F_Int64_repeated = 22; + repeated fixed32 F_Fixed32_repeated = 23; + repeated fixed64 F_Fixed64_repeated = 24; + repeated uint32 F_Uint32_repeated = 25; + repeated uint64 F_Uint64_repeated = 26; + repeated float F_Float_repeated = 27; + repeated double F_Double_repeated = 28; + repeated string F_String_repeated = 29; + repeated bytes F_Bytes_repeated = 201; + repeated sint32 F_Sint32_repeated = 202; + repeated sint64 F_Sint64_repeated = 203; + repeated sfixed32 F_Sfixed32_repeated = 204; + repeated sfixed64 F_Sfixed64_repeated = 205; + + // Optional fields of all basic types + optional bool F_Bool_optional = 30; + optional int32 F_Int32_optional = 31; + optional int64 F_Int64_optional = 32; + optional fixed32 F_Fixed32_optional = 33; + optional fixed64 F_Fixed64_optional = 34; + optional uint32 F_Uint32_optional = 35; + optional uint64 F_Uint64_optional = 36; + optional float F_Float_optional = 37; + optional double F_Double_optional = 38; + optional string F_String_optional = 39; + optional bytes F_Bytes_optional = 301; + optional sint32 F_Sint32_optional = 302; + optional sint64 F_Sint64_optional = 303; + optional sfixed32 F_Sfixed32_optional = 304; + optional sfixed64 F_Sfixed64_optional = 305; + + // Default-valued fields of all basic types + optional bool F_Bool_defaulted = 40 [default=true]; + optional int32 F_Int32_defaulted = 41 [default=32]; + optional int64 F_Int64_defaulted = 42 [default=64]; + optional fixed32 F_Fixed32_defaulted = 43 [default=320]; + optional fixed64 F_Fixed64_defaulted = 44 [default=640]; + optional uint32 F_Uint32_defaulted = 45 [default=3200]; + optional uint64 F_Uint64_defaulted = 46 [default=6400]; + optional float F_Float_defaulted = 47 [default=314159.]; + optional double F_Double_defaulted = 48 [default=271828.]; + optional string F_String_defaulted = 49 [default="hello, \"world!\"\n"]; + optional bytes F_Bytes_defaulted = 401 [default="Bignose"]; + optional sint32 F_Sint32_defaulted = 402 [default = -32]; + optional sint64 F_Sint64_defaulted = 403 [default = -64]; + optional sfixed32 F_Sfixed32_defaulted = 404 [default = -32]; + optional sfixed64 F_Sfixed64_defaulted = 405 [default = -64]; + + // Packed repeated fields (no string or bytes). 
+ repeated bool F_Bool_repeated_packed = 50 [packed=true]; + repeated int32 F_Int32_repeated_packed = 51 [packed=true]; + repeated int64 F_Int64_repeated_packed = 52 [packed=true]; + repeated fixed32 F_Fixed32_repeated_packed = 53 [packed=true]; + repeated fixed64 F_Fixed64_repeated_packed = 54 [packed=true]; + repeated uint32 F_Uint32_repeated_packed = 55 [packed=true]; + repeated uint64 F_Uint64_repeated_packed = 56 [packed=true]; + repeated float F_Float_repeated_packed = 57 [packed=true]; + repeated double F_Double_repeated_packed = 58 [packed=true]; + repeated sint32 F_Sint32_repeated_packed = 502 [packed=true]; + repeated sint64 F_Sint64_repeated_packed = 503 [packed=true]; + repeated sfixed32 F_Sfixed32_repeated_packed = 504 [packed=true]; + repeated sfixed64 F_Sfixed64_repeated_packed = 505 [packed=true]; + + // Required, repeated, and optional groups. + required group RequiredGroup = 70 { + required string RequiredField = 71; + }; + + repeated group RepeatedGroup = 80 { + required string RequiredField = 81; + }; + + optional group OptionalGroup = 90 { + required string RequiredField = 91; + }; +} + +// For testing a group containing a required field. +message GoTestRequiredGroupField { + required group Group = 1 { + required int32 Field = 2; + }; +} + +// For testing skipping of unrecognized fields. +// Numbers are all big, larger than tag numbers in GoTestField, +// the message used in the corresponding test. +message GoSkipTest { + required int32 skip_int32 = 11; + required fixed32 skip_fixed32 = 12; + required fixed64 skip_fixed64 = 13; + required string skip_string = 14; + required group SkipGroup = 15 { + required int32 group_int32 = 16; + required string group_string = 17; + } +} + +// For testing packed/non-packed decoder switching. +// A serialized instance of one should be deserializable as the other. +message NonPackedTest { + repeated int32 a = 1; +} + +message PackedTest { + repeated int32 b = 1 [packed=true]; +} + +message MaxTag { + // Maximum possible tag number. + optional string last_field = 536870911; +} + +message OldMessage { + message Nested { + optional string name = 1; + } + optional Nested nested = 1; + + optional int32 num = 2; +} + +// NewMessage is wire compatible with OldMessage; +// imagine it as a future version. +message NewMessage { + message Nested { + optional string name = 1; + optional string food_group = 2; + } + optional Nested nested = 1; + + // This is an int32 in OldMessage. + optional int64 num = 2; +} + +// Smaller tests for ASCII formatting. + +message InnerMessage { + required string host = 1; + optional int32 port = 2 [default=4000]; + optional bool connected = 3; +} + +message OtherMessage { + optional int64 key = 1; + optional bytes value = 2; + optional float weight = 3; + optional InnerMessage inner = 4; + + extensions 100 to max; +} + +message RequiredInnerMessage { + required InnerMessage leo_finally_won_an_oscar = 1; +} + +message MyMessage { + required int32 count = 1; + optional string name = 2; + optional string quote = 3; + repeated string pet = 4; + optional InnerMessage inner = 5; + repeated OtherMessage others = 6; + optional RequiredInnerMessage we_must_go_deeper = 13; + repeated InnerMessage rep_inner = 12; + + enum Color { + RED = 0; + GREEN = 1; + BLUE = 2; + }; + optional Color bikeshed = 7; + + optional group SomeGroup = 8 { + optional int32 group_field = 9; + } + + // This field becomes [][]byte in the generated code. 
+ repeated bytes rep_bytes = 10; + + optional double bigfloat = 11; + + extensions 100 to max; +} + +message Ext { + extend MyMessage { + optional Ext more = 103; + optional string text = 104; + optional int32 number = 105; + } + + optional string data = 1; + map map_field = 2; +} + +extend MyMessage { + repeated string greeting = 106; + // leave field 200 unregistered for testing +} + +message ComplexExtension { + optional int32 first = 1; + optional int32 second = 2; + repeated int32 third = 3; +} + +extend OtherMessage { + optional ComplexExtension complex = 200; + repeated ComplexExtension r_complex = 201; +} + +message DefaultsMessage { + enum DefaultsEnum { + ZERO = 0; + ONE = 1; + TWO = 2; + }; + extensions 100 to max; +} + +extend DefaultsMessage { + optional double no_default_double = 101; + optional float no_default_float = 102; + optional int32 no_default_int32 = 103; + optional int64 no_default_int64 = 104; + optional uint32 no_default_uint32 = 105; + optional uint64 no_default_uint64 = 106; + optional sint32 no_default_sint32 = 107; + optional sint64 no_default_sint64 = 108; + optional fixed32 no_default_fixed32 = 109; + optional fixed64 no_default_fixed64 = 110; + optional sfixed32 no_default_sfixed32 = 111; + optional sfixed64 no_default_sfixed64 = 112; + optional bool no_default_bool = 113; + optional string no_default_string = 114; + optional bytes no_default_bytes = 115; + optional DefaultsMessage.DefaultsEnum no_default_enum = 116; + + optional double default_double = 201 [default = 3.1415]; + optional float default_float = 202 [default = 3.14]; + optional int32 default_int32 = 203 [default = 42]; + optional int64 default_int64 = 204 [default = 43]; + optional uint32 default_uint32 = 205 [default = 44]; + optional uint64 default_uint64 = 206 [default = 45]; + optional sint32 default_sint32 = 207 [default = 46]; + optional sint64 default_sint64 = 208 [default = 47]; + optional fixed32 default_fixed32 = 209 [default = 48]; + optional fixed64 default_fixed64 = 210 [default = 49]; + optional sfixed32 default_sfixed32 = 211 [default = 50]; + optional sfixed64 default_sfixed64 = 212 [default = 51]; + optional bool default_bool = 213 [default = true]; + optional string default_string = 214 [default = "Hello, string,def=foo"]; + optional bytes default_bytes = 215 [default = "Hello, bytes"]; + optional DefaultsMessage.DefaultsEnum default_enum = 216 [default = ONE]; +} + +message MyMessageSet { + option message_set_wire_format = true; + extensions 100 to max; +} + +message Empty { +} + +extend MyMessageSet { + optional Empty x201 = 201; + optional Empty x202 = 202; + optional Empty x203 = 203; + optional Empty x204 = 204; + optional Empty x205 = 205; + optional Empty x206 = 206; + optional Empty x207 = 207; + optional Empty x208 = 208; + optional Empty x209 = 209; + optional Empty x210 = 210; + optional Empty x211 = 211; + optional Empty x212 = 212; + optional Empty x213 = 213; + optional Empty x214 = 214; + optional Empty x215 = 215; + optional Empty x216 = 216; + optional Empty x217 = 217; + optional Empty x218 = 218; + optional Empty x219 = 219; + optional Empty x220 = 220; + optional Empty x221 = 221; + optional Empty x222 = 222; + optional Empty x223 = 223; + optional Empty x224 = 224; + optional Empty x225 = 225; + optional Empty x226 = 226; + optional Empty x227 = 227; + optional Empty x228 = 228; + optional Empty x229 = 229; + optional Empty x230 = 230; + optional Empty x231 = 231; + optional Empty x232 = 232; + optional Empty x233 = 233; + optional Empty x234 = 234; + 
optional Empty x235 = 235; + optional Empty x236 = 236; + optional Empty x237 = 237; + optional Empty x238 = 238; + optional Empty x239 = 239; + optional Empty x240 = 240; + optional Empty x241 = 241; + optional Empty x242 = 242; + optional Empty x243 = 243; + optional Empty x244 = 244; + optional Empty x245 = 245; + optional Empty x246 = 246; + optional Empty x247 = 247; + optional Empty x248 = 248; + optional Empty x249 = 249; + optional Empty x250 = 250; +} + +message MessageList { + repeated group Message = 1 { + required string name = 2; + required int32 count = 3; + } +} + +message Strings { + optional string string_field = 1; + optional bytes bytes_field = 2; +} + +message Defaults { + enum Color { + RED = 0; + GREEN = 1; + BLUE = 2; + } + + // Default-valued fields of all basic types. + // Same as GoTest, but copied here to make testing easier. + optional bool F_Bool = 1 [default=true]; + optional int32 F_Int32 = 2 [default=32]; + optional int64 F_Int64 = 3 [default=64]; + optional fixed32 F_Fixed32 = 4 [default=320]; + optional fixed64 F_Fixed64 = 5 [default=640]; + optional uint32 F_Uint32 = 6 [default=3200]; + optional uint64 F_Uint64 = 7 [default=6400]; + optional float F_Float = 8 [default=314159.]; + optional double F_Double = 9 [default=271828.]; + optional string F_String = 10 [default="hello, \"world!\"\n"]; + optional bytes F_Bytes = 11 [default="Bignose"]; + optional sint32 F_Sint32 = 12 [default=-32]; + optional sint64 F_Sint64 = 13 [default=-64]; + optional Color F_Enum = 14 [default=GREEN]; + + // More fields with crazy defaults. + optional float F_Pinf = 15 [default=inf]; + optional float F_Ninf = 16 [default=-inf]; + optional float F_Nan = 17 [default=nan]; + + // Sub-message. + optional SubDefaults sub = 18; + + // Redundant but explicit defaults. + optional string str_zero = 19 [default=""]; +} + +message SubDefaults { + optional int64 n = 1 [default=7]; +} + +message RepeatedEnum { + enum Color { + RED = 1; + } + repeated Color color = 1; +} + +message MoreRepeated { + repeated bool bools = 1; + repeated bool bools_packed = 2 [packed=true]; + repeated int32 ints = 3; + repeated int32 ints_packed = 4 [packed=true]; + repeated int64 int64s_packed = 7 [packed=true]; + repeated string strings = 5; + repeated fixed32 fixeds = 6; +} + +// GroupOld and GroupNew have the same wire format. +// GroupNew has a new field inside a group. + +message GroupOld { + optional group G = 101 { + optional int32 x = 2; + } +} + +message GroupNew { + optional group G = 101 { + optional int32 x = 2; + optional int32 y = 3; + } +} + +message FloatingPoint { + required double f = 1; + optional bool exact = 2; +} + +message MessageWithMap { + map name_mapping = 1; + map msg_mapping = 2; + map byte_mapping = 3; + map str_to_str = 4; +} + +message Oneof { + oneof union { + bool F_Bool = 1; + int32 F_Int32 = 2; + int64 F_Int64 = 3; + fixed32 F_Fixed32 = 4; + fixed64 F_Fixed64 = 5; + uint32 F_Uint32 = 6; + uint64 F_Uint64 = 7; + float F_Float = 8; + double F_Double = 9; + string F_String = 10; + bytes F_Bytes = 11; + sint32 F_Sint32 = 12; + sint64 F_Sint64 = 13; + MyMessage.Color F_Enum = 14; + GoTestField F_Message = 15; + group F_Group = 16 { + optional int32 x = 17; + } + int32 F_Largest_Tag = 536870911; + } + + oneof tormato { + int32 value = 100; + } +} + +message Communique { + optional bool make_me_cry = 1; + + // This is a oneof, called "union". 
+ oneof union { + int32 number = 5; + string name = 6; + bytes data = 7; + double temp_c = 8; + MyMessage.Color col = 9; + Strings msg = 10; + } +} + +message TestUTF8 { + optional string scalar = 1; + repeated string vector = 2; + oneof oneof { string field = 3; } + map map_key = 4; + map map_value = 5; +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/text.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/text.go index 965876b..2205fda 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/text.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/text.go @@ -50,7 +50,6 @@ import ( var ( newline = []byte("\n") spaces = []byte(" ") - gtNewline = []byte(">\n") endBraceNewline = []byte("}\n") backslashN = []byte{'\\', 'n'} backslashR = []byte{'\\', 'r'} @@ -170,11 +169,6 @@ func writeName(w *textWriter, props *Properties) error { return nil } -// raw is the interface satisfied by RawMessage. -type raw interface { - Bytes() []byte -} - func requiresQuotes(u string) bool { // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. for _, ch := range u { @@ -269,6 +263,10 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { props := sprops.Prop[i] name := st.Field(i).Name + if name == "XXX_NoUnkeyedLiteral" { + continue + } + if strings.HasPrefix(name, "XXX_") { // There are two XXX_ fields: // XXX_unrecognized []byte @@ -436,12 +434,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if b, ok := fv.Interface().(raw); ok { - if err := writeRaw(w, b.Bytes()); err != nil { - return err - } - continue - } // Enums have a String method, so writeAny will work fine. if err := tm.writeAny(w, fv, props); err != nil { @@ -455,7 +447,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { // Extensions (the XXX_extensions field). pv := sv.Addr() - if _, ok := extendable(pv.Interface()); ok { + if _, err := extendable(pv.Interface()); err == nil { if err := tm.writeExtensions(w, pv); err != nil { return err } @@ -464,27 +456,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return nil } -// writeRaw writes an uninterpreted raw message. -func writeRaw(w *textWriter, b []byte) error { - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if err := writeUnknownStruct(w, b); err != nil { - return err - } - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - return nil -} - // writeAny writes an arbitrary field. func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { v = reflect.Indirect(v) @@ -535,6 +506,19 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert } } w.indent() + if v.CanAddr() { + // Calling v.Interface on a struct causes the reflect package to + // copy the entire struct. This is racy with the new Marshaler + // since we atomically update the XXX_sizecache. + // + // Thus, we retrieve a pointer to the struct if possible to avoid + // a race since v.Interface on the pointer doesn't copy the struct. 
+ // + // If v is not addressable, then we are not worried about a race + // since it implies that the binary Marshaler cannot possibly be + // mutating this value. + v = v.Addr() + } if etm, ok := v.Interface().(encoding.TextMarshaler); ok { text, err := etm.MarshalText() if err != nil { @@ -543,8 +527,13 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert if _, err = w.Write(text); err != nil { return err } - } else if err := tm.writeStruct(w, v); err != nil { - return err + } else { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if err := tm.writeStruct(w, v); err != nil { + return err + } } w.unindent() if err := w.WriteByte(ket); err != nil { diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/text_parser.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/text_parser.go index 5e14513..0685bae 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/text_parser.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -206,7 +206,6 @@ func (p *textParser) advance() { var ( errBadUTF8 = errors.New("proto: bad UTF-8") - errBadHex = errors.New("proto: bad hexadecimal") ) func unquoteC(s string, quote rune) (string, error) { @@ -277,60 +276,47 @@ func unescape(s string) (ch string, tail string, err error) { return "?", s, nil // trigraph workaround case '\'', '"', '\\': return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': + case '0', '1', '2', '3', '4', '5', '6', '7': if len(s) < 2 { return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) } - base := 8 - ss := s[:2] + ss := string(r) + s[:2] s = s[2:] - if r == 'x' || r == 'X' { - base = 16 - } else { - ss = string(r) + ss - } - i, err := strconv.ParseUint(ss, base, 8) + i, err := strconv.ParseUint(ss, 8, 8) if err != nil { - return "", "", err + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) } return string([]byte{byte(i)}), s, nil - case 'u', 'U': - n := 4 - if r == 'U' { + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': n = 8 } if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) - } - - bs := make([]byte, n/2) - for i := 0; i < n; i += 2 { - a, ok1 := unhex(s[i]) - b, ok2 := unhex(s[i+1]) - if !ok1 || !ok2 { - return "", "", errBadHex - } - bs[i/2] = a<<4 | b + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) } + ss := s[:n] s = s[n:] - return string(bs), s, nil + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(i), s, nil } return "", "", fmt.Errorf(`unknown escape \%c`, r) } -// Adapted from src/pkg/strconv/quote.go. -func unhex(b byte) (v byte, ok bool) { - switch { - case '0' <= b && b <= '9': - return b - '0', true - case 'a' <= b && b <= 'f': - return b - 'a' + 10, true - case 'A' <= b && b <= 'F': - return b - 'A' + 10, true - } - return 0, false -} - // Back off the parser by one token. Can only be done between calls to next(). // It makes the next advance() a no-op. 
func (p *textParser) back() { p.backed = true } @@ -728,6 +714,9 @@ func (p *textParser) consumeExtName() (string, error) { if tok.err != nil { return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } } return strings.Join(parts, ""), nil } @@ -865,7 +854,7 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { return p.readStruct(fv, terminator) case reflect.Uint32: if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(x) + fv.SetUint(uint64(x)) return nil } case reflect.Uint64: @@ -883,13 +872,9 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { // UnmarshalText returns *RequiredNotSetError. func UnmarshalText(s string, pb Message) error { if um, ok := pb.(encoding.TextUnmarshaler); ok { - err := um.UnmarshalText([]byte(s)) - return err + return um.UnmarshalText([]byte(s)) } pb.Reset() v := reflect.ValueOf(pb) - if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { - return pe - } - return nil + return newTextParser(s).readStruct(v.Elem(), "") } diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/Makefile b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/Makefile deleted file mode 100644 index a42cc37..0000000 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/Makefile +++ /dev/null @@ -1,33 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -test: - cd testdata && make test diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile deleted file mode 100644 index f706871..0000000 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile +++ /dev/null @@ -1,37 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -# Not stored here, but descriptor.proto is in https://github.com/google/protobuf/ -# at src/google/protobuf/descriptor.proto -regenerate: - @echo WARNING! THIS RULE IS PROBABLY NOT RIGHT FOR YOUR INSTALLATION - cp $(HOME)/src/protobuf/include/google/protobuf/descriptor.proto . - protoc --go_out=../../../../.. -I$(HOME)/src/protobuf/include $(HOME)/src/protobuf/include/google/protobuf/descriptor.proto diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go index c6a91bc..e855b1f 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go @@ -1,36 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/descriptor.proto -/* -Package descriptor is a generated protocol buffer package. 
- -It is generated from these files: - google/protobuf/descriptor.proto - -It has these top-level messages: - FileDescriptorSet - FileDescriptorProto - DescriptorProto - ExtensionRangeOptions - FieldDescriptorProto - OneofDescriptorProto - EnumDescriptorProto - EnumValueDescriptorProto - ServiceDescriptorProto - MethodDescriptorProto - FileOptions - MessageOptions - FieldOptions - OneofOptions - EnumOptions - EnumValueOptions - ServiceOptions - MethodOptions - UninterpretedOption - SourceCodeInfo - GeneratedCodeInfo -*/ -package descriptor +package descriptor // import "github.com/golang/protobuf/protoc-gen-go/descriptor" import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -138,7 +109,9 @@ func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { *x = FieldDescriptorProto_Type(value) return nil } -func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 0} } +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{4, 0} +} type FieldDescriptorProto_Label int32 @@ -177,7 +150,7 @@ func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { return nil } func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{4, 1} + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{4, 1} } // Generated classes can be optimized for speed or code size. @@ -217,7 +190,9 @@ func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { *x = FileOptions_OptimizeMode(value) return nil } -func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{10, 0} } +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{10, 0} +} type FieldOptions_CType int32 @@ -255,7 +230,9 @@ func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { *x = FieldOptions_CType(value) return nil } -func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 0} } +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{12, 0} +} type FieldOptions_JSType int32 @@ -295,7 +272,9 @@ func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { *x = FieldOptions_JSType(value) return nil } -func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 1} } +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{12, 1} +} // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, // or neither? HTTP based RPC implementation may choose GET verb for safe @@ -336,20 +315,41 @@ func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { return nil } func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{17, 0} + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{17, 0} } // The protocol compiler can output a FileDescriptorSet containing the .proto // files it parses. 
type FileDescriptorSet struct { - File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` - XXX_unrecognized []byte `json:"-"` + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{0} +} +func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) +} +func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) +} +func (dst *FileDescriptorSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorSet.Merge(dst, src) +} +func (m *FileDescriptorSet) XXX_Size() int { + return xxx_messageInfo_FileDescriptorSet.Size(m) +} +func (m *FileDescriptorSet) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) } -func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } -func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorSet) ProtoMessage() {} -func (*FileDescriptorSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { if m != nil { @@ -382,14 +382,35 @@ type FileDescriptorProto struct { SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` // The syntax of the proto file. // The supported values are "proto2" and "proto3". 
- Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` - XXX_unrecognized []byte `json:"-"` + Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{1} +} +func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) +} +func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *FileDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorProto.Merge(dst, src) +} +func (m *FileDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FileDescriptorProto.Size(m) +} +func (m *FileDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) } -func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } -func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorProto) ProtoMessage() {} -func (*FileDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo func (m *FileDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -488,14 +509,35 @@ type DescriptorProto struct { ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` // Reserved field names, which may not be used by fields in the same message. // A given name may only be reserved once. 
- ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` - XXX_unrecognized []byte `json:"-"` + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{2} +} +func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) +} +func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) +} +func (dst *DescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto.Merge(dst, src) +} +func (m *DescriptorProto) XXX_Size() int { + return xxx_messageInfo_DescriptorProto.Size(m) +} +func (m *DescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto.DiscardUnknown(m) } -func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } -func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto) ProtoMessage() {} -func (*DescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo func (m *DescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -568,19 +610,38 @@ func (m *DescriptorProto) GetReservedName() []string { } type DescriptorProto_ExtensionRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } func (*DescriptorProto_ExtensionRange) ProtoMessage() {} func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{2, 0} + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{2, 0} +} +func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) +} +func (dst *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(dst, src) +} +func (m *DescriptorProto_ExtensionRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) +} +func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { 
+ xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) } +var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo + func (m *DescriptorProto_ExtensionRange) GetStart() int32 { if m != nil && m.Start != nil { return *m.Start @@ -606,17 +667,36 @@ func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { // fields or extension ranges in the same message. Reserved ranges may // not overlap. type DescriptorProto_ReservedRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - XXX_unrecognized []byte `json:"-"` + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } func (*DescriptorProto_ReservedRange) ProtoMessage() {} func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{2, 1} + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{2, 1} +} +func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) +} +func (dst *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ReservedRange.Merge(dst, src) +} +func (m *DescriptorProto_ReservedRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) } +func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo func (m *DescriptorProto_ReservedRange) GetStart() int32 { if m != nil && m.Start != nil { @@ -635,22 +715,43 @@ func (m *DescriptorProto_ReservedRange) GetEnd() int32 { type ExtensionRangeOptions struct { // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } -func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } -func (*ExtensionRangeOptions) ProtoMessage() {} -func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{3} +} var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_ExtensionRangeOptions } +func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) +} +func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) +} +func (dst *ExtensionRangeOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionRangeOptions.Merge(dst, src) +} +func (m *ExtensionRangeOptions) XXX_Size() int { + return xxx_messageInfo_ExtensionRangeOptions.Size(m) +} +func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { if m != nil { @@ -689,15 +790,36 @@ type FieldDescriptorProto struct { // user has set a "json_name" option on this field, that option's value // will be used. Otherwise, it's deduced from the field's name by converting // it to camelCase. 
- JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` - Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } -func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FieldDescriptorProto) ProtoMessage() {} -func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{4} +} +func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) +} +func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *FieldDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldDescriptorProto.Merge(dst, src) +} +func (m *FieldDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FieldDescriptorProto.Size(m) +} +func (m *FieldDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo func (m *FieldDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -771,15 +893,36 @@ func (m *FieldDescriptorProto) GetOptions() *FieldOptions { // Describes a oneof. 
type OneofDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{5} +} +func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) +} +func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *OneofDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofDescriptorProto.Merge(dst, src) +} +func (m *OneofDescriptorProto) XXX_Size() int { + return xxx_messageInfo_OneofDescriptorProto.Size(m) +} +func (m *OneofDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) } -func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } -func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*OneofDescriptorProto) ProtoMessage() {} -func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo func (m *OneofDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -797,16 +940,44 @@ func (m *OneofDescriptorProto) GetOptions() *OneofOptions { // Describes an enum type. type EnumDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` - Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. 
+ ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{6} +} +func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) +} +func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *EnumDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto.Merge(dst, src) +} +func (m *EnumDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto.Size(m) +} +func (m *EnumDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) } -func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } -func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*EnumDescriptorProto) ProtoMessage() {} -func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo func (m *EnumDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -829,18 +1000,105 @@ func (m *EnumDescriptorProto) GetOptions() *EnumOptions { return nil } +func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. 
+type EnumDescriptorProto_EnumReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } +func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} +func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{6, 0} +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) +} +func (dst *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(dst, src) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo + +func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + // Describes a value within an enum. 
type EnumValueDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` - Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } -func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*EnumValueDescriptorProto) ProtoMessage() {} -func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{7} +} +func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) +} +func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueDescriptorProto.Merge(dst, src) +} +func (m *EnumValueDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumValueDescriptorProto.Size(m) +} +func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo func (m *EnumValueDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -865,16 +1123,37 @@ func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { // Describes a service. 
type ServiceDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` - Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } -func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*ServiceDescriptorProto) ProtoMessage() {} -func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{8} +} +func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) +} +func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *ServiceDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceDescriptorProto.Merge(dst, src) +} +func (m *ServiceDescriptorProto) XXX_Size() int { + return xxx_messageInfo_ServiceDescriptorProto.Size(m) +} +func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo func (m *ServiceDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -908,14 +1187,35 @@ type MethodDescriptorProto struct { // Identifies if client streams multiple client messages ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` // Identifies if server streams multiple server messages - ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` - XXX_unrecognized []byte `json:"-"` + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{9} +} +func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) +} +func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) +} +func (dst 
*MethodDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodDescriptorProto.Merge(dst, src) +} +func (m *MethodDescriptorProto) XXX_Size() int { + return xxx_messageInfo_MethodDescriptorProto.Size(m) +} +func (m *MethodDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) } -func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } -func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*MethodDescriptorProto) ProtoMessage() {} -func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } +var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo const Default_MethodDescriptorProto_ClientStreaming bool = false const Default_MethodDescriptorProto_ServerStreaming bool = false @@ -982,7 +1282,7 @@ type FileOptions struct { // top-level extensions defined in the file. JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` // This option does nothing. - JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use. // If set true, then the Java2 code generator will generate code that // throws an exception whenever an attempt is made to assign a non-UTF-8 // byte sequence to a string field. @@ -1036,24 +1336,46 @@ type FileOptions struct { // is empty. When this option is empty, the package name will be used for // determining the namespace. PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` - // The parser stores options it doesn't recognize here. See above. + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *FileOptions) Reset() { *m = FileOptions{} } -func (m *FileOptions) String() string { return proto.CompactTextString(m) } -func (*FileOptions) ProtoMessage() {} -func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{10} +} var extRange_FileOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_FileOptions } +func (m *FileOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileOptions.Unmarshal(m, b) +} +func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) +} +func (dst *FileOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileOptions.Merge(dst, src) +} +func (m *FileOptions) XXX_Size() int { + return xxx_messageInfo_FileOptions.Size(m) +} +func (m *FileOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FileOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FileOptions proto.InternalMessageInfo const Default_FileOptions_JavaMultipleFiles bool = false const Default_FileOptions_JavaStringCheckUtf8 bool = false @@ -1086,6 +1408,7 @@ func (m *FileOptions) GetJavaMultipleFiles() bool { return Default_FileOptions_JavaMultipleFiles } +// Deprecated: Do not use. func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { if m != nil && m.JavaGenerateEqualsAndHash != nil { return *m.JavaGenerateEqualsAndHash @@ -1251,22 +1574,43 @@ type MessageOptions struct { MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *MessageOptions) Reset() { *m = MessageOptions{} } -func (m *MessageOptions) String() string { return proto.CompactTextString(m) } -func (*MessageOptions) ProtoMessage() {} -func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} +func (*MessageOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{11} +} var extRange_MessageOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_MessageOptions } +func (m *MessageOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageOptions.Unmarshal(m, b) +} +func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) +} +func (dst *MessageOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageOptions.Merge(dst, src) +} +func (m *MessageOptions) XXX_Size() int { + return xxx_messageInfo_MessageOptions.Size(m) +} +func (m *MessageOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MessageOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageOptions proto.InternalMessageInfo const Default_MessageOptions_MessageSetWireFormat bool = false const Default_MessageOptions_NoStandardDescriptorAccessor bool = false @@ -1369,22 +1713,43 @@ type FieldOptions struct { Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *FieldOptions) Reset() { *m = FieldOptions{} } -func (m *FieldOptions) String() string { return proto.CompactTextString(m) } -func (*FieldOptions) ProtoMessage() {} -func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{12} +} var extRange_FieldOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_FieldOptions } +func (m *FieldOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldOptions.Unmarshal(m, b) +} +func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) +} +func (dst *FieldOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldOptions.Merge(dst, src) +} +func (m *FieldOptions) XXX_Size() int { + return xxx_messageInfo_FieldOptions.Size(m) +} +func (m *FieldOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FieldOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldOptions proto.InternalMessageInfo const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL @@ -1444,22 +1809,43 @@ func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { type OneofOptions struct { // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *OneofOptions) Reset() { *m = OneofOptions{} } -func (m *OneofOptions) String() string { return proto.CompactTextString(m) } -func (*OneofOptions) ProtoMessage() {} -func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{13} +} var extRange_OneofOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_OneofOptions } +func (m *OneofOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofOptions.Unmarshal(m, b) +} +func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) +} +func (dst *OneofOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofOptions.Merge(dst, src) +} +func (m *OneofOptions) XXX_Size() int { + return xxx_messageInfo_OneofOptions.Size(m) +} +func (m *OneofOptions) XXX_DiscardUnknown() { + xxx_messageInfo_OneofOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofOptions proto.InternalMessageInfo func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { if m != nil { @@ -1479,22 +1865,43 @@ type EnumOptions struct { Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *EnumOptions) Reset() { *m = EnumOptions{} } -func (m *EnumOptions) String() string { return proto.CompactTextString(m) } -func (*EnumOptions) ProtoMessage() {} -func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{14} +} var extRange_EnumOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_EnumOptions } +func (m *EnumOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumOptions.Unmarshal(m, b) +} +func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) +} +func (dst *EnumOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumOptions.Merge(dst, src) +} +func (m *EnumOptions) XXX_Size() int { + return xxx_messageInfo_EnumOptions.Size(m) +} +func (m *EnumOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumOptions proto.InternalMessageInfo const Default_EnumOptions_Deprecated bool = false @@ -1527,22 +1934,43 @@ type EnumValueOptions struct { Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } -func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } -func (*EnumValueOptions) ProtoMessage() {} -func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{15} +} var extRange_EnumValueOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_EnumValueOptions } +func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) +} +func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) +} +func (dst *EnumValueOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueOptions.Merge(dst, src) +} +func (m *EnumValueOptions) XXX_Size() int { + return xxx_messageInfo_EnumValueOptions.Size(m) +} +func (m *EnumValueOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo const Default_EnumValueOptions_Deprecated bool = false @@ -1568,22 +1996,43 @@ type ServiceOptions struct { Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } -func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } -func (*ServiceOptions) ProtoMessage() {} -func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{16} +} var extRange_ServiceOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_ServiceOptions } +func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) +} +func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) +} +func (dst *ServiceOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceOptions.Merge(dst, src) +} +func (m *ServiceOptions) XXX_Size() int { + return xxx_messageInfo_ServiceOptions.Size(m) +} +func (m *ServiceOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo const Default_ServiceOptions_Deprecated bool = false @@ -1610,22 +2059,43 @@ type MethodOptions struct { IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *MethodOptions) Reset() { *m = MethodOptions{} } -func (m *MethodOptions) String() string { return proto.CompactTextString(m) } -func (*MethodOptions) ProtoMessage() {} -func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} +func (*MethodOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{17} +} var extRange_MethodOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_MethodOptions } +func (m *MethodOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodOptions.Unmarshal(m, b) +} +func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) +} +func (dst *MethodOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodOptions.Merge(dst, src) +} +func (m *MethodOptions) XXX_Size() int { + return xxx_messageInfo_MethodOptions.Size(m) +} +func (m *MethodOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MethodOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodOptions proto.InternalMessageInfo const Default_MethodOptions_Deprecated bool = false const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN @@ -1661,19 +2131,40 @@ type UninterpretedOption struct { Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` // The value of the uninterpreted option, in whatever type the tokenizer // identified it as during parsing. Exactly one of these should be set. 
- IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` - PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` - NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` - DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` - StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` - AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` - XXX_unrecognized []byte `json:"-"` + IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } -func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } -func (*UninterpretedOption) ProtoMessage() {} -func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{18} +} +func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) +} +func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) +} +func (dst *UninterpretedOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption.Merge(dst, src) +} +func (m *UninterpretedOption) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption.Size(m) +} +func (m *UninterpretedOption) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { if m != nil { @@ -1730,18 +2221,37 @@ func (m *UninterpretedOption) GetAggregateValue() string { // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents // "foo.(bar.baz).qux". 
type UninterpretedOption_NamePart struct { - NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` - IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` - XXX_unrecognized []byte `json:"-"` + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } func (*UninterpretedOption_NamePart) ProtoMessage() {} func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{18, 0} + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{18, 0} +} +func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) +} +func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) +} +func (dst *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption_NamePart.Merge(dst, src) +} +func (m *UninterpretedOption_NamePart) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) +} +func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) } +var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo + func (m *UninterpretedOption_NamePart) GetNamePart() string { if m != nil && m.NamePart != nil { return *m.NamePart @@ -1802,14 +2312,35 @@ type SourceCodeInfo struct { // - Code which tries to interpret locations should probably be designed to // ignore those that it doesn't understand, as more types of locations could // be recorded in the future. 
- Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` - XXX_unrecognized []byte `json:"-"` + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{19} +} +func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) +} +func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) +} +func (dst *SourceCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo.Merge(dst, src) +} +func (m *SourceCodeInfo) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo.Size(m) +} +func (m *SourceCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m) } -func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } -func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } -func (*SourceCodeInfo) ProtoMessage() {} -func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } +var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { if m != nil { @@ -1899,13 +2430,34 @@ type SourceCodeInfo_Location struct { LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{19, 0} +} +func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) +} +func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) +} +func (dst *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo_Location.Merge(dst, src) +} +func (m *SourceCodeInfo_Location) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo_Location.Size(m) +} +func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) } -func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } -func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } -func (*SourceCodeInfo_Location) ProtoMessage() {} -func (*SourceCodeInfo_Location) Descriptor() ([]byte, 
[]int) { return fileDescriptor0, []int{19, 0} } +var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo func (m *SourceCodeInfo_Location) GetPath() []int32 { if m != nil { @@ -1948,14 +2500,35 @@ func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { type GeneratedCodeInfo struct { // An Annotation connects some span of text in generated code to an element // of its generating .proto file. - Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` - XXX_unrecognized []byte `json:"-"` + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } -func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } -func (*GeneratedCodeInfo) ProtoMessage() {} -func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{20} +} +func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) +} +func (dst *GeneratedCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo.Merge(dst, src) +} +func (m *GeneratedCodeInfo) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo.Size(m) +} +func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { if m != nil { @@ -1976,17 +2549,36 @@ type GeneratedCodeInfo_Annotation struct { // Identifies the ending offset in bytes in the generated code that // relates to the identified offset. The end offset should be one past // the last relevant byte (so the length of the text = end - begin). 
- End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` - XXX_unrecognized []byte `json:"-"` + End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{20, 0} + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{20, 0} +} +func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) +} +func (dst *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(dst, src) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) +} +func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) } +var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo + func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { if m != nil { return m.Path @@ -2025,6 +2617,7 @@ func init() { proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), "google.protobuf.EnumDescriptorProto.EnumReservedRange") proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") @@ -2050,166 +2643,170 @@ func init() { proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) } -func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor0) } +func init() { + proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor_descriptor_4df4cb5f42392df6) +} -var fileDescriptor0 = []byte{ - // 2519 bytes of a gzipped FileDescriptorProto +var fileDescriptor_descriptor_4df4cb5f42392df6 = []byte{ + // 2555 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x6e, 0x1b, 0xc7, - 0x15, 0x0e, 0x7f, 0x45, 0x1e, 0x52, 0xd4, 0x68, 0xa4, 0xd8, 0x6b, 0xe5, 0xc7, 0x32, 0xf3, 0x63, - 0xd9, 0x69, 0xa8, 0x40, 0xb1, 0x1d, 0x47, 0x29, 0xd2, 0x52, 0xe4, 0x5a, 0xa1, 0x4a, 0x91, 0xec, - 0x92, 0x6a, 0x7e, 0x6e, 0x16, 0xa3, 0xdd, 0x21, 0xb9, 0xf6, 0x72, 0x77, 0xb3, 0xbb, 0xb4, 0xad, - 0xa0, 0x17, 0x06, 0x7a, 0x55, 0xa0, 0x0f, 0x50, 0x14, 0x45, 0x2f, 0x72, 0x13, 0xa0, 0x0f, 0x50, - 0x20, 0x77, 0x7d, 0x82, 0x02, 0x79, 0x83, 0xa2, 0x28, 0xd0, 0x3e, 0x46, 0x31, 0x33, 0xbb, 0xcb, - 0x5d, 0xfe, 0xc4, 0x6a, 0x80, 0x38, 0x57, 0xe4, 0x7c, 0xe7, 0x3b, 0x67, 0xce, 0x9c, 0x39, 
0x33, - 0x73, 0x66, 0x16, 0x76, 0x47, 0xb6, 0x3d, 0x32, 0xe9, 0xbe, 0xe3, 0xda, 0xbe, 0x7d, 0x3e, 0x1d, - 0xee, 0xeb, 0xd4, 0xd3, 0x5c, 0xc3, 0xf1, 0x6d, 0xb7, 0xc6, 0x31, 0xbc, 0x21, 0x18, 0xb5, 0x90, - 0x51, 0x3d, 0x85, 0xcd, 0x07, 0x86, 0x49, 0x9b, 0x11, 0xb1, 0x4f, 0x7d, 0x7c, 0x1f, 0xb2, 0x43, - 0xc3, 0xa4, 0x52, 0x6a, 0x37, 0xb3, 0x57, 0x3a, 0x78, 0xb3, 0x36, 0xa7, 0x54, 0x4b, 0x6a, 0xf4, - 0x18, 0xac, 0x70, 0x8d, 0xea, 0xbf, 0xb3, 0xb0, 0xb5, 0x44, 0x8a, 0x31, 0x64, 0x2d, 0x32, 0x61, - 0x16, 0x53, 0x7b, 0x45, 0x85, 0xff, 0xc7, 0x12, 0xac, 0x39, 0x44, 0x7b, 0x44, 0x46, 0x54, 0x4a, - 0x73, 0x38, 0x6c, 0xe2, 0xd7, 0x01, 0x74, 0xea, 0x50, 0x4b, 0xa7, 0x96, 0x76, 0x21, 0x65, 0x76, - 0x33, 0x7b, 0x45, 0x25, 0x86, 0xe0, 0x77, 0x60, 0xd3, 0x99, 0x9e, 0x9b, 0x86, 0xa6, 0xc6, 0x68, - 0xb0, 0x9b, 0xd9, 0xcb, 0x29, 0x48, 0x08, 0x9a, 0x33, 0xf2, 0x4d, 0xd8, 0x78, 0x42, 0xc9, 0xa3, - 0x38, 0xb5, 0xc4, 0xa9, 0x15, 0x06, 0xc7, 0x88, 0x0d, 0x28, 0x4f, 0xa8, 0xe7, 0x91, 0x11, 0x55, - 0xfd, 0x0b, 0x87, 0x4a, 0x59, 0x3e, 0xfa, 0xdd, 0x85, 0xd1, 0xcf, 0x8f, 0xbc, 0x14, 0x68, 0x0d, - 0x2e, 0x1c, 0x8a, 0xeb, 0x50, 0xa4, 0xd6, 0x74, 0x22, 0x2c, 0xe4, 0x56, 0xc4, 0x4f, 0xb6, 0xa6, - 0x93, 0x79, 0x2b, 0x05, 0xa6, 0x16, 0x98, 0x58, 0xf3, 0xa8, 0xfb, 0xd8, 0xd0, 0xa8, 0x94, 0xe7, - 0x06, 0x6e, 0x2e, 0x18, 0xe8, 0x0b, 0xf9, 0xbc, 0x8d, 0x50, 0x0f, 0x37, 0xa0, 0x48, 0x9f, 0xfa, - 0xd4, 0xf2, 0x0c, 0xdb, 0x92, 0xd6, 0xb8, 0x91, 0xb7, 0x96, 0xcc, 0x22, 0x35, 0xf5, 0x79, 0x13, - 0x33, 0x3d, 0x7c, 0x0f, 0xd6, 0x6c, 0xc7, 0x37, 0x6c, 0xcb, 0x93, 0x0a, 0xbb, 0xa9, 0xbd, 0xd2, - 0xc1, 0xab, 0x4b, 0x13, 0xa1, 0x2b, 0x38, 0x4a, 0x48, 0xc6, 0x2d, 0x40, 0x9e, 0x3d, 0x75, 0x35, - 0xaa, 0x6a, 0xb6, 0x4e, 0x55, 0xc3, 0x1a, 0xda, 0x52, 0x91, 0x1b, 0xb8, 0xbe, 0x38, 0x10, 0x4e, - 0x6c, 0xd8, 0x3a, 0x6d, 0x59, 0x43, 0x5b, 0xa9, 0x78, 0x89, 0x36, 0xbe, 0x02, 0x79, 0xef, 0xc2, - 0xf2, 0xc9, 0x53, 0xa9, 0xcc, 0x33, 0x24, 0x68, 0x55, 0xbf, 0xcd, 0xc3, 0xc6, 0x65, 0x52, 0xec, - 0x23, 0xc8, 0x0d, 0xd9, 0x28, 0xa5, 0xf4, 0xff, 0x13, 0x03, 0xa1, 0x93, 0x0c, 0x62, 0xfe, 0x07, - 0x06, 0xb1, 0x0e, 0x25, 0x8b, 0x7a, 0x3e, 0xd5, 0x45, 0x46, 0x64, 0x2e, 0x99, 0x53, 0x20, 0x94, - 0x16, 0x53, 0x2a, 0xfb, 0x83, 0x52, 0xea, 0x33, 0xd8, 0x88, 0x5c, 0x52, 0x5d, 0x62, 0x8d, 0xc2, - 0xdc, 0xdc, 0x7f, 0x9e, 0x27, 0x35, 0x39, 0xd4, 0x53, 0x98, 0x9a, 0x52, 0xa1, 0x89, 0x36, 0x6e, - 0x02, 0xd8, 0x16, 0xb5, 0x87, 0xaa, 0x4e, 0x35, 0x53, 0x2a, 0xac, 0x88, 0x52, 0x97, 0x51, 0x16, - 0xa2, 0x64, 0x0b, 0x54, 0x33, 0xf1, 0x87, 0xb3, 0x54, 0x5b, 0x5b, 0x91, 0x29, 0xa7, 0x62, 0x91, - 0x2d, 0x64, 0xdb, 0x19, 0x54, 0x5c, 0xca, 0xf2, 0x9e, 0xea, 0xc1, 0xc8, 0x8a, 0xdc, 0x89, 0xda, - 0x73, 0x47, 0xa6, 0x04, 0x6a, 0x62, 0x60, 0xeb, 0x6e, 0xbc, 0x89, 0xdf, 0x80, 0x08, 0x50, 0x79, - 0x5a, 0x01, 0xdf, 0x85, 0xca, 0x21, 0xd8, 0x21, 0x13, 0xba, 0xf3, 0x15, 0x54, 0x92, 0xe1, 0xc1, - 0xdb, 0x90, 0xf3, 0x7c, 0xe2, 0xfa, 0x3c, 0x0b, 0x73, 0x8a, 0x68, 0x60, 0x04, 0x19, 0x6a, 0xe9, - 0x7c, 0x97, 0xcb, 0x29, 0xec, 0x2f, 0xfe, 0xe5, 0x6c, 0xc0, 0x19, 0x3e, 0xe0, 0xb7, 0x17, 0x67, - 0x34, 0x61, 0x79, 0x7e, 0xdc, 0x3b, 0x1f, 0xc0, 0x7a, 0x62, 0x00, 0x97, 0xed, 0xba, 0xfa, 0x5b, - 0x78, 0x79, 0xa9, 0x69, 0xfc, 0x19, 0x6c, 0x4f, 0x2d, 0xc3, 0xf2, 0xa9, 0xeb, 0xb8, 0x94, 0x65, - 0xac, 0xe8, 0x4a, 0xfa, 0xcf, 0xda, 0x8a, 0x9c, 0x3b, 0x8b, 0xb3, 0x85, 0x15, 0x65, 0x6b, 0xba, - 0x08, 0xde, 0x2e, 0x16, 0xfe, 0xbb, 0x86, 0x9e, 0x3d, 0x7b, 0xf6, 0x2c, 0x5d, 0xfd, 0x63, 0x1e, - 0xb6, 0x97, 0xad, 0x99, 0xa5, 0xcb, 0xf7, 0x0a, 0xe4, 0xad, 0xe9, 0xe4, 0x9c, 0xba, 0x3c, 0x48, - 0x39, 0x25, 0x68, 
0xe1, 0x3a, 0xe4, 0x4c, 0x72, 0x4e, 0x4d, 0x29, 0xbb, 0x9b, 0xda, 0xab, 0x1c, - 0xbc, 0x73, 0xa9, 0x55, 0x59, 0x6b, 0x33, 0x15, 0x45, 0x68, 0xe2, 0x8f, 0x21, 0x1b, 0x6c, 0xd1, - 0xcc, 0xc2, 0xed, 0xcb, 0x59, 0x60, 0x6b, 0x49, 0xe1, 0x7a, 0xf8, 0x15, 0x28, 0xb2, 0x5f, 0x91, - 0x1b, 0x79, 0xee, 0x73, 0x81, 0x01, 0x2c, 0x2f, 0xf0, 0x0e, 0x14, 0xf8, 0x32, 0xd1, 0x69, 0x78, - 0xb4, 0x45, 0x6d, 0x96, 0x58, 0x3a, 0x1d, 0x92, 0xa9, 0xe9, 0xab, 0x8f, 0x89, 0x39, 0xa5, 0x3c, - 0xe1, 0x8b, 0x4a, 0x39, 0x00, 0x7f, 0xc3, 0x30, 0x7c, 0x1d, 0x4a, 0x62, 0x55, 0x19, 0x96, 0x4e, - 0x9f, 0xf2, 0xdd, 0x33, 0xa7, 0x88, 0x85, 0xd6, 0x62, 0x08, 0xeb, 0xfe, 0xa1, 0x67, 0x5b, 0x61, - 0x6a, 0xf2, 0x2e, 0x18, 0xc0, 0xbb, 0xff, 0x60, 0x7e, 0xe3, 0x7e, 0x6d, 0xf9, 0xf0, 0xe6, 0x73, - 0xaa, 0xfa, 0xb7, 0x34, 0x64, 0xf9, 0x7e, 0xb1, 0x01, 0xa5, 0xc1, 0xe7, 0x3d, 0x59, 0x6d, 0x76, - 0xcf, 0x8e, 0xda, 0x32, 0x4a, 0xe1, 0x0a, 0x00, 0x07, 0x1e, 0xb4, 0xbb, 0xf5, 0x01, 0x4a, 0x47, - 0xed, 0x56, 0x67, 0x70, 0xef, 0x0e, 0xca, 0x44, 0x0a, 0x67, 0x02, 0xc8, 0xc6, 0x09, 0xef, 0x1f, - 0xa0, 0x1c, 0x46, 0x50, 0x16, 0x06, 0x5a, 0x9f, 0xc9, 0xcd, 0x7b, 0x77, 0x50, 0x3e, 0x89, 0xbc, - 0x7f, 0x80, 0xd6, 0xf0, 0x3a, 0x14, 0x39, 0x72, 0xd4, 0xed, 0xb6, 0x51, 0x21, 0xb2, 0xd9, 0x1f, - 0x28, 0xad, 0xce, 0x31, 0x2a, 0x46, 0x36, 0x8f, 0x95, 0xee, 0x59, 0x0f, 0x41, 0x64, 0xe1, 0x54, - 0xee, 0xf7, 0xeb, 0xc7, 0x32, 0x2a, 0x45, 0x8c, 0xa3, 0xcf, 0x07, 0x72, 0x1f, 0x95, 0x13, 0x6e, - 0xbd, 0x7f, 0x80, 0xd6, 0xa3, 0x2e, 0xe4, 0xce, 0xd9, 0x29, 0xaa, 0xe0, 0x4d, 0x58, 0x17, 0x5d, - 0x84, 0x4e, 0x6c, 0xcc, 0x41, 0xf7, 0xee, 0x20, 0x34, 0x73, 0x44, 0x58, 0xd9, 0x4c, 0x00, 0xf7, - 0xee, 0x20, 0x5c, 0x6d, 0x40, 0x8e, 0x67, 0x17, 0xc6, 0x50, 0x69, 0xd7, 0x8f, 0xe4, 0xb6, 0xda, - 0xed, 0x0d, 0x5a, 0xdd, 0x4e, 0xbd, 0x8d, 0x52, 0x33, 0x4c, 0x91, 0x7f, 0x7d, 0xd6, 0x52, 0xe4, - 0x26, 0x4a, 0xc7, 0xb1, 0x9e, 0x5c, 0x1f, 0xc8, 0x4d, 0x94, 0xa9, 0x6a, 0xb0, 0xbd, 0x6c, 0x9f, - 0x5c, 0xba, 0x32, 0x62, 0x53, 0x9c, 0x5e, 0x31, 0xc5, 0xdc, 0xd6, 0xc2, 0x14, 0x7f, 0x9d, 0x82, - 0xad, 0x25, 0x67, 0xc5, 0xd2, 0x4e, 0x7e, 0x01, 0x39, 0x91, 0xa2, 0xe2, 0xf4, 0xbc, 0xb5, 0xf4, - 0xd0, 0xe1, 0x09, 0xbb, 0x70, 0x82, 0x72, 0xbd, 0x78, 0x05, 0x91, 0x59, 0x51, 0x41, 0x30, 0x13, - 0x0b, 0x4e, 0xfe, 0x2e, 0x05, 0xd2, 0x2a, 0xdb, 0xcf, 0xd9, 0x28, 0xd2, 0x89, 0x8d, 0xe2, 0xa3, - 0x79, 0x07, 0x6e, 0xac, 0x1e, 0xc3, 0x82, 0x17, 0xdf, 0xa4, 0xe0, 0xca, 0xf2, 0x42, 0x6b, 0xa9, - 0x0f, 0x1f, 0x43, 0x7e, 0x42, 0xfd, 0xb1, 0x1d, 0x16, 0x1b, 0x6f, 0x2f, 0x39, 0xc2, 0x98, 0x78, - 0x3e, 0x56, 0x81, 0x56, 0xfc, 0x0c, 0xcc, 0xac, 0xaa, 0x96, 0x84, 0x37, 0x0b, 0x9e, 0xfe, 0x3e, - 0x0d, 0x2f, 0x2f, 0x35, 0xbe, 0xd4, 0xd1, 0xd7, 0x00, 0x0c, 0xcb, 0x99, 0xfa, 0xa2, 0xa0, 0x10, - 0xfb, 0x53, 0x91, 0x23, 0x7c, 0xed, 0xb3, 0xbd, 0x67, 0xea, 0x47, 0xf2, 0x0c, 0x97, 0x83, 0x80, - 0x38, 0xe1, 0xfe, 0xcc, 0xd1, 0x2c, 0x77, 0xf4, 0xf5, 0x15, 0x23, 0x5d, 0x38, 0xab, 0xdf, 0x03, - 0xa4, 0x99, 0x06, 0xb5, 0x7c, 0xd5, 0xf3, 0x5d, 0x4a, 0x26, 0x86, 0x35, 0xe2, 0x1b, 0x70, 0xe1, - 0x30, 0x37, 0x24, 0xa6, 0x47, 0x95, 0x0d, 0x21, 0xee, 0x87, 0x52, 0xa6, 0xc1, 0xcf, 0x38, 0x37, - 0xa6, 0x91, 0x4f, 0x68, 0x08, 0x71, 0xa4, 0x51, 0xfd, 0xb6, 0x00, 0xa5, 0x58, 0x59, 0x8a, 0x6f, - 0x40, 0xf9, 0x21, 0x79, 0x4c, 0xd4, 0xf0, 0xaa, 0x21, 0x22, 0x51, 0x62, 0x58, 0x2f, 0xb8, 0x6e, - 0xbc, 0x07, 0xdb, 0x9c, 0x62, 0x4f, 0x7d, 0xea, 0xaa, 0x9a, 0x49, 0x3c, 0x8f, 0x07, 0xad, 0xc0, - 0xa9, 0x98, 0xc9, 0xba, 0x4c, 0xd4, 0x08, 0x25, 0xf8, 0x2e, 0x6c, 0x71, 0x8d, 0xc9, 0xd4, 0xf4, - 0x0d, 0xc7, 0xa4, 0x2a, 0xbb, 0xfc, 0x78, 
0x7c, 0x23, 0x8e, 0x3c, 0xdb, 0x64, 0x8c, 0xd3, 0x80, - 0xc0, 0x3c, 0xf2, 0x70, 0x13, 0x5e, 0xe3, 0x6a, 0x23, 0x6a, 0x51, 0x97, 0xf8, 0x54, 0xa5, 0x5f, - 0x4e, 0x89, 0xe9, 0xa9, 0xc4, 0xd2, 0xd5, 0x31, 0xf1, 0xc6, 0xd2, 0x36, 0x33, 0x70, 0x94, 0x96, - 0x52, 0xca, 0x35, 0x46, 0x3c, 0x0e, 0x78, 0x32, 0xa7, 0xd5, 0x2d, 0xfd, 0x13, 0xe2, 0x8d, 0xf1, - 0x21, 0x5c, 0xe1, 0x56, 0x3c, 0xdf, 0x35, 0xac, 0x91, 0xaa, 0x8d, 0xa9, 0xf6, 0x48, 0x9d, 0xfa, - 0xc3, 0xfb, 0xd2, 0x2b, 0xf1, 0xfe, 0xb9, 0x87, 0x7d, 0xce, 0x69, 0x30, 0xca, 0x99, 0x3f, 0xbc, - 0x8f, 0xfb, 0x50, 0x66, 0x93, 0x31, 0x31, 0xbe, 0xa2, 0xea, 0xd0, 0x76, 0xf9, 0xc9, 0x52, 0x59, - 0xb2, 0xb2, 0x63, 0x11, 0xac, 0x75, 0x03, 0x85, 0x53, 0x5b, 0xa7, 0x87, 0xb9, 0x7e, 0x4f, 0x96, - 0x9b, 0x4a, 0x29, 0xb4, 0xf2, 0xc0, 0x76, 0x59, 0x42, 0x8d, 0xec, 0x28, 0xc0, 0x25, 0x91, 0x50, - 0x23, 0x3b, 0x0c, 0xef, 0x5d, 0xd8, 0xd2, 0x34, 0x31, 0x66, 0x43, 0x53, 0x83, 0x2b, 0x8a, 0x27, - 0xa1, 0x44, 0xb0, 0x34, 0xed, 0x58, 0x10, 0x82, 0x1c, 0xf7, 0xf0, 0x87, 0xf0, 0xf2, 0x2c, 0x58, - 0x71, 0xc5, 0xcd, 0x85, 0x51, 0xce, 0xab, 0xde, 0x85, 0x2d, 0xe7, 0x62, 0x51, 0x11, 0x27, 0x7a, - 0x74, 0x2e, 0xe6, 0xd5, 0x3e, 0x80, 0x6d, 0x67, 0xec, 0x2c, 0xea, 0xdd, 0x8e, 0xeb, 0x61, 0x67, - 0xec, 0xcc, 0x2b, 0xbe, 0xc5, 0xef, 0xab, 0x2e, 0xd5, 0x88, 0x4f, 0x75, 0xe9, 0x6a, 0x9c, 0x1e, - 0x13, 0xe0, 0x7d, 0x40, 0x9a, 0xa6, 0x52, 0x8b, 0x9c, 0x9b, 0x54, 0x25, 0x2e, 0xb5, 0x88, 0x27, - 0x5d, 0x8f, 0x93, 0x2b, 0x9a, 0x26, 0x73, 0x69, 0x9d, 0x0b, 0xf1, 0x6d, 0xd8, 0xb4, 0xcf, 0x1f, - 0x6a, 0x22, 0x25, 0x55, 0xc7, 0xa5, 0x43, 0xe3, 0xa9, 0xf4, 0x26, 0x8f, 0xef, 0x06, 0x13, 0xf0, - 0x84, 0xec, 0x71, 0x18, 0xdf, 0x02, 0xa4, 0x79, 0x63, 0xe2, 0x3a, 0xbc, 0x26, 0xf0, 0x1c, 0xa2, - 0x51, 0xe9, 0x2d, 0x41, 0x15, 0x78, 0x27, 0x84, 0xd9, 0x92, 0xf0, 0x9e, 0x18, 0x43, 0x3f, 0xb4, - 0x78, 0x53, 0x2c, 0x09, 0x8e, 0x05, 0xd6, 0xf6, 0x00, 0xb1, 0x50, 0x24, 0x3a, 0xde, 0xe3, 0xb4, - 0x8a, 0x33, 0x76, 0xe2, 0xfd, 0xbe, 0x01, 0xeb, 0x8c, 0x39, 0xeb, 0xf4, 0x96, 0xa8, 0x67, 0x9c, - 0x71, 0xac, 0xc7, 0x1f, 0xad, 0xb4, 0xac, 0x1e, 0x42, 0x39, 0x9e, 0x9f, 0xb8, 0x08, 0x22, 0x43, - 0x51, 0x8a, 0x9d, 0xf5, 0x8d, 0x6e, 0x93, 0x9d, 0xd2, 0x5f, 0xc8, 0x28, 0xcd, 0xaa, 0x85, 0x76, - 0x6b, 0x20, 0xab, 0xca, 0x59, 0x67, 0xd0, 0x3a, 0x95, 0x51, 0x26, 0x56, 0x96, 0x9e, 0x64, 0x0b, - 0x6f, 0xa3, 0x9b, 0xd5, 0xef, 0xd2, 0x50, 0x49, 0xde, 0x33, 0xf0, 0xcf, 0xe1, 0x6a, 0xf8, 0x28, - 0xe0, 0x51, 0x5f, 0x7d, 0x62, 0xb8, 0x7c, 0xe1, 0x4c, 0x88, 0xa8, 0xb3, 0xa3, 0xa9, 0xdb, 0x0e, - 0x58, 0x7d, 0xea, 0x7f, 0x6a, 0xb8, 0x6c, 0x59, 0x4c, 0x88, 0x8f, 0xdb, 0x70, 0xdd, 0xb2, 0x55, - 0xcf, 0x27, 0x96, 0x4e, 0x5c, 0x5d, 0x9d, 0x3d, 0xc7, 0xa8, 0x44, 0xd3, 0xa8, 0xe7, 0xd9, 0xe2, - 0xc0, 0x8a, 0xac, 0xbc, 0x6a, 0xd9, 0xfd, 0x80, 0x3c, 0xdb, 0xc9, 0xeb, 0x01, 0x75, 0x2e, 0xcd, - 0x32, 0xab, 0xd2, 0xec, 0x15, 0x28, 0x4e, 0x88, 0xa3, 0x52, 0xcb, 0x77, 0x2f, 0x78, 0x75, 0x59, - 0x50, 0x0a, 0x13, 0xe2, 0xc8, 0xac, 0xfd, 0x42, 0x8a, 0xfc, 0x93, 0x6c, 0xa1, 0x80, 0x8a, 0x27, - 0xd9, 0x42, 0x11, 0x41, 0xf5, 0x5f, 0x19, 0x28, 0xc7, 0xab, 0x4d, 0x56, 0xbc, 0x6b, 0xfc, 0x64, - 0x49, 0xf1, 0xbd, 0xe7, 0x8d, 0xef, 0xad, 0x4d, 0x6b, 0x0d, 0x76, 0xe4, 0x1c, 0xe6, 0x45, 0x0d, - 0xa8, 0x08, 0x4d, 0x76, 0xdc, 0xb3, 0xdd, 0x86, 0x8a, 0x7b, 0x4d, 0x41, 0x09, 0x5a, 0xf8, 0x18, - 0xf2, 0x0f, 0x3d, 0x6e, 0x3b, 0xcf, 0x6d, 0xbf, 0xf9, 0xfd, 0xb6, 0x4f, 0xfa, 0xdc, 0x78, 0xf1, - 0xa4, 0xaf, 0x76, 0xba, 0xca, 0x69, 0xbd, 0xad, 0x04, 0xea, 0xf8, 0x1a, 0x64, 0x4d, 0xf2, 0xd5, - 0x45, 0xf2, 0x70, 0xe2, 0xd0, 0x65, 0x27, 0xe1, 0x1a, 0x64, 0x9f, 
0x50, 0xf2, 0x28, 0x79, 0x24, - 0x70, 0xe8, 0x47, 0x5c, 0x0c, 0xfb, 0x90, 0xe3, 0xf1, 0xc2, 0x00, 0x41, 0xc4, 0xd0, 0x4b, 0xb8, - 0x00, 0xd9, 0x46, 0x57, 0x61, 0x0b, 0x02, 0x41, 0x59, 0xa0, 0x6a, 0xaf, 0x25, 0x37, 0x64, 0x94, - 0xae, 0xde, 0x85, 0xbc, 0x08, 0x02, 0x5b, 0x2c, 0x51, 0x18, 0xd0, 0x4b, 0x41, 0x33, 0xb0, 0x91, - 0x0a, 0xa5, 0x67, 0xa7, 0x47, 0xb2, 0x82, 0xd2, 0xc9, 0xa9, 0xce, 0xa2, 0x5c, 0xd5, 0x83, 0x72, - 0xbc, 0xdc, 0x7c, 0x31, 0x57, 0xc9, 0xbf, 0xa7, 0xa0, 0x14, 0x2b, 0x1f, 0x59, 0xe1, 0x42, 0x4c, - 0xd3, 0x7e, 0xa2, 0x12, 0xd3, 0x20, 0x5e, 0x90, 0x1a, 0xc0, 0xa1, 0x3a, 0x43, 0x2e, 0x3b, 0x75, - 0x2f, 0x68, 0x89, 0xe4, 0x50, 0xbe, 0xfa, 0x97, 0x14, 0xa0, 0xf9, 0x02, 0x74, 0xce, 0xcd, 0xd4, - 0x4f, 0xe9, 0x66, 0xf5, 0xcf, 0x29, 0xa8, 0x24, 0xab, 0xce, 0x39, 0xf7, 0x6e, 0xfc, 0xa4, 0xee, - 0xfd, 0x33, 0x0d, 0xeb, 0x89, 0x5a, 0xf3, 0xb2, 0xde, 0x7d, 0x09, 0x9b, 0x86, 0x4e, 0x27, 0x8e, - 0xed, 0x53, 0x4b, 0xbb, 0x50, 0x4d, 0xfa, 0x98, 0x9a, 0x52, 0x95, 0x6f, 0x1a, 0xfb, 0xdf, 0x5f, - 0xcd, 0xd6, 0x5a, 0x33, 0xbd, 0x36, 0x53, 0x3b, 0xdc, 0x6a, 0x35, 0xe5, 0xd3, 0x5e, 0x77, 0x20, - 0x77, 0x1a, 0x9f, 0xab, 0x67, 0x9d, 0x5f, 0x75, 0xba, 0x9f, 0x76, 0x14, 0x64, 0xcc, 0xd1, 0x7e, - 0xc4, 0x65, 0xdf, 0x03, 0x34, 0xef, 0x14, 0xbe, 0x0a, 0xcb, 0xdc, 0x42, 0x2f, 0xe1, 0x2d, 0xd8, - 0xe8, 0x74, 0xd5, 0x7e, 0xab, 0x29, 0xab, 0xf2, 0x83, 0x07, 0x72, 0x63, 0xd0, 0x17, 0xd7, 0xfb, - 0x88, 0x3d, 0x48, 0x2c, 0xf0, 0xea, 0x9f, 0x32, 0xb0, 0xb5, 0xc4, 0x13, 0x5c, 0x0f, 0x6e, 0x16, - 0xe2, 0xb2, 0xf3, 0xee, 0x65, 0xbc, 0xaf, 0xb1, 0x82, 0xa0, 0x47, 0x5c, 0x3f, 0xb8, 0x88, 0xdc, - 0x02, 0x16, 0x25, 0xcb, 0x37, 0x86, 0x06, 0x75, 0x83, 0xd7, 0x10, 0x71, 0xdd, 0xd8, 0x98, 0xe1, - 0xe2, 0x41, 0xe4, 0x67, 0x80, 0x1d, 0xdb, 0x33, 0x7c, 0xe3, 0x31, 0x55, 0x0d, 0x2b, 0x7c, 0x3a, - 0x61, 0xd7, 0x8f, 0xac, 0x82, 0x42, 0x49, 0xcb, 0xf2, 0x23, 0xb6, 0x45, 0x47, 0x64, 0x8e, 0xcd, - 0x36, 0xf3, 0x8c, 0x82, 0x42, 0x49, 0xc4, 0xbe, 0x01, 0x65, 0xdd, 0x9e, 0xb2, 0x9a, 0x4c, 0xf0, - 0xd8, 0xd9, 0x91, 0x52, 0x4a, 0x02, 0x8b, 0x28, 0x41, 0xb5, 0x3d, 0x7b, 0xb3, 0x29, 0x2b, 0x25, - 0x81, 0x09, 0xca, 0x4d, 0xd8, 0x20, 0xa3, 0x91, 0xcb, 0x8c, 0x87, 0x86, 0xc4, 0xfd, 0xa1, 0x12, - 0xc1, 0x9c, 0xb8, 0x73, 0x02, 0x85, 0x30, 0x0e, 0xec, 0xa8, 0x66, 0x91, 0x50, 0x1d, 0xf1, 0x6e, - 0x97, 0xde, 0x2b, 0x2a, 0x05, 0x2b, 0x14, 0xde, 0x80, 0xb2, 0xe1, 0xa9, 0xb3, 0x27, 0xe8, 0xf4, - 0x6e, 0x7a, 0xaf, 0xa0, 0x94, 0x0c, 0x2f, 0x7a, 0xbe, 0xab, 0x7e, 0x93, 0x86, 0x4a, 0xf2, 0x09, - 0x1d, 0x37, 0xa1, 0x60, 0xda, 0x1a, 0xe1, 0xa9, 0x25, 0xbe, 0xdf, 0xec, 0x3d, 0xe7, 0xd5, 0xbd, - 0xd6, 0x0e, 0xf8, 0x4a, 0xa4, 0xb9, 0xf3, 0x8f, 0x14, 0x14, 0x42, 0x18, 0x5f, 0x81, 0xac, 0x43, - 0xfc, 0x31, 0x37, 0x97, 0x3b, 0x4a, 0xa3, 0x94, 0xc2, 0xdb, 0x0c, 0xf7, 0x1c, 0x62, 0xf1, 0x14, - 0x08, 0x70, 0xd6, 0x66, 0xf3, 0x6a, 0x52, 0xa2, 0xf3, 0xcb, 0x89, 0x3d, 0x99, 0x50, 0xcb, 0xf7, - 0xc2, 0x79, 0x0d, 0xf0, 0x46, 0x00, 0xe3, 0x77, 0x60, 0xd3, 0x77, 0x89, 0x61, 0x26, 0xb8, 0x59, - 0xce, 0x45, 0xa1, 0x20, 0x22, 0x1f, 0xc2, 0xb5, 0xd0, 0xae, 0x4e, 0x7d, 0xa2, 0x8d, 0xa9, 0x3e, - 0x53, 0xca, 0xf3, 0xf7, 0xd9, 0xab, 0x01, 0xa1, 0x19, 0xc8, 0x43, 0xdd, 0xea, 0x77, 0x29, 0xd8, - 0x0c, 0xaf, 0x53, 0x7a, 0x14, 0xac, 0x53, 0x00, 0x62, 0x59, 0xb6, 0x1f, 0x0f, 0xd7, 0x62, 0x2a, - 0x2f, 0xe8, 0xd5, 0xea, 0x91, 0x92, 0x12, 0x33, 0xb0, 0x33, 0x01, 0x98, 0x49, 0x56, 0x86, 0xed, - 0x3a, 0x94, 0x82, 0xef, 0x23, 0xfc, 0x23, 0x9b, 0xb8, 0x80, 0x83, 0x80, 0xd8, 0xbd, 0x0b, 0x6f, - 0x43, 0xee, 0x9c, 0x8e, 0x0c, 0x2b, 0x78, 0xf5, 0x14, 0x8d, 0xf0, 0x25, 0x37, 0x1b, 0xbd, 
0xe4, - 0x1e, 0xfd, 0x21, 0x05, 0x5b, 0x9a, 0x3d, 0x99, 0xf7, 0xf7, 0x08, 0xcd, 0xbd, 0x02, 0x78, 0x9f, - 0xa4, 0xbe, 0xf8, 0x78, 0x64, 0xf8, 0xe3, 0xe9, 0x79, 0x4d, 0xb3, 0x27, 0xfb, 0x23, 0xdb, 0x24, - 0xd6, 0x68, 0xf6, 0x95, 0x90, 0xff, 0xd1, 0xde, 0x1d, 0x51, 0xeb, 0xdd, 0x91, 0x1d, 0xfb, 0x66, - 0xf8, 0xd1, 0xec, 0xef, 0xd7, 0xe9, 0xcc, 0x71, 0xef, 0xe8, 0xaf, 0xe9, 0x9d, 0x63, 0xd1, 0x57, - 0x2f, 0x8c, 0x8d, 0x42, 0x87, 0x26, 0xd5, 0xd8, 0x78, 0xff, 0x17, 0x00, 0x00, 0xff, 0xff, 0x0c, - 0xab, 0xb6, 0x37, 0x7e, 0x1c, 0x00, 0x00, + 0xf5, 0xcf, 0xf2, 0x4b, 0xe4, 0x21, 0x45, 0x8d, 0x46, 0x8a, 0xbd, 0x56, 0x3e, 0x2c, 0x33, 0x1f, + 0x96, 0x9d, 0x7f, 0xa8, 0xc0, 0xb1, 0x1d, 0x47, 0xfe, 0x23, 0x2d, 0x45, 0xae, 0x15, 0xaa, 0x12, + 0xc9, 0x2e, 0xa9, 0xe6, 0x03, 0x28, 0x16, 0xa3, 0xdd, 0x21, 0xb9, 0xf6, 0x72, 0x77, 0xb3, 0xbb, + 0xb4, 0xad, 0xa0, 0x17, 0x06, 0x7a, 0xd5, 0xab, 0xde, 0x16, 0x45, 0xd1, 0x8b, 0xde, 0x04, 0xe8, + 0x03, 0x14, 0xc8, 0x5d, 0x9f, 0xa0, 0x40, 0xde, 0xa0, 0x68, 0x0b, 0xb4, 0x8f, 0xd0, 0xcb, 0x62, + 0x66, 0x76, 0x97, 0xbb, 0x24, 0x15, 0x2b, 0x01, 0xe2, 0x5c, 0x91, 0xf3, 0x9b, 0xdf, 0x39, 0x73, + 0xe6, 0xcc, 0x99, 0x33, 0x67, 0x66, 0x61, 0x7b, 0xe4, 0x38, 0x23, 0x8b, 0xee, 0xba, 0x9e, 0x13, + 0x38, 0xa7, 0xd3, 0xe1, 0xae, 0x41, 0x7d, 0xdd, 0x33, 0xdd, 0xc0, 0xf1, 0xea, 0x1c, 0xc3, 0x6b, + 0x82, 0x51, 0x8f, 0x18, 0xb5, 0x63, 0x58, 0x7f, 0x60, 0x5a, 0xb4, 0x15, 0x13, 0xfb, 0x34, 0xc0, + 0xf7, 0x20, 0x37, 0x34, 0x2d, 0x2a, 0x4b, 0xdb, 0xd9, 0x9d, 0xf2, 0xad, 0x37, 0xeb, 0x73, 0x42, + 0xf5, 0xb4, 0x44, 0x8f, 0xc1, 0x2a, 0x97, 0xa8, 0xfd, 0x2b, 0x07, 0x1b, 0x4b, 0x7a, 0x31, 0x86, + 0x9c, 0x4d, 0x26, 0x4c, 0xa3, 0xb4, 0x53, 0x52, 0xf9, 0x7f, 0x2c, 0xc3, 0x8a, 0x4b, 0xf4, 0x47, + 0x64, 0x44, 0xe5, 0x0c, 0x87, 0xa3, 0x26, 0x7e, 0x1d, 0xc0, 0xa0, 0x2e, 0xb5, 0x0d, 0x6a, 0xeb, + 0x67, 0x72, 0x76, 0x3b, 0xbb, 0x53, 0x52, 0x13, 0x08, 0x7e, 0x07, 0xd6, 0xdd, 0xe9, 0xa9, 0x65, + 0xea, 0x5a, 0x82, 0x06, 0xdb, 0xd9, 0x9d, 0xbc, 0x8a, 0x44, 0x47, 0x6b, 0x46, 0xbe, 0x0e, 0x6b, + 0x4f, 0x28, 0x79, 0x94, 0xa4, 0x96, 0x39, 0xb5, 0xca, 0xe0, 0x04, 0xb1, 0x09, 0x95, 0x09, 0xf5, + 0x7d, 0x32, 0xa2, 0x5a, 0x70, 0xe6, 0x52, 0x39, 0xc7, 0x67, 0xbf, 0xbd, 0x30, 0xfb, 0xf9, 0x99, + 0x97, 0x43, 0xa9, 0xc1, 0x99, 0x4b, 0x71, 0x03, 0x4a, 0xd4, 0x9e, 0x4e, 0x84, 0x86, 0xfc, 0x39, + 0xfe, 0x53, 0xec, 0xe9, 0x64, 0x5e, 0x4b, 0x91, 0x89, 0x85, 0x2a, 0x56, 0x7c, 0xea, 0x3d, 0x36, + 0x75, 0x2a, 0x17, 0xb8, 0x82, 0xeb, 0x0b, 0x0a, 0xfa, 0xa2, 0x7f, 0x5e, 0x47, 0x24, 0x87, 0x9b, + 0x50, 0xa2, 0x4f, 0x03, 0x6a, 0xfb, 0xa6, 0x63, 0xcb, 0x2b, 0x5c, 0xc9, 0x5b, 0x4b, 0x56, 0x91, + 0x5a, 0xc6, 0xbc, 0x8a, 0x99, 0x1c, 0xbe, 0x0b, 0x2b, 0x8e, 0x1b, 0x98, 0x8e, 0xed, 0xcb, 0xc5, + 0x6d, 0x69, 0xa7, 0x7c, 0xeb, 0xd5, 0xa5, 0x81, 0xd0, 0x15, 0x1c, 0x35, 0x22, 0xe3, 0x36, 0x20, + 0xdf, 0x99, 0x7a, 0x3a, 0xd5, 0x74, 0xc7, 0xa0, 0x9a, 0x69, 0x0f, 0x1d, 0xb9, 0xc4, 0x15, 0x5c, + 0x5d, 0x9c, 0x08, 0x27, 0x36, 0x1d, 0x83, 0xb6, 0xed, 0xa1, 0xa3, 0x56, 0xfd, 0x54, 0x1b, 0x5f, + 0x82, 0x82, 0x7f, 0x66, 0x07, 0xe4, 0xa9, 0x5c, 0xe1, 0x11, 0x12, 0xb6, 0x6a, 0x5f, 0x17, 0x60, + 0xed, 0x22, 0x21, 0x76, 0x1f, 0xf2, 0x43, 0x36, 0x4b, 0x39, 0xf3, 0x5d, 0x7c, 0x20, 0x64, 0xd2, + 0x4e, 0x2c, 0x7c, 0x4f, 0x27, 0x36, 0xa0, 0x6c, 0x53, 0x3f, 0xa0, 0x86, 0x88, 0x88, 0xec, 0x05, + 0x63, 0x0a, 0x84, 0xd0, 0x62, 0x48, 0xe5, 0xbe, 0x57, 0x48, 0x7d, 0x0a, 0x6b, 0xb1, 0x49, 0x9a, + 0x47, 0xec, 0x51, 0x14, 0x9b, 0xbb, 0xcf, 0xb3, 0xa4, 0xae, 0x44, 0x72, 0x2a, 0x13, 0x53, 0xab, + 0x34, 0xd5, 0xc6, 0x2d, 0x00, 0xc7, 0xa6, 0xce, 0x50, 0x33, 0xa8, 0x6e, 
0xc9, 0xc5, 0x73, 0xbc, + 0xd4, 0x65, 0x94, 0x05, 0x2f, 0x39, 0x02, 0xd5, 0x2d, 0xfc, 0xe1, 0x2c, 0xd4, 0x56, 0xce, 0x89, + 0x94, 0x63, 0xb1, 0xc9, 0x16, 0xa2, 0xed, 0x04, 0xaa, 0x1e, 0x65, 0x71, 0x4f, 0x8d, 0x70, 0x66, + 0x25, 0x6e, 0x44, 0xfd, 0xb9, 0x33, 0x53, 0x43, 0x31, 0x31, 0xb1, 0x55, 0x2f, 0xd9, 0xc4, 0x6f, + 0x40, 0x0c, 0x68, 0x3c, 0xac, 0x80, 0x67, 0xa1, 0x4a, 0x04, 0x76, 0xc8, 0x84, 0x6e, 0x7d, 0x09, + 0xd5, 0xb4, 0x7b, 0xf0, 0x26, 0xe4, 0xfd, 0x80, 0x78, 0x01, 0x8f, 0xc2, 0xbc, 0x2a, 0x1a, 0x18, + 0x41, 0x96, 0xda, 0x06, 0xcf, 0x72, 0x79, 0x95, 0xfd, 0xc5, 0x3f, 0x9d, 0x4d, 0x38, 0xcb, 0x27, + 0xfc, 0xf6, 0xe2, 0x8a, 0xa6, 0x34, 0xcf, 0xcf, 0x7b, 0xeb, 0x03, 0x58, 0x4d, 0x4d, 0xe0, 0xa2, + 0x43, 0xd7, 0x7e, 0x05, 0x2f, 0x2f, 0x55, 0x8d, 0x3f, 0x85, 0xcd, 0xa9, 0x6d, 0xda, 0x01, 0xf5, + 0x5c, 0x8f, 0xb2, 0x88, 0x15, 0x43, 0xc9, 0xff, 0x5e, 0x39, 0x27, 0xe6, 0x4e, 0x92, 0x6c, 0xa1, + 0x45, 0xdd, 0x98, 0x2e, 0x82, 0x37, 0x4b, 0xc5, 0xff, 0xac, 0xa0, 0x67, 0xcf, 0x9e, 0x3d, 0xcb, + 0xd4, 0x7e, 0x57, 0x80, 0xcd, 0x65, 0x7b, 0x66, 0xe9, 0xf6, 0xbd, 0x04, 0x05, 0x7b, 0x3a, 0x39, + 0xa5, 0x1e, 0x77, 0x52, 0x5e, 0x0d, 0x5b, 0xb8, 0x01, 0x79, 0x8b, 0x9c, 0x52, 0x4b, 0xce, 0x6d, + 0x4b, 0x3b, 0xd5, 0x5b, 0xef, 0x5c, 0x68, 0x57, 0xd6, 0x8f, 0x98, 0x88, 0x2a, 0x24, 0xf1, 0x47, + 0x90, 0x0b, 0x53, 0x34, 0xd3, 0x70, 0xf3, 0x62, 0x1a, 0xd8, 0x5e, 0x52, 0xb9, 0x1c, 0x7e, 0x05, + 0x4a, 0xec, 0x57, 0xc4, 0x46, 0x81, 0xdb, 0x5c, 0x64, 0x00, 0x8b, 0x0b, 0xbc, 0x05, 0x45, 0xbe, + 0x4d, 0x0c, 0x1a, 0x1d, 0x6d, 0x71, 0x9b, 0x05, 0x96, 0x41, 0x87, 0x64, 0x6a, 0x05, 0xda, 0x63, + 0x62, 0x4d, 0x29, 0x0f, 0xf8, 0x92, 0x5a, 0x09, 0xc1, 0x5f, 0x30, 0x0c, 0x5f, 0x85, 0xb2, 0xd8, + 0x55, 0xa6, 0x6d, 0xd0, 0xa7, 0x3c, 0x7b, 0xe6, 0x55, 0xb1, 0xd1, 0xda, 0x0c, 0x61, 0xc3, 0x3f, + 0xf4, 0x1d, 0x3b, 0x0a, 0x4d, 0x3e, 0x04, 0x03, 0xf8, 0xf0, 0x1f, 0xcc, 0x27, 0xee, 0xd7, 0x96, + 0x4f, 0x6f, 0x3e, 0xa6, 0x6a, 0x7f, 0xc9, 0x40, 0x8e, 0xe7, 0x8b, 0x35, 0x28, 0x0f, 0x3e, 0xeb, + 0x29, 0x5a, 0xab, 0x7b, 0xb2, 0x7f, 0xa4, 0x20, 0x09, 0x57, 0x01, 0x38, 0xf0, 0xe0, 0xa8, 0xdb, + 0x18, 0xa0, 0x4c, 0xdc, 0x6e, 0x77, 0x06, 0x77, 0x6f, 0xa3, 0x6c, 0x2c, 0x70, 0x22, 0x80, 0x5c, + 0x92, 0xf0, 0xfe, 0x2d, 0x94, 0xc7, 0x08, 0x2a, 0x42, 0x41, 0xfb, 0x53, 0xa5, 0x75, 0xf7, 0x36, + 0x2a, 0xa4, 0x91, 0xf7, 0x6f, 0xa1, 0x15, 0xbc, 0x0a, 0x25, 0x8e, 0xec, 0x77, 0xbb, 0x47, 0xa8, + 0x18, 0xeb, 0xec, 0x0f, 0xd4, 0x76, 0xe7, 0x00, 0x95, 0x62, 0x9d, 0x07, 0x6a, 0xf7, 0xa4, 0x87, + 0x20, 0xd6, 0x70, 0xac, 0xf4, 0xfb, 0x8d, 0x03, 0x05, 0x95, 0x63, 0xc6, 0xfe, 0x67, 0x03, 0xa5, + 0x8f, 0x2a, 0x29, 0xb3, 0xde, 0xbf, 0x85, 0x56, 0xe3, 0x21, 0x94, 0xce, 0xc9, 0x31, 0xaa, 0xe2, + 0x75, 0x58, 0x15, 0x43, 0x44, 0x46, 0xac, 0xcd, 0x41, 0x77, 0x6f, 0x23, 0x34, 0x33, 0x44, 0x68, + 0x59, 0x4f, 0x01, 0x77, 0x6f, 0x23, 0x5c, 0x6b, 0x42, 0x9e, 0x47, 0x17, 0xc6, 0x50, 0x3d, 0x6a, + 0xec, 0x2b, 0x47, 0x5a, 0xb7, 0x37, 0x68, 0x77, 0x3b, 0x8d, 0x23, 0x24, 0xcd, 0x30, 0x55, 0xf9, + 0xf9, 0x49, 0x5b, 0x55, 0x5a, 0x28, 0x93, 0xc4, 0x7a, 0x4a, 0x63, 0xa0, 0xb4, 0x50, 0xb6, 0xa6, + 0xc3, 0xe6, 0xb2, 0x3c, 0xb9, 0x74, 0x67, 0x24, 0x96, 0x38, 0x73, 0xce, 0x12, 0x73, 0x5d, 0x0b, + 0x4b, 0xfc, 0xcf, 0x0c, 0x6c, 0x2c, 0x39, 0x2b, 0x96, 0x0e, 0xf2, 0x13, 0xc8, 0x8b, 0x10, 0x15, + 0xa7, 0xe7, 0x8d, 0xa5, 0x87, 0x0e, 0x0f, 0xd8, 0x85, 0x13, 0x94, 0xcb, 0x25, 0x2b, 0x88, 0xec, + 0x39, 0x15, 0x04, 0x53, 0xb1, 0x90, 0xd3, 0x7f, 0xb9, 0x90, 0xd3, 0xc5, 0xb1, 0x77, 0xf7, 0x22, + 0xc7, 0x1e, 0xc7, 0xbe, 0x5b, 0x6e, 0xcf, 0x2f, 0xc9, 0xed, 0xf7, 0x61, 0x7d, 0x41, 0xd1, 0x85, + 
0x73, 0xec, 0xaf, 0x25, 0x90, 0xcf, 0x73, 0xce, 0x73, 0x32, 0x5d, 0x26, 0x95, 0xe9, 0xee, 0xcf, + 0x7b, 0xf0, 0xda, 0xf9, 0x8b, 0xb0, 0xb0, 0xd6, 0x5f, 0x49, 0x70, 0x69, 0x79, 0xa5, 0xb8, 0xd4, + 0x86, 0x8f, 0xa0, 0x30, 0xa1, 0xc1, 0xd8, 0x89, 0xaa, 0xa5, 0xb7, 0x97, 0x9c, 0xc1, 0xac, 0x7b, + 0x7e, 0xb1, 0x43, 0xa9, 0xe4, 0x21, 0x9e, 0x3d, 0xaf, 0xdc, 0x13, 0xd6, 0x2c, 0x58, 0xfa, 0x9b, + 0x0c, 0xbc, 0xbc, 0x54, 0xf9, 0x52, 0x43, 0x5f, 0x03, 0x30, 0x6d, 0x77, 0x1a, 0x88, 0x8a, 0x48, + 0x24, 0xd8, 0x12, 0x47, 0x78, 0xf2, 0x62, 0xc9, 0x73, 0x1a, 0xc4, 0xfd, 0x59, 0xde, 0x0f, 0x02, + 0xe2, 0x84, 0x7b, 0x33, 0x43, 0x73, 0xdc, 0xd0, 0xd7, 0xcf, 0x99, 0xe9, 0x42, 0x60, 0xbe, 0x07, + 0x48, 0xb7, 0x4c, 0x6a, 0x07, 0x9a, 0x1f, 0x78, 0x94, 0x4c, 0x4c, 0x7b, 0xc4, 0x4f, 0x90, 0xe2, + 0x5e, 0x7e, 0x48, 0x2c, 0x9f, 0xaa, 0x6b, 0xa2, 0xbb, 0x1f, 0xf5, 0x32, 0x09, 0x1e, 0x40, 0x5e, + 0x42, 0xa2, 0x90, 0x92, 0x10, 0xdd, 0xb1, 0x44, 0xed, 0xeb, 0x22, 0x94, 0x13, 0x75, 0x35, 0xbe, + 0x06, 0x95, 0x87, 0xe4, 0x31, 0xd1, 0xa2, 0xbb, 0x92, 0xf0, 0x44, 0x99, 0x61, 0xbd, 0xf0, 0xbe, + 0xf4, 0x1e, 0x6c, 0x72, 0x8a, 0x33, 0x0d, 0xa8, 0xa7, 0xe9, 0x16, 0xf1, 0x7d, 0xee, 0xb4, 0x22, + 0xa7, 0x62, 0xd6, 0xd7, 0x65, 0x5d, 0xcd, 0xa8, 0x07, 0xdf, 0x81, 0x0d, 0x2e, 0x31, 0x99, 0x5a, + 0x81, 0xe9, 0x5a, 0x54, 0x63, 0xb7, 0x37, 0x9f, 0x9f, 0x24, 0xb1, 0x65, 0xeb, 0x8c, 0x71, 0x1c, + 0x12, 0x98, 0x45, 0x3e, 0x6e, 0xc1, 0x6b, 0x5c, 0x6c, 0x44, 0x6d, 0xea, 0x91, 0x80, 0x6a, 0xf4, + 0x8b, 0x29, 0xb1, 0x7c, 0x8d, 0xd8, 0x86, 0x36, 0x26, 0xfe, 0x58, 0xde, 0x64, 0x0a, 0xf6, 0x33, + 0xb2, 0xa4, 0x5e, 0x61, 0xc4, 0x83, 0x90, 0xa7, 0x70, 0x5a, 0xc3, 0x36, 0x3e, 0x26, 0xfe, 0x18, + 0xef, 0xc1, 0x25, 0xae, 0xc5, 0x0f, 0x3c, 0xd3, 0x1e, 0x69, 0xfa, 0x98, 0xea, 0x8f, 0xb4, 0x69, + 0x30, 0xbc, 0x27, 0xbf, 0x92, 0x1c, 0x9f, 0x5b, 0xd8, 0xe7, 0x9c, 0x26, 0xa3, 0x9c, 0x04, 0xc3, + 0x7b, 0xb8, 0x0f, 0x15, 0xb6, 0x18, 0x13, 0xf3, 0x4b, 0xaa, 0x0d, 0x1d, 0x8f, 0x1f, 0x8d, 0xd5, + 0x25, 0xa9, 0x29, 0xe1, 0xc1, 0x7a, 0x37, 0x14, 0x38, 0x76, 0x0c, 0xba, 0x97, 0xef, 0xf7, 0x14, + 0xa5, 0xa5, 0x96, 0x23, 0x2d, 0x0f, 0x1c, 0x8f, 0x05, 0xd4, 0xc8, 0x89, 0x1d, 0x5c, 0x16, 0x01, + 0x35, 0x72, 0x22, 0xf7, 0xde, 0x81, 0x0d, 0x5d, 0x17, 0x73, 0x36, 0x75, 0x2d, 0xbc, 0x63, 0xf9, + 0x32, 0x4a, 0x39, 0x4b, 0xd7, 0x0f, 0x04, 0x21, 0x8c, 0x71, 0x1f, 0x7f, 0x08, 0x2f, 0xcf, 0x9c, + 0x95, 0x14, 0x5c, 0x5f, 0x98, 0xe5, 0xbc, 0xe8, 0x1d, 0xd8, 0x70, 0xcf, 0x16, 0x05, 0x71, 0x6a, + 0x44, 0xf7, 0x6c, 0x5e, 0xec, 0x03, 0xd8, 0x74, 0xc7, 0xee, 0xa2, 0xdc, 0xcd, 0xa4, 0x1c, 0x76, + 0xc7, 0xee, 0xbc, 0xe0, 0x5b, 0xfc, 0xc2, 0xed, 0x51, 0x9d, 0x04, 0xd4, 0x90, 0x2f, 0x27, 0xe9, + 0x89, 0x0e, 0xbc, 0x0b, 0x48, 0xd7, 0x35, 0x6a, 0x93, 0x53, 0x8b, 0x6a, 0xc4, 0xa3, 0x36, 0xf1, + 0xe5, 0xab, 0x49, 0x72, 0x55, 0xd7, 0x15, 0xde, 0xdb, 0xe0, 0x9d, 0xf8, 0x26, 0xac, 0x3b, 0xa7, + 0x0f, 0x75, 0x11, 0x92, 0x9a, 0xeb, 0xd1, 0xa1, 0xf9, 0x54, 0x7e, 0x93, 0xfb, 0x77, 0x8d, 0x75, + 0xf0, 0x80, 0xec, 0x71, 0x18, 0xdf, 0x00, 0xa4, 0xfb, 0x63, 0xe2, 0xb9, 0x3c, 0x27, 0xfb, 0x2e, + 0xd1, 0xa9, 0xfc, 0x96, 0xa0, 0x0a, 0xbc, 0x13, 0xc1, 0x6c, 0x4b, 0xf8, 0x4f, 0xcc, 0x61, 0x10, + 0x69, 0xbc, 0x2e, 0xb6, 0x04, 0xc7, 0x42, 0x6d, 0x3b, 0x80, 0x98, 0x2b, 0x52, 0x03, 0xef, 0x70, + 0x5a, 0xd5, 0x1d, 0xbb, 0xc9, 0x71, 0xdf, 0x80, 0x55, 0xc6, 0x9c, 0x0d, 0x7a, 0x43, 0x14, 0x64, + 0xee, 0x38, 0x31, 0xe2, 0x0f, 0x56, 0x1b, 0xd7, 0xf6, 0xa0, 0x92, 0x8c, 0x4f, 0x5c, 0x02, 0x11, + 0xa1, 0x48, 0x62, 0xc5, 0x4a, 0xb3, 0xdb, 0x62, 0x65, 0xc6, 0xe7, 0x0a, 0xca, 0xb0, 0x72, 0xe7, + 0xa8, 0x3d, 0x50, 0x34, 
0xf5, 0xa4, 0x33, 0x68, 0x1f, 0x2b, 0x28, 0x9b, 0xa8, 0xab, 0x0f, 0x73, + 0xc5, 0xb7, 0xd1, 0xf5, 0xda, 0x37, 0x19, 0xa8, 0xa6, 0x2f, 0x4a, 0xf8, 0xff, 0xe1, 0x72, 0xf4, + 0xaa, 0xe1, 0xd3, 0x40, 0x7b, 0x62, 0x7a, 0x7c, 0xe3, 0x4c, 0x88, 0x38, 0xc4, 0xe2, 0xa5, 0xdb, + 0x0c, 0x59, 0x7d, 0x1a, 0x7c, 0x62, 0x7a, 0x6c, 0x5b, 0x4c, 0x48, 0x80, 0x8f, 0xe0, 0xaa, 0xed, + 0x68, 0x7e, 0x40, 0x6c, 0x83, 0x78, 0x86, 0x36, 0x7b, 0x4f, 0xd2, 0x88, 0xae, 0x53, 0xdf, 0x77, + 0xc4, 0x81, 0x15, 0x6b, 0x79, 0xd5, 0x76, 0xfa, 0x21, 0x79, 0x96, 0xc9, 0x1b, 0x21, 0x75, 0x2e, + 0xcc, 0xb2, 0xe7, 0x85, 0xd9, 0x2b, 0x50, 0x9a, 0x10, 0x57, 0xa3, 0x76, 0xe0, 0x9d, 0xf1, 0xf2, + 0xb8, 0xa8, 0x16, 0x27, 0xc4, 0x55, 0x58, 0xfb, 0x85, 0xdc, 0x52, 0x0e, 0x73, 0xc5, 0x22, 0x2a, + 0x1d, 0xe6, 0x8a, 0x25, 0x04, 0xb5, 0x7f, 0x64, 0xa1, 0x92, 0x2c, 0x97, 0xd9, 0xed, 0x43, 0xe7, + 0x27, 0x8b, 0xc4, 0x73, 0xcf, 0x1b, 0xdf, 0x5a, 0x5c, 0xd7, 0x9b, 0xec, 0xc8, 0xd9, 0x2b, 0x88, + 0x22, 0x56, 0x15, 0x92, 0xec, 0xb8, 0x67, 0xd9, 0x86, 0x8a, 0xa2, 0xa1, 0xa8, 0x86, 0x2d, 0x7c, + 0x00, 0x85, 0x87, 0x3e, 0xd7, 0x5d, 0xe0, 0xba, 0xdf, 0xfc, 0x76, 0xdd, 0x87, 0x7d, 0xae, 0xbc, + 0x74, 0xd8, 0xd7, 0x3a, 0x5d, 0xf5, 0xb8, 0x71, 0xa4, 0x86, 0xe2, 0xf8, 0x0a, 0xe4, 0x2c, 0xf2, + 0xe5, 0x59, 0xfa, 0x70, 0xe2, 0xd0, 0x45, 0x17, 0xe1, 0x0a, 0xe4, 0x9e, 0x50, 0xf2, 0x28, 0x7d, + 0x24, 0x70, 0xe8, 0x07, 0xdc, 0x0c, 0xbb, 0x90, 0xe7, 0xfe, 0xc2, 0x00, 0xa1, 0xc7, 0xd0, 0x4b, + 0xb8, 0x08, 0xb9, 0x66, 0x57, 0x65, 0x1b, 0x02, 0x41, 0x45, 0xa0, 0x5a, 0xaf, 0xad, 0x34, 0x15, + 0x94, 0xa9, 0xdd, 0x81, 0x82, 0x70, 0x02, 0xdb, 0x2c, 0xb1, 0x1b, 0xd0, 0x4b, 0x61, 0x33, 0xd4, + 0x21, 0x45, 0xbd, 0x27, 0xc7, 0xfb, 0x8a, 0x8a, 0x32, 0xe9, 0xa5, 0xce, 0xa1, 0x7c, 0xcd, 0x87, + 0x4a, 0xb2, 0x5e, 0x7e, 0x31, 0x77, 0xe1, 0xbf, 0x4a, 0x50, 0x4e, 0xd4, 0xbf, 0xac, 0x70, 0x21, + 0x96, 0xe5, 0x3c, 0xd1, 0x88, 0x65, 0x12, 0x3f, 0x0c, 0x0d, 0xe0, 0x50, 0x83, 0x21, 0x17, 0x5d, + 0xba, 0x17, 0xb4, 0x45, 0xf2, 0xa8, 0x50, 0xfb, 0xa3, 0x04, 0x68, 0xbe, 0x00, 0x9d, 0x33, 0x53, + 0xfa, 0x31, 0xcd, 0xac, 0xfd, 0x41, 0x82, 0x6a, 0xba, 0xea, 0x9c, 0x33, 0xef, 0xda, 0x8f, 0x6a, + 0xde, 0xdf, 0x33, 0xb0, 0x9a, 0xaa, 0x35, 0x2f, 0x6a, 0xdd, 0x17, 0xb0, 0x6e, 0x1a, 0x74, 0xe2, + 0x3a, 0x01, 0xb5, 0xf5, 0x33, 0xcd, 0xa2, 0x8f, 0xa9, 0x25, 0xd7, 0x78, 0xd2, 0xd8, 0xfd, 0xf6, + 0x6a, 0xb6, 0xde, 0x9e, 0xc9, 0x1d, 0x31, 0xb1, 0xbd, 0x8d, 0x76, 0x4b, 0x39, 0xee, 0x75, 0x07, + 0x4a, 0xa7, 0xf9, 0x99, 0x76, 0xd2, 0xf9, 0x59, 0xa7, 0xfb, 0x49, 0x47, 0x45, 0xe6, 0x1c, 0xed, + 0x07, 0xdc, 0xf6, 0x3d, 0x40, 0xf3, 0x46, 0xe1, 0xcb, 0xb0, 0xcc, 0x2c, 0xf4, 0x12, 0xde, 0x80, + 0xb5, 0x4e, 0x57, 0xeb, 0xb7, 0x5b, 0x8a, 0xa6, 0x3c, 0x78, 0xa0, 0x34, 0x07, 0x7d, 0xf1, 0x3e, + 0x11, 0xb3, 0x07, 0xa9, 0x0d, 0x5e, 0xfb, 0x7d, 0x16, 0x36, 0x96, 0x58, 0x82, 0x1b, 0xe1, 0xcd, + 0x42, 0x5c, 0x76, 0xde, 0xbd, 0x88, 0xf5, 0x75, 0x56, 0x10, 0xf4, 0x88, 0x17, 0x84, 0x17, 0x91, + 0x1b, 0xc0, 0xbc, 0x64, 0x07, 0xe6, 0xd0, 0xa4, 0x5e, 0xf8, 0x9c, 0x23, 0xae, 0x1b, 0x6b, 0x33, + 0x5c, 0xbc, 0xe8, 0xfc, 0x1f, 0x60, 0xd7, 0xf1, 0xcd, 0xc0, 0x7c, 0x4c, 0x35, 0xd3, 0x8e, 0xde, + 0x7e, 0xd8, 0xf5, 0x23, 0xa7, 0xa2, 0xa8, 0xa7, 0x6d, 0x07, 0x31, 0xdb, 0xa6, 0x23, 0x32, 0xc7, + 0x66, 0xc9, 0x3c, 0xab, 0xa2, 0xa8, 0x27, 0x66, 0x5f, 0x83, 0x8a, 0xe1, 0x4c, 0x59, 0x4d, 0x26, + 0x78, 0xec, 0xec, 0x90, 0xd4, 0xb2, 0xc0, 0x62, 0x4a, 0x58, 0x6d, 0xcf, 0x1e, 0x9d, 0x2a, 0x6a, + 0x59, 0x60, 0x82, 0x72, 0x1d, 0xd6, 0xc8, 0x68, 0xe4, 0x31, 0xe5, 0x91, 0x22, 0x71, 0x7f, 0xa8, + 0xc6, 0x30, 0x27, 0x6e, 0x1d, 0x42, 0x31, 0xf2, 
0x03, 0x3b, 0xaa, 0x99, 0x27, 0x34, 0x57, 0x5c, + 0x8a, 0x33, 0x3b, 0x25, 0xb5, 0x68, 0x47, 0x9d, 0xd7, 0xa0, 0x62, 0xfa, 0xda, 0xec, 0x0d, 0x3d, + 0xb3, 0x9d, 0xd9, 0x29, 0xaa, 0x65, 0xd3, 0x8f, 0xdf, 0x1f, 0x6b, 0x5f, 0x65, 0xa0, 0x9a, 0xfe, + 0x06, 0x80, 0x5b, 0x50, 0xb4, 0x1c, 0x9d, 0xf0, 0xd0, 0x12, 0x1f, 0xa0, 0x76, 0x9e, 0xf3, 0xd9, + 0xa0, 0x7e, 0x14, 0xf2, 0xd5, 0x58, 0x72, 0xeb, 0x6f, 0x12, 0x14, 0x23, 0x18, 0x5f, 0x82, 0x9c, + 0x4b, 0x82, 0x31, 0x57, 0x97, 0xdf, 0xcf, 0x20, 0x49, 0xe5, 0x6d, 0x86, 0xfb, 0x2e, 0xb1, 0x79, + 0x08, 0x84, 0x38, 0x6b, 0xb3, 0x75, 0xb5, 0x28, 0x31, 0xf8, 0xe5, 0xc4, 0x99, 0x4c, 0xa8, 0x1d, + 0xf8, 0xd1, 0xba, 0x86, 0x78, 0x33, 0x84, 0xf1, 0x3b, 0xb0, 0x1e, 0x78, 0xc4, 0xb4, 0x52, 0xdc, + 0x1c, 0xe7, 0xa2, 0xa8, 0x23, 0x26, 0xef, 0xc1, 0x95, 0x48, 0xaf, 0x41, 0x03, 0xa2, 0x8f, 0xa9, + 0x31, 0x13, 0x2a, 0xf0, 0x47, 0x88, 0xcb, 0x21, 0xa1, 0x15, 0xf6, 0x47, 0xb2, 0xb5, 0x6f, 0x24, + 0x58, 0x8f, 0xae, 0x53, 0x46, 0xec, 0xac, 0x63, 0x00, 0x62, 0xdb, 0x4e, 0x90, 0x74, 0xd7, 0x62, + 0x28, 0x2f, 0xc8, 0xd5, 0x1b, 0xb1, 0x90, 0x9a, 0x50, 0xb0, 0x35, 0x01, 0x98, 0xf5, 0x9c, 0xeb, + 0xb6, 0xab, 0x50, 0x0e, 0x3f, 0xf0, 0xf0, 0xaf, 0x84, 0xe2, 0x02, 0x0e, 0x02, 0x62, 0xf7, 0x2e, + 0xbc, 0x09, 0xf9, 0x53, 0x3a, 0x32, 0xed, 0xf0, 0xd9, 0x56, 0x34, 0xa2, 0x67, 0x92, 0x5c, 0xfc, + 0x4c, 0xb2, 0xff, 0x5b, 0x09, 0x36, 0x74, 0x67, 0x32, 0x6f, 0xef, 0x3e, 0x9a, 0x7b, 0x05, 0xf0, + 0x3f, 0x96, 0x3e, 0xff, 0x68, 0x64, 0x06, 0xe3, 0xe9, 0x69, 0x5d, 0x77, 0x26, 0xbb, 0x23, 0xc7, + 0x22, 0xf6, 0x68, 0xf6, 0x99, 0x93, 0xff, 0xd1, 0xdf, 0x1d, 0x51, 0xfb, 0xdd, 0x91, 0x93, 0xf8, + 0xe8, 0x79, 0x7f, 0xf6, 0xf7, 0xbf, 0x92, 0xf4, 0xa7, 0x4c, 0xf6, 0xa0, 0xb7, 0xff, 0xe7, 0xcc, + 0xd6, 0x81, 0x18, 0xae, 0x17, 0xb9, 0x47, 0xa5, 0x43, 0x8b, 0xea, 0x6c, 0xca, 0xff, 0x0b, 0x00, + 0x00, 0xff, 0xff, 0x1a, 0x28, 0x25, 0x79, 0x42, 0x1d, 0x00, 0x00, } diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto index 4d4fb37..8697a50 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto @@ -45,6 +45,7 @@ option java_package = "com.google.protobuf"; option java_outer_classname = "DescriptorProtos"; option csharp_namespace = "Google.Protobuf.Reflection"; option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; // descriptor.proto must be optimized for speed because reflection-based // algorithms don't work during bootstrapping. @@ -225,6 +226,26 @@ message EnumDescriptorProto { repeated EnumValueDescriptorProto value = 2; optional EnumOptions options = 3; + + // Range of reserved numeric values. Reserved values may not be used by + // entries in the same enum. Reserved ranges may not overlap. + // + // Note that this is distinct from DescriptorProto.ReservedRange in that it + // is inclusive such that it can appropriately represent the entire int32 + // domain. + message EnumReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Inclusive. + } + + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. 
+ repeated EnumReservedRange reserved_range = 4; + + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. + repeated string reserved_name = 5; } // Describes a value within an enum. @@ -396,10 +417,12 @@ message FileOptions { // determining the namespace. optional string php_namespace = 41; - // The parser stores options it doesn't recognize here. See above. + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. repeated UninterpretedOption uninterpreted_option = 999; - // Clients can define custom options in extensions of this message. See above. + // Clients can define custom options in extensions of this message. + // See the documentation for the "Options" section above. extensions 1000 to max; reserved 38; diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/generator/Makefile b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/generator/Makefile deleted file mode 100644 index b5715c3..0000000 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/generator/Makefile +++ /dev/null @@ -1,40 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -include $(GOROOT)/src/Make.inc - -TARG=github.com/golang/protobuf/compiler/generator -GOFILES=\ - generator.go\ - -DEPS=../descriptor ../plugin ../../proto - -include $(GOROOT)/src/Make.pkg diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go index 569451f..a2e0a5f 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go @@ -40,19 +40,24 @@ import ( "bufio" "bytes" "compress/gzip" + "crypto/sha256" + "encoding/hex" "fmt" + "go/build" "go/parser" "go/printer" "go/token" "log" "os" "path" + "sort" "strconv" "strings" "unicode" "unicode/utf8" "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/protoc-gen-go/generator/internal/remap" "github.com/golang/protobuf/protoc-gen-go/descriptor" plugin "github.com/golang/protobuf/protoc-gen-go/plugin" @@ -88,6 +93,14 @@ func RegisterPlugin(p Plugin) { plugins = append(plugins, p) } +// A GoImportPath is the import path of a Go package. e.g., "google.golang.org/genproto/protobuf". +type GoImportPath string + +func (p GoImportPath) String() string { return strconv.Quote(string(p)) } + +// A GoPackageName is the name of a Go package. e.g., "protobuf". +type GoPackageName string + // Each type we import as a protocol buffer (other than FileDescriptorProto) needs // a pointer to the FileDescriptorProto that represents it. These types achieve that // wrapping by placing each Proto inside a struct with the pointer to its File. The @@ -96,19 +109,21 @@ func RegisterPlugin(p Plugin) { // The file and package name method are common to messages and enums. type common struct { - file *descriptor.FileDescriptorProto // File this object comes from. + file *FileDescriptor // File this object comes from. } -// PackageName is name in the package clause in the generated file. -func (c *common) PackageName() string { return uniquePackageOf(c.file) } +// GoImportPath is the import path of the Go package containing the type. +func (c *common) GoImportPath() GoImportPath { + return c.file.importPath +} -func (c *common) File() *descriptor.FileDescriptorProto { return c.file } +func (c *common) File() *FileDescriptor { return c.file } func fileIsProto3(file *descriptor.FileDescriptorProto) bool { return file.GetSyntax() == "proto3" } -func (c *common) proto3() bool { return fileIsProto3(c.file) } +func (c *common) proto3() bool { return fileIsProto3(c.file.FileDescriptorProto) } // Descriptor represents a protocol buffer message. type Descriptor struct { @@ -134,7 +149,7 @@ func (d *Descriptor) TypeName() []string { for parent := d; parent != nil; parent = parent.parent { n++ } - s := make([]string, n, n) + s := make([]string, n) for parent := d; parent != nil; parent = parent.parent { n-- s[n] = parent.GetName() @@ -256,77 +271,61 @@ type FileDescriptor struct { // This is used for supporting public imports. exported map[Object][]symbol - index int // The index of this file in the list of files to generate code for + fingerprint string // Fingerprint of this file's contents. + importPath GoImportPath // Import path of this file's package. + packageName GoPackageName // Name of this file's Go package. 
proto3 bool // whether to generate proto3 code for this file } -// PackageName is the package name we'll use in the generated code to refer to this file. -func (d *FileDescriptor) PackageName() string { return uniquePackageOf(d.FileDescriptorProto) } - // VarName is the variable name we'll use in the generated code to refer // to the compressed bytes of this descriptor. It is not exported, so // it is only valid inside the generated package. -func (d *FileDescriptor) VarName() string { return fmt.Sprintf("fileDescriptor%d", d.index) } +func (d *FileDescriptor) VarName() string { + name := strings.Map(badToUnderscore, baseName(d.GetName())) + return fmt.Sprintf("fileDescriptor_%s_%s", name, d.fingerprint) +} // goPackageOption interprets the file's go_package option. // If there is no go_package, it returns ("", "", false). // If there's a simple name, it returns ("", pkg, true). // If the option implies an import path, it returns (impPath, pkg, true). -func (d *FileDescriptor) goPackageOption() (impPath, pkg string, ok bool) { - pkg = d.GetOptions().GetGoPackage() - if pkg == "" { - return - } - ok = true - // The presence of a slash implies there's an import path. - slash := strings.LastIndex(pkg, "/") - if slash < 0 { - return - } - impPath, pkg = pkg, pkg[slash+1:] - // A semicolon-delimited suffix overrides the package name. - sc := strings.IndexByte(impPath, ';') - if sc < 0 { - return +func (d *FileDescriptor) goPackageOption() (impPath GoImportPath, pkg GoPackageName, ok bool) { + opt := d.GetOptions().GetGoPackage() + if opt == "" { + return "", "", false } - impPath, pkg = impPath[:sc], impPath[sc+1:] - return -} - -// goPackageName returns the Go package name to use in the -// generated Go file. The result explicit reports whether the name -// came from an option go_package statement. If explicit is false, -// the name was derived from the protocol buffer's package statement -// or the input file name. -func (d *FileDescriptor) goPackageName() (name string, explicit bool) { - // Does the file have a "go_package" option? - if _, pkg, ok := d.goPackageOption(); ok { - return pkg, true + // A semicolon-delimited suffix delimits the import path and package name. + sc := strings.Index(opt, ";") + if sc >= 0 { + return GoImportPath(opt[:sc]), cleanPackageName(opt[sc+1:]), true } - - // Does the file have a package clause? - if pkg := d.GetPackage(); pkg != "" { - return pkg, false + // The presence of a slash implies there's an import path. + slash := strings.LastIndex(opt, "/") + if slash >= 0 { + return GoImportPath(opt), cleanPackageName(opt[slash+1:]), true } - // Use the file base name. - return baseName(d.GetName()), false + return "", cleanPackageName(opt), true } // goFileName returns the output name for the generated Go file. -func (d *FileDescriptor) goFileName() string { +func (d *FileDescriptor) goFileName(pathType pathType) string { name := *d.Name if ext := path.Ext(name); ext == ".proto" || ext == ".protodevel" { name = name[:len(name)-len(ext)] } name += ".pb.go" + if pathType == pathTypeSourceRelative { + return name + } + // Does the file have a "go_package" option? // If it does, it may override the filename. if impPath, _, ok := d.goPackageOption(); ok && impPath != "" { // Replace the existing dirname with the declared import path. 
_, name = path.Split(name) - name = path.Join(impPath, name) + name = path.Join(string(impPath), name) return name } @@ -341,14 +340,13 @@ func (d *FileDescriptor) addExport(obj Object, sym symbol) { type symbol interface { // GenerateAlias should generate an appropriate alias // for the symbol from the named package. - GenerateAlias(g *Generator, pkg string) + GenerateAlias(g *Generator, pkg GoPackageName) } type messageSymbol struct { sym string hasExtensions, isMessageSet bool - hasOneof bool - getters []getterSymbol + oneofTypes []string } type getterSymbol struct { @@ -358,144 +356,11 @@ type getterSymbol struct { genType bool // whether typ contains a generated type (message/group/enum) } -func (ms *messageSymbol) GenerateAlias(g *Generator, pkg string) { - remoteSym := pkg + "." + ms.sym - - g.P("type ", ms.sym, " ", remoteSym) - g.P("func (m *", ms.sym, ") Reset() { (*", remoteSym, ")(m).Reset() }") - g.P("func (m *", ms.sym, ") String() string { return (*", remoteSym, ")(m).String() }") - g.P("func (*", ms.sym, ") ProtoMessage() {}") - if ms.hasExtensions { - g.P("func (*", ms.sym, ") ExtensionRangeArray() []", g.Pkg["proto"], ".ExtensionRange ", - "{ return (*", remoteSym, ")(nil).ExtensionRangeArray() }") - if ms.isMessageSet { - g.P("func (m *", ms.sym, ") Marshal() ([]byte, error) ", - "{ return (*", remoteSym, ")(m).Marshal() }") - g.P("func (m *", ms.sym, ") Unmarshal(buf []byte) error ", - "{ return (*", remoteSym, ")(m).Unmarshal(buf) }") - } - } - if ms.hasOneof { - // Oneofs and public imports do not mix well. - // We can make them work okay for the binary format, - // but they're going to break weirdly for text/JSON. - enc := "_" + ms.sym + "_OneofMarshaler" - dec := "_" + ms.sym + "_OneofUnmarshaler" - size := "_" + ms.sym + "_OneofSizer" - encSig := "(msg " + g.Pkg["proto"] + ".Message, b *" + g.Pkg["proto"] + ".Buffer) error" - decSig := "(msg " + g.Pkg["proto"] + ".Message, tag, wire int, b *" + g.Pkg["proto"] + ".Buffer) (bool, error)" - sizeSig := "(msg " + g.Pkg["proto"] + ".Message) int" - g.P("func (m *", ms.sym, ") XXX_OneofFuncs() (func", encSig, ", func", decSig, ", func", sizeSig, ", []interface{}) {") - g.P("return ", enc, ", ", dec, ", ", size, ", nil") - g.P("}") - - g.P("func ", enc, encSig, " {") - g.P("m := msg.(*", ms.sym, ")") - g.P("m0 := (*", remoteSym, ")(m)") - g.P("enc, _, _, _ := m0.XXX_OneofFuncs()") - g.P("return enc(m0, b)") - g.P("}") - - g.P("func ", dec, decSig, " {") - g.P("m := msg.(*", ms.sym, ")") - g.P("m0 := (*", remoteSym, ")(m)") - g.P("_, dec, _, _ := m0.XXX_OneofFuncs()") - g.P("return dec(m0, tag, wire, b)") - g.P("}") - - g.P("func ", size, sizeSig, " {") - g.P("m := msg.(*", ms.sym, ")") - g.P("m0 := (*", remoteSym, ")(m)") - g.P("_, _, size, _ := m0.XXX_OneofFuncs()") - g.P("return size(m0)") - g.P("}") +func (ms *messageSymbol) GenerateAlias(g *Generator, pkg GoPackageName) { + g.P("type ", ms.sym, " = ", pkg, ".", ms.sym) + for _, name := range ms.oneofTypes { + g.P("type ", name, " = ", pkg, ".", name) } - for _, get := range ms.getters { - - if get.typeName != "" { - g.RecordTypeUse(get.typeName) - } - typ := get.typ - val := "(*" + remoteSym + ")(m)." + get.name + "()" - if get.genType { - // typ will be "*pkg.T" (message/group) or "pkg.T" (enum) - // or "map[t]*pkg.T" (map to message/enum). - // The first two of those might have a "[]" prefix if it is repeated. - // Drop any package qualifier since we have hoisted the type into this package. 
- rep := strings.HasPrefix(typ, "[]") - if rep { - typ = typ[2:] - } - isMap := strings.HasPrefix(typ, "map[") - star := typ[0] == '*' - if !isMap { // map types handled lower down - typ = typ[strings.Index(typ, ".")+1:] - } - if star { - typ = "*" + typ - } - if rep { - // Go does not permit conversion between slice types where both - // element types are named. That means we need to generate a bit - // of code in this situation. - // typ is the element type. - // val is the expression to get the slice from the imported type. - - ctyp := typ // conversion type expression; "Foo" or "(*Foo)" - if star { - ctyp = "(" + typ + ")" - } - - g.P("func (m *", ms.sym, ") ", get.name, "() []", typ, " {") - g.In() - g.P("o := ", val) - g.P("if o == nil {") - g.In() - g.P("return nil") - g.Out() - g.P("}") - g.P("s := make([]", typ, ", len(o))") - g.P("for i, x := range o {") - g.In() - g.P("s[i] = ", ctyp, "(x)") - g.Out() - g.P("}") - g.P("return s") - g.Out() - g.P("}") - continue - } - if isMap { - // Split map[keyTyp]valTyp. - bra, ket := strings.Index(typ, "["), strings.Index(typ, "]") - keyTyp, valTyp := typ[bra+1:ket], typ[ket+1:] - // Drop any package qualifier. - // Only the value type may be foreign. - star := valTyp[0] == '*' - valTyp = valTyp[strings.Index(valTyp, ".")+1:] - if star { - valTyp = "*" + valTyp - } - - typ := "map[" + keyTyp + "]" + valTyp - g.P("func (m *", ms.sym, ") ", get.name, "() ", typ, " {") - g.P("o := ", val) - g.P("if o == nil { return nil }") - g.P("s := make(", typ, ", len(o))") - g.P("for k, v := range o {") - g.P("s[k] = (", valTyp, ")(v)") - g.P("}") - g.P("return s") - g.P("}") - continue - } - // Convert imported type into the forwarding type. - val = "(" + typ + ")(" + val + ")" - } - - g.P("func (m *", ms.sym, ") ", get.name, "() ", typ, " { return ", val, " }") - } - } type enumSymbol struct { @@ -503,16 +368,11 @@ type enumSymbol struct { proto3 bool // Whether this came from a proto3 file. } -func (es enumSymbol) GenerateAlias(g *Generator, pkg string) { +func (es enumSymbol) GenerateAlias(g *Generator, pkg GoPackageName) { s := es.name - g.P("type ", s, " ", pkg, ".", s) + g.P("type ", s, " = ", pkg, ".", s) g.P("var ", s, "_name = ", pkg, ".", s, "_name") g.P("var ", s, "_value = ", pkg, ".", s, "_value") - g.P("func (x ", s, ") String() string { return (", pkg, ".", s, ")(x).String() }") - if !es.proto3 { - g.P("func (x ", s, ") Enum() *", s, "{ return (*", s, ")((", pkg, ".", s, ")(x).Enum()) }") - g.P("func (x *", s, ") UnmarshalJSON(data []byte) error { return (*", pkg, ".", s, ")(x).UnmarshalJSON(data) }") - } } type constOrVarSymbol struct { @@ -521,8 +381,8 @@ type constOrVarSymbol struct { cast string // if non-empty, a type cast is required (used for enums) } -func (cs constOrVarSymbol) GenerateAlias(g *Generator, pkg string) { - v := pkg + "." + cs.sym +func (cs constOrVarSymbol) GenerateAlias(g *Generator, pkg GoPackageName) { + v := string(pkg) + "." + cs.sym if cs.cast != "" { v = cs.cast + "(" + v + ")" } @@ -531,21 +391,9 @@ func (cs constOrVarSymbol) GenerateAlias(g *Generator, pkg string) { // Object is an interface abstracting the abilities shared by enums, messages, extensions and imported objects. type Object interface { - PackageName() string // The name we use in our output (a_b_c), possibly renamed for uniqueness. + GoImportPath() GoImportPath TypeName() []string - File() *descriptor.FileDescriptorProto -} - -// Each package name we generate must be unique. 
The package we're generating -// gets its own name but every other package must have a unique name that does -// not conflict in the code we generate. These names are chosen globally (although -// they don't have to be, it simplifies things to do them globally). -func uniquePackageOf(fd *descriptor.FileDescriptorProto) string { - s, ok := uniquePackageName[fd] - if !ok { - log.Fatal("internal error: no package name defined for " + fd.GetName()) - } - return s + File() *FileDescriptor } // Generator is the type whose methods generate the output, stored in the associated response structure. @@ -562,18 +410,30 @@ type Generator struct { Pkg map[string]string // The names under which we import support packages - packageName string // What we're calling ourselves. - allFiles []*FileDescriptor // All files in the tree - allFilesByName map[string]*FileDescriptor // All files by filename. - genFiles []*FileDescriptor // Those files we will generate output for. - file *FileDescriptor // The file we are compiling now. - usedPackages map[string]bool // Names of packages used in current file. - typeNameToObject map[string]Object // Key is a fully-qualified name in input syntax. - init []string // Lines to emit in the init function. + outputImportPath GoImportPath // Package we're generating code for. + allFiles []*FileDescriptor // All files in the tree + allFilesByName map[string]*FileDescriptor // All files by filename. + genFiles []*FileDescriptor // Those files we will generate output for. + file *FileDescriptor // The file we are compiling now. + packageNames map[GoImportPath]GoPackageName // Imported package names in the current file. + usedPackages map[GoImportPath]bool // Packages used in current file. + usedPackageNames map[GoPackageName]bool // Package names used in the current file. + typeNameToObject map[string]Object // Key is a fully-qualified name in input syntax. + init []string // Lines to emit in the init function. indent string + pathType pathType // How to generate output filenames. writeOutput bool + annotateCode bool // whether to store annotations + annotations []*descriptor.GeneratedCodeInfo_Annotation // annotations to store } +type pathType int + +const ( + pathTypeImport pathType = iota + pathTypeSourceRelative +) + // New creates a new generator and allocates the request and response protobufs. func New() *Generator { g := new(Generator) @@ -618,8 +478,21 @@ func (g *Generator) CommandLineParameters(parameter string) { g.ImportPrefix = v case "import_path": g.PackageImportPath = v + case "paths": + switch v { + case "import": + g.pathType = pathTypeImport + case "source_relative": + g.pathType = pathTypeSourceRelative + default: + g.Fail(fmt.Sprintf(`Unknown path type %q: want "import" or "source_relative".`, v)) + } case "plugins": pluginList = v + case "annotate_code": + if v == "true" { + g.annotateCode = true + } default: if len(k) > 0 && k[0] == 'M' { g.ImportMap[k[1:]] = v @@ -646,37 +519,42 @@ func (g *Generator) CommandLineParameters(parameter string) { // If its file is in a different package, it returns the package name we're using for this file, plus ".". // Otherwise it returns the empty string. func (g *Generator) DefaultPackageName(obj Object) string { - pkg := obj.PackageName() - if pkg == g.packageName { + importPath := obj.GoImportPath() + if importPath == g.outputImportPath { return "" } - return pkg + "." + return string(g.GoPackageName(importPath)) + "." } -// For each input file, the unique package name to use, underscored. 
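As a side note on the `GoPackageName` method added just below, package names are derived from the base element of each import path, with a numeric suffix appended on collision. The following is a minimal standalone sketch of that uniquing step only (it omits `cleanPackageName`, and `uniquify` is a placeholder, not the real method):

package main

import (
	"fmt"
	"path"
)

func main() {
	// The three support packages are pre-reserved, mirroring globalPackageNames.
	used := map[string]bool{"fmt": true, "math": true, "proto": true}
	// uniquify mirrors only the base-name plus numeric-suffix logic of
	// Generator.GoPackageName; the real method also cleans the identifier.
	uniquify := func(importPath string) string {
		name := path.Base(importPath)
		for i, orig := 1, name; used[name]; i++ {
			name = fmt.Sprintf("%s%d", orig, i)
		}
		used[name] = true
		return name
	}
	fmt.Println(uniquify("github.com/example/foo")) // foo
	fmt.Println(uniquify("example.org/other/foo"))  // foo1
	fmt.Println(uniquify("example.org/some/proto")) // proto1, "proto" is reserved
}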
-var uniquePackageName = make(map[*descriptor.FileDescriptorProto]string) +// GoPackageName returns the name used for a package. +func (g *Generator) GoPackageName(importPath GoImportPath) GoPackageName { + if name, ok := g.packageNames[importPath]; ok { + return name + } + name := cleanPackageName(baseName(string(importPath))) + for i, orig := 1, name; g.usedPackageNames[name]; i++ { + name = orig + GoPackageName(strconv.Itoa(i)) + } + g.packageNames[importPath] = name + g.usedPackageNames[name] = true + return name +} -// Package names already registered. Key is the name from the .proto file; -// value is the name that appears in the generated code. -var pkgNamesInUse = make(map[string]bool) +var globalPackageNames = map[GoPackageName]bool{ + "fmt": true, + "math": true, + "proto": true, +} -// Create and remember a guaranteed unique package name for this file descriptor. -// Pkg is the candidate name. If f is nil, it's a builtin package like "proto" and -// has no file descriptor. +// Create and remember a guaranteed unique package name. Pkg is the candidate name. +// The FileDescriptor parameter is unused. func RegisterUniquePackageName(pkg string, f *FileDescriptor) string { - // Convert dots to underscores before finding a unique alias. - pkg = strings.Map(badToUnderscore, pkg) - - for i, orig := 1, pkg; pkgNamesInUse[pkg]; i++ { - // It's a duplicate; must rename. - pkg = orig + strconv.Itoa(i) - } - // Install it. - pkgNamesInUse[pkg] = true - if f != nil { - uniquePackageName[f.FileDescriptorProto] = pkg + name := cleanPackageName(pkg) + for i, orig := 1, name; globalPackageNames[name]; i++ { + name = orig + GoPackageName(strconv.Itoa(i)) } - return pkg + globalPackageNames[name] = true + return string(name) } var isGoKeyword = map[string]bool{ @@ -707,97 +585,83 @@ var isGoKeyword = map[string]bool{ "var": true, } +func cleanPackageName(name string) GoPackageName { + name = strings.Map(badToUnderscore, name) + // Identifier must not be keyword: insert _. + if isGoKeyword[name] { + name = "_" + name + } + // Identifier must not begin with digit: insert _. + if r, _ := utf8.DecodeRuneInString(name); unicode.IsDigit(r) { + name = "_" + name + } + return GoPackageName(name) +} + // defaultGoPackage returns the package name to use, // derived from the import path of the package we're building code for. -func (g *Generator) defaultGoPackage() string { +func (g *Generator) defaultGoPackage() GoPackageName { p := g.PackageImportPath if i := strings.LastIndex(p, "/"); i >= 0 { p = p[i+1:] } - if p == "" { - return "" - } - - p = strings.Map(badToUnderscore, p) - // Identifier must not be keyword: insert _. - if isGoKeyword[p] { - p = "_" + p - } - // Identifier must not begin with digit: insert _. - if r, _ := utf8.DecodeRuneInString(p); unicode.IsDigit(r) { - p = "_" + p - } - return p + return cleanPackageName(p) } // SetPackageNames sets the package name for this run. // The package name must agree across all files being generated. // It also defines unique package names for all imported files. func (g *Generator) SetPackageNames() { - // Register the name for this package. It will be the first name - // registered so is guaranteed to be unmodified. - pkg, explicit := g.genFiles[0].goPackageName() + g.outputImportPath = g.genFiles[0].importPath - // Check all files for an explicit go_package option. 
+ defaultPackageNames := make(map[GoImportPath]GoPackageName) for _, f := range g.genFiles { - thisPkg, thisExplicit := f.goPackageName() - if thisExplicit { - if !explicit { - // Let this file's go_package option serve for all input files. - pkg, explicit = thisPkg, true - } else if thisPkg != pkg { - g.Fail("inconsistent package names:", thisPkg, pkg) - } + if _, p, ok := f.goPackageOption(); ok { + defaultPackageNames[f.importPath] = p } } - - // If we don't have an explicit go_package option but we have an - // import path, use that. - if !explicit { - p := g.defaultGoPackage() - if p != "" { - pkg, explicit = p, true + for _, f := range g.genFiles { + if _, p, ok := f.goPackageOption(); ok { + // Source file: option go_package = "quux/bar"; + f.packageName = p + } else if p, ok := defaultPackageNames[f.importPath]; ok { + // A go_package option in another file in the same package. + // + // This is a poor choice in general, since every source file should + // contain a go_package option. Supported mainly for historical + // compatibility. + f.packageName = p + } else if p := g.defaultGoPackage(); p != "" { + // Command-line: import_path=quux/bar. + // + // The import_path flag sets a package name for files which don't + // contain a go_package option. + f.packageName = p + } else if p := f.GetPackage(); p != "" { + // Source file: package quux.bar; + f.packageName = cleanPackageName(p) + } else { + // Source filename. + f.packageName = cleanPackageName(baseName(f.GetName())) } } - // If there was no go_package and no import path to use, - // double-check that all the inputs have the same implicit - // Go package name. - if !explicit { - for _, f := range g.genFiles { - thisPkg, _ := f.goPackageName() - if thisPkg != pkg { - g.Fail("inconsistent package names:", thisPkg, pkg) - } + // Check that all files have a consistent package name and import path. + for _, f := range g.genFiles[1:] { + if a, b := g.genFiles[0].importPath, f.importPath; a != b { + g.Fail(fmt.Sprintf("inconsistent package import paths: %v, %v", a, b)) + } + if a, b := g.genFiles[0].packageName, f.packageName; a != b { + g.Fail(fmt.Sprintf("inconsistent package names: %v, %v", a, b)) } } - g.packageName = RegisterUniquePackageName(pkg, g.genFiles[0]) - - // Register the support package names. They might collide with the - // name of a package we import. + // Names of support packages. These never vary (if there are conflicts, + // we rename the conflicting package), so this could be removed someday. g.Pkg = map[string]string{ - "fmt": RegisterUniquePackageName("fmt", nil), - "math": RegisterUniquePackageName("math", nil), - "proto": RegisterUniquePackageName("proto", nil), - } - -AllFiles: - for _, f := range g.allFiles { - for _, genf := range g.genFiles { - if f == genf { - // In this package already. - uniquePackageName[f.FileDescriptorProto] = g.packageName - continue AllFiles - } - } - // The file is a dependency, so we want to ignore its go_package option - // because that is only relevant for its specific generated output. 
- pkg := f.GetPackage() - if pkg == "" { - pkg = baseName(*f.Name) - } - RegisterUniquePackageName(pkg, f) + "fmt": "fmt", + "math": "math", + "proto": "proto", } } @@ -807,27 +671,51 @@ AllFiles: func (g *Generator) WrapTypes() { g.allFiles = make([]*FileDescriptor, 0, len(g.Request.ProtoFile)) g.allFilesByName = make(map[string]*FileDescriptor, len(g.allFiles)) + genFileNames := make(map[string]bool) + for _, n := range g.Request.FileToGenerate { + genFileNames[n] = true + } for _, f := range g.Request.ProtoFile { - // We must wrap the descriptors before we wrap the enums - descs := wrapDescriptors(f) - g.buildNestedDescriptors(descs) - enums := wrapEnumDescriptors(f, descs) - g.buildNestedEnums(descs, enums) - exts := wrapExtensions(f) fd := &FileDescriptor{ FileDescriptorProto: f, - desc: descs, - enum: enums, - ext: exts, exported: make(map[Object][]symbol), proto3: fileIsProto3(f), } + // The import path may be set in a number of ways. + if substitution, ok := g.ImportMap[f.GetName()]; ok { + // Command-line: M=foo.proto=quux/bar. + // + // Explicit mapping of source file to import path. + fd.importPath = GoImportPath(substitution) + } else if genFileNames[f.GetName()] && g.PackageImportPath != "" { + // Command-line: import_path=quux/bar. + // + // The import_path flag sets the import path for every file that + // we generate code for. + fd.importPath = GoImportPath(g.PackageImportPath) + } else if p, _, _ := fd.goPackageOption(); p != "" { + // Source file: option go_package = "quux/bar"; + // + // The go_package option sets the import path. Most users should use this. + fd.importPath = p + } else { + // Source filename. + // + // Last resort when nothing else is available. + fd.importPath = GoImportPath(path.Dir(f.GetName())) + } + // We must wrap the descriptors before we wrap the enums + fd.desc = wrapDescriptors(fd) + g.buildNestedDescriptors(fd.desc) + fd.enum = wrapEnumDescriptors(fd, fd.desc) + g.buildNestedEnums(fd.desc, fd.enum) + fd.ext = wrapExtensions(fd) extractComments(fd) g.allFiles = append(g.allFiles, fd) g.allFilesByName[f.GetName()] = fd } for _, fd := range g.allFiles { - fd.imp = wrapImported(fd.FileDescriptorProto, g) + fd.imp = wrapImported(fd, g) } g.genFiles = make([]*FileDescriptor, 0, len(g.Request.FileToGenerate)) @@ -836,11 +724,27 @@ func (g *Generator) WrapTypes() { if fd == nil { g.Fail("could not find file named", fileName) } - fd.index = len(g.genFiles) + fingerprint, err := fingerprintProto(fd.FileDescriptorProto) + if err != nil { + g.Error(err) + } + fd.fingerprint = fingerprint g.genFiles = append(g.genFiles, fd) } } +// fingerprintProto returns a fingerprint for a message. +// The fingerprint is intended to prevent conflicts between generated fileds, +// not to provide cryptographic security. +func fingerprintProto(m proto.Message) (string, error) { + b, err := proto.Marshal(m) + if err != nil { + return "", err + } + h := sha256.Sum256(b) + return hex.EncodeToString(h[:8]), nil +} + // Scan the descriptors in this file. 
For each one, build the slice of nested descriptors func (g *Generator) buildNestedDescriptors(descs []*Descriptor) { for _, desc := range descs { @@ -873,7 +777,7 @@ func (g *Generator) buildNestedEnums(descs []*Descriptor, enums []*EnumDescripto } // Construct the Descriptor -func newDescriptor(desc *descriptor.DescriptorProto, parent *Descriptor, file *descriptor.FileDescriptorProto, index int) *Descriptor { +func newDescriptor(desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *Descriptor { d := &Descriptor{ common: common{file}, DescriptorProto: desc, @@ -910,7 +814,7 @@ func newDescriptor(desc *descriptor.DescriptorProto, parent *Descriptor, file *d } // Return a slice of all the Descriptors defined within this file -func wrapDescriptors(file *descriptor.FileDescriptorProto) []*Descriptor { +func wrapDescriptors(file *FileDescriptor) []*Descriptor { sl := make([]*Descriptor, 0, len(file.MessageType)+10) for i, desc := range file.MessageType { sl = wrapThisDescriptor(sl, desc, nil, file, i) @@ -919,7 +823,7 @@ func wrapDescriptors(file *descriptor.FileDescriptorProto) []*Descriptor { } // Wrap this Descriptor, recursively -func wrapThisDescriptor(sl []*Descriptor, desc *descriptor.DescriptorProto, parent *Descriptor, file *descriptor.FileDescriptorProto, index int) []*Descriptor { +func wrapThisDescriptor(sl []*Descriptor, desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) []*Descriptor { sl = append(sl, newDescriptor(desc, parent, file, index)) me := sl[len(sl)-1] for i, nested := range desc.NestedType { @@ -929,7 +833,7 @@ func wrapThisDescriptor(sl []*Descriptor, desc *descriptor.DescriptorProto, pare } // Construct the EnumDescriptor -func newEnumDescriptor(desc *descriptor.EnumDescriptorProto, parent *Descriptor, file *descriptor.FileDescriptorProto, index int) *EnumDescriptor { +func newEnumDescriptor(desc *descriptor.EnumDescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *EnumDescriptor { ed := &EnumDescriptor{ common: common{file}, EnumDescriptorProto: desc, @@ -945,7 +849,7 @@ func newEnumDescriptor(desc *descriptor.EnumDescriptorProto, parent *Descriptor, } // Return a slice of all the EnumDescriptors defined within this file -func wrapEnumDescriptors(file *descriptor.FileDescriptorProto, descs []*Descriptor) []*EnumDescriptor { +func wrapEnumDescriptors(file *FileDescriptor, descs []*Descriptor) []*EnumDescriptor { sl := make([]*EnumDescriptor, 0, len(file.EnumType)+10) // Top-level enums. for i, enum := range file.EnumType { @@ -961,7 +865,7 @@ func wrapEnumDescriptors(file *descriptor.FileDescriptorProto, descs []*Descript } // Return a slice of all the top-level ExtensionDescriptors defined within this file. -func wrapExtensions(file *descriptor.FileDescriptorProto) []*ExtensionDescriptor { +func wrapExtensions(file *FileDescriptor) []*ExtensionDescriptor { var sl []*ExtensionDescriptor for _, field := range file.Extension { sl = append(sl, &ExtensionDescriptor{common{file}, field, nil}) @@ -970,7 +874,7 @@ func wrapExtensions(file *descriptor.FileDescriptorProto) []*ExtensionDescriptor } // Return a slice of all the types that are publicly imported into this file. 
-func wrapImported(file *descriptor.FileDescriptorProto, g *Generator) (sl []*ImportedDescriptor) { +func wrapImported(file *FileDescriptor, g *Generator) (sl []*ImportedDescriptor) { for _, index := range file.PublicDependency { df := g.fileByName(file.Dependency[index]) for _, d := range df.desc { @@ -1070,35 +974,84 @@ func (g *Generator) ObjectNamed(typeName string) Object { return o } +// AnnotatedAtoms is a list of atoms (as consumed by P) that records the file name and proto AST path from which they originated. +type AnnotatedAtoms struct { + source string + path string + atoms []interface{} +} + +// Annotate records the file name and proto AST path of a list of atoms +// so that a later call to P can emit a link from each atom to its origin. +func Annotate(file *FileDescriptor, path string, atoms ...interface{}) *AnnotatedAtoms { + return &AnnotatedAtoms{source: *file.Name, path: path, atoms: atoms} +} + +// printAtom prints the (atomic, non-annotation) argument to the generated output. +func (g *Generator) printAtom(v interface{}) { + switch v := v.(type) { + case string: + g.WriteString(v) + case *string: + g.WriteString(*v) + case bool: + fmt.Fprint(g, v) + case *bool: + fmt.Fprint(g, *v) + case int: + fmt.Fprint(g, v) + case *int32: + fmt.Fprint(g, *v) + case *int64: + fmt.Fprint(g, *v) + case float64: + fmt.Fprint(g, v) + case *float64: + fmt.Fprint(g, *v) + case GoPackageName: + g.WriteString(string(v)) + case GoImportPath: + g.WriteString(strconv.Quote(string(v))) + default: + g.Fail(fmt.Sprintf("unknown type in printer: %T", v)) + } +} + // P prints the arguments to the generated output. It handles strings and int32s, plus -// handling indirections because they may be *string, etc. +// handling indirections because they may be *string, etc. Any inputs of type AnnotatedAtoms may emit +// annotations in a .meta file in addition to outputting the atoms themselves (if g.annotateCode +// is true). 
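As an aside on the annotation plumbing above: wrapping atoms in `Annotate` is what lets `P` measure the byte span it writes and, when the generator was invoked with `annotate_code=true`, record a `GeneratedCodeInfo_Annotation` that later lands in the emitted `.meta` file. A minimal sketch, assuming this vendored package is imported as `github.com/golang/protobuf/protoc-gen-go/generator`; the helper and its arguments are hypothetical, not part of the patch:

package sketch

import "github.com/golang/protobuf/protoc-gen-go/generator"

// emitAnnotatedType is a hypothetical helper: it prints `type <name> struct{}`
// and, when code annotation is enabled, links the printed byte span back to
// the proto AST path astPath (a comma-separated list of integers) in file.
func emitAnnotatedType(g *generator.Generator, file *generator.FileDescriptor, name, astPath string) {
	g.P("type ", generator.Annotate(file, astPath, name), " struct{}")
}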
func (g *Generator) P(str ...interface{}) { if !g.writeOutput { return } g.WriteString(g.indent) for _, v := range str { - switch s := v.(type) { - case string: - g.WriteString(s) - case *string: - g.WriteString(*s) - case bool: - fmt.Fprintf(g, "%t", s) - case *bool: - fmt.Fprintf(g, "%t", *s) - case int: - fmt.Fprintf(g, "%d", s) - case *int32: - fmt.Fprintf(g, "%d", *s) - case *int64: - fmt.Fprintf(g, "%d", *s) - case float64: - fmt.Fprintf(g, "%g", s) - case *float64: - fmt.Fprintf(g, "%g", *s) + switch v := v.(type) { + case *AnnotatedAtoms: + begin := int32(g.Len()) + for _, v := range v.atoms { + g.printAtom(v) + } + if g.annotateCode { + end := int32(g.Len()) + var path []int32 + for _, token := range strings.Split(v.path, ",") { + val, err := strconv.ParseInt(token, 10, 32) + if err != nil { + g.Fail("could not parse proto AST path: ", err.Error()) + } + path = append(path, int32(val)) + } + g.annotations = append(g.annotations, &descriptor.GeneratedCodeInfo_Annotation{ + Path: path, + SourceFile: &v.source, + Begin: &begin, + End: &end, + }) + } default: - g.Fail(fmt.Sprintf("unknown type in printer: %T", v)) + g.printAtom(v) } } g.WriteByte('\n') @@ -1135,15 +1088,25 @@ func (g *Generator) GenerateAllFiles() { } for _, file := range g.allFiles { g.Reset() + g.annotations = nil g.writeOutput = genFileMap[file] g.generate(file) if !g.writeOutput { continue } + fname := file.goFileName(g.pathType) g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{ - Name: proto.String(file.goFileName()), + Name: proto.String(fname), Content: proto.String(g.String()), }) + if g.annotateCode { + // Store the generated code annotations in text, as the protoc plugin protocol requires that + // strings contain valid UTF-8. + g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{ + Name: proto.String(file.goFileName(g.pathType) + ".meta"), + Content: proto.String(proto.CompactTextString(&descriptor.GeneratedCodeInfo{Annotation: g.annotations})), + }) + } } } @@ -1154,32 +1117,24 @@ func (g *Generator) runPlugins(file *FileDescriptor) { } } -// FileOf return the FileDescriptor for this FileDescriptorProto. -func (g *Generator) FileOf(fd *descriptor.FileDescriptorProto) *FileDescriptor { - for _, file := range g.allFiles { - if file.FileDescriptorProto == fd { - return file - } - } - g.Fail("could not find file in table:", fd.GetName()) - return nil -} - // Fill the response protocol buffer with the generated output for all the files we're // supposed to generate. func (g *Generator) generate(file *FileDescriptor) { - g.file = g.FileOf(file.FileDescriptorProto) - g.usedPackages = make(map[string]bool) - - if g.file.index == 0 { - // For one file in the package, assert version compatibility. 
- g.P("// This is a compile-time assertion to ensure that this generated file") - g.P("// is compatible with the proto package it is being compiled against.") - g.P("// A compilation error at this line likely means your copy of the") - g.P("// proto package needs to be updated.") - g.P("const _ = ", g.Pkg["proto"], ".ProtoPackageIsVersion", generatedCodeVersion, " // please upgrade the proto package") - g.P() - } + g.file = file + g.usedPackages = make(map[GoImportPath]bool) + g.packageNames = make(map[GoImportPath]GoPackageName) + g.usedPackageNames = make(map[GoPackageName]bool) + for name := range globalPackageNames { + g.usedPackageNames[name] = true + } + + g.P("// This is a compile-time assertion to ensure that this generated file") + g.P("// is compatible with the proto package it is being compiled against.") + g.P("// A compilation error at this line likely means your copy of the") + g.P("// proto package needs to be updated.") + g.P("const _ = ", g.Pkg["proto"], ".ProtoPackageIsVersion", generatedCodeVersion, " // please upgrade the proto package") + g.P() + for _, td := range g.file.imp { g.generateImported(td) } @@ -1205,24 +1160,36 @@ func (g *Generator) generate(file *FileDescriptor) { // Generate header and imports last, though they appear first in the output. rem := g.Buffer + remAnno := g.annotations g.Buffer = new(bytes.Buffer) + g.annotations = nil g.generateHeader() g.generateImports() if !g.writeOutput { return } + // Adjust the offsets for annotations displaced by the header and imports. + for _, anno := range remAnno { + *anno.Begin += int32(g.Len()) + *anno.End += int32(g.Len()) + g.annotations = append(g.annotations, anno) + } g.Write(rem.Bytes()) - // Reformat generated code. + // Reformat generated code and patch annotation locations. fset := token.NewFileSet() - raw := g.Bytes() - ast, err := parser.ParseFile(fset, "", g, parser.ParseComments) + original := g.Bytes() + if g.annotateCode { + // make a copy independent of g; we'll need it after Reset. + original = append([]byte(nil), original...) + } + ast, err := parser.ParseFile(fset, "", original, parser.ParseComments) if err != nil { // Print out the bad code with line numbers. // This should never happen in practice, but it can while changing generated code, // so consider this a debugging aid. var src bytes.Buffer - s := bufio.NewScanner(bytes.NewReader(raw)) + s := bufio.NewScanner(bytes.NewReader(original)) for line := 1; s.Scan(); line++ { fmt.Fprintf(&src, "%5d\t%s\n", line, s.Bytes()) } @@ -1233,55 +1200,59 @@ func (g *Generator) generate(file *FileDescriptor) { if err != nil { g.Fail("generated Go source code could not be reformatted:", err.Error()) } + if g.annotateCode { + m, err := remap.Compute(original, g.Bytes()) + if err != nil { + g.Fail("formatted generated Go source code could not be mapped back to the original code:", err.Error()) + } + for _, anno := range g.annotations { + new, ok := m.Find(int(*anno.Begin), int(*anno.End)) + if !ok { + g.Fail("span in formatted generated Go source code could not be mapped back to the original code") + } + *anno.Begin = int32(new.Pos) + *anno.End = int32(new.End) + } + } } // Generate the header, including package definition func (g *Generator) generateHeader() { g.P("// Code generated by protoc-gen-go. 
DO NOT EDIT.") - g.P("// source: ", g.file.Name) + if g.file.GetOptions().GetDeprecated() { + g.P("// ", g.file.Name, " is a deprecated file.") + } else { + g.P("// source: ", g.file.Name) + } g.P() - name := g.file.PackageName() + importPath, _, _ := g.file.goPackageOption() + if importPath == "" { + g.P("package ", g.file.packageName) + } else { + g.P("package ", g.file.packageName, " // import ", GoImportPath(g.ImportPrefix)+importPath) + } + g.P() - if g.file.index == 0 { - // Generate package docs for the first file in the package. + if loc, ok := g.file.comments[strconv.Itoa(packagePath)]; ok { g.P("/*") - g.P("Package ", name, " is a generated protocol buffer package.") - g.P() - if loc, ok := g.file.comments[strconv.Itoa(packagePath)]; ok { - // not using g.PrintComments because this is a /* */ comment block. - text := strings.TrimSuffix(loc.GetLeadingComments(), "\n") - for _, line := range strings.Split(text, "\n") { - line = strings.TrimPrefix(line, " ") - // ensure we don't escape from the block comment - line = strings.Replace(line, "*/", "* /", -1) - g.P(line) - } - g.P() - } - var topMsgs []string - g.P("It is generated from these files:") - for _, f := range g.genFiles { - g.P("\t", f.Name) - for _, msg := range f.desc { - if msg.parent != nil { - continue - } - topMsgs = append(topMsgs, CamelCaseSlice(msg.TypeName())) - } - } - g.P() - g.P("It has these top-level messages:") - for _, msg := range topMsgs { - g.P("\t", msg) + // not using g.PrintComments because this is a /* */ comment block. + text := strings.TrimSuffix(loc.GetLeadingComments(), "\n") + for _, line := range strings.Split(text, "\n") { + line = strings.TrimPrefix(line, " ") + // ensure we don't escape from the block comment + line = strings.Replace(line, "*/", "* /", -1) + g.P(line) } g.P("*/") + g.P() } - - g.P("package ", name) - g.P() } +// deprecationComment is the standard comment added to deprecated +// messages, fields, enums, and enum values. +var deprecationComment = "// Deprecated: Do not use." + // PrintComments prints any comments from the source .proto file. // The path is a comma-separated list of integers. // It returns an indication of whether any comments were printed. @@ -1319,35 +1290,46 @@ func (g *Generator) generateImports() { // We almost always need a proto import. Rather than computing when we // do, which is tricky when there's a plugin, just import it and // reference it later. The same argument applies to the fmt and math packages. - g.P("import " + g.Pkg["proto"] + " " + strconv.Quote(g.ImportPrefix+"github.com/golang/protobuf/proto")) + g.P("import "+g.Pkg["proto"]+" ", GoImportPath(g.ImportPrefix)+"github.com/golang/protobuf/proto") g.P("import " + g.Pkg["fmt"] + ` "fmt"`) g.P("import " + g.Pkg["math"] + ` "math"`) + var ( + imports = make(map[GoImportPath]bool) + strongImports = make(map[GoImportPath]bool) + importPaths []string + ) for i, s := range g.file.Dependency { fd := g.fileByName(s) + importPath := fd.importPath // Do not import our own package. - if fd.PackageName() == g.packageName { + if importPath == g.file.importPath { continue } - filename := fd.goFileName() - // By default, import path is the dirname of the Go filename. 
- importPath := path.Dir(filename) - if substitution, ok := g.ImportMap[s]; ok { - importPath = substitution + if !imports[importPath] { + importPaths = append(importPaths, string(importPath)) + } + imports[importPath] = true + if !g.weak(int32(i)) { + strongImports[importPath] = true } - importPath = g.ImportPrefix + importPath + } + sort.Strings(importPaths) + for i := range importPaths { + importPath := GoImportPath(importPaths[i]) + packageName := g.GoPackageName(importPath) + fullPath := GoImportPath(g.ImportPrefix) + importPath // Skip weak imports. - if g.weak(int32(i)) { - g.P("// skipping weak import ", fd.PackageName(), " ", strconv.Quote(importPath)) + if !strongImports[importPath] { + g.P("// skipping weak import ", packageName, " ", fullPath) continue } // We need to import all the dependencies, even if we don't reference them, // because other code and tools depend on having the full transitive closure // of protocol buffer types in the binary. - pname := fd.PackageName() - if _, ok := g.usedPackages[pname]; !ok { - pname = "_" + if _, ok := g.usedPackages[importPath]; !ok { + packageName = "_" } - g.P("import ", pname, " ", strconv.Quote(importPath)) + g.P("import ", packageName, " ", fullPath) } g.P() // TODO: may need to worry about uniqueness across plugins @@ -1363,26 +1345,24 @@ func (g *Generator) generateImports() { } func (g *Generator) generateImported(id *ImportedDescriptor) { - // Don't generate public import symbols for files that we are generating - // code for, since those symbols will already be in this package. - // We can't simply avoid creating the ImportedDescriptor objects, - // because g.genFiles isn't populated at that stage. tn := id.TypeName() sn := tn[len(tn)-1] - df := g.FileOf(id.o.File()) + df := id.o.File() filename := *df.Name - for _, fd := range g.genFiles { - if *fd.Name == filename { - g.P("// Ignoring public import of ", sn, " from ", filename) - g.P() - return - } + if df.importPath == g.file.importPath { + // Don't generate type aliases for files in the same Go package as this one. 
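To make the effect of the rewritten `GenerateAlias` methods earlier in this file's diff concrete: publicly imported symbols are now re-exported as Go type aliases instead of wrapper types with forwarding methods, which is also why the `supportTypeAliases` guard below insists on Go 1.9 or newer. A sketch of the shape of that output, where `Foo`, `Bar`, `foopb`, and the import path are placeholders rather than anything generated by this patch:

// Approximate shape of the generated forwarding declarations for a public import.
package aliased

import foopb "example.com/foopb"

type Foo = foopb.Foo

type Bar = foopb.Bar

var Bar_name = foopb.Bar_name
var Bar_value = foopb.Bar_value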
+ g.P("// Ignoring public import of ", sn, " from ", filename) + g.P() + return + } + if !supportTypeAliases { + g.Fail(fmt.Sprintf("%s: public imports require at least go1.9", filename)) } g.P("// ", sn, " from public import ", filename) - g.usedPackages[df.PackageName()] = true + g.usedPackages[df.importPath] = true for _, sym := range df.exported[id.o] { - sym.GenerateAlias(g, df.PackageName()) + sym.GenerateAlias(g, g.GoPackageName(df.importPath)) } g.P() @@ -1396,16 +1376,26 @@ func (g *Generator) generateEnum(enum *EnumDescriptor) { ccTypeName := CamelCaseSlice(typeName) ccPrefix := enum.prefix() + deprecatedEnum := "" + if enum.GetOptions().GetDeprecated() { + deprecatedEnum = deprecationComment + } g.PrintComments(enum.path) - g.P("type ", ccTypeName, " int32") + g.P("type ", Annotate(enum.file, enum.path, ccTypeName), " int32", deprecatedEnum) g.file.addExport(enum, enumSymbol{ccTypeName, enum.proto3()}) g.P("const (") g.In() for i, e := range enum.Value { - g.PrintComments(fmt.Sprintf("%s,%d,%d", enum.path, enumValuePath, i)) + etorPath := fmt.Sprintf("%s,%d,%d", enum.path, enumValuePath, i) + g.PrintComments(etorPath) + + deprecatedValue := "" + if e.GetOptions().GetDeprecated() { + deprecatedValue = deprecationComment + } name := ccPrefix + *e.Name - g.P(name, " ", ccTypeName, " = ", e.Number) + g.P(Annotate(enum.file, etorPath, name), " ", ccTypeName, " = ", e.Number, " ", deprecatedValue) g.file.addExport(enum, constOrVarSymbol{name, "const", ccTypeName}) } g.Out() @@ -1468,7 +1458,11 @@ func (g *Generator) generateEnum(enum *EnumDescriptor) { indexes = append([]string{strconv.Itoa(m.index)}, indexes...) } indexes = append(indexes, strconv.Itoa(enum.index)) - g.P("func (", ccTypeName, ") EnumDescriptor() ([]byte, []int) { return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "} }") + g.P("func (", ccTypeName, ") EnumDescriptor() ([]byte, []int) {") + g.In() + g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}") + g.Out() + g.P("}") if enum.file.GetPackage() == "google.protobuf" && enum.GetName() == "NullValue" { g.P("func (", ccTypeName, `) XXX_WellKnownType() string { return "`, enum.GetName(), `" }`) } @@ -1535,7 +1529,7 @@ func (g *Generator) goTag(message *Descriptor, field *descriptor.FieldDescriptor } enum := "" if *field.Type == descriptor.FieldDescriptorProto_TYPE_ENUM { - // We avoid using obj.PackageName(), because we want to use the + // We avoid using obj.GoPackageName(), because we want to use the // original (proto-world) package name. obj := g.ObjectNamed(field.GetTypeName()) if id, ok := obj.(*ImportedDescriptor); ok { @@ -1574,12 +1568,7 @@ func (g *Generator) goTag(message *Descriptor, field *descriptor.FieldDescriptor } name = ",name=" + name if message.proto3() { - // We only need the extra tag for []byte fields; - // no need to add noise for the others. - if *field.Type == descriptor.FieldDescriptorProto_TYPE_BYTES { - name += ",proto3" - } - + name += ",proto3" } oneof := "" if field.OneofIndex != nil { @@ -1617,12 +1606,6 @@ func (g *Generator) TypeName(obj Object) string { return g.DefaultPackageName(obj) + CamelCaseSlice(obj.TypeName()) } -// TypeNameWithPackage is like TypeName, but always includes the package -// name even if the object is in our own package. 
-func (g *Generator) TypeNameWithPackage(obj Object) string { - return obj.PackageName() + CamelCaseSlice(obj.TypeName()) -} - // GoType returns a string representing the type name, and the wire type func (g *Generator) GoType(message *Descriptor, field *descriptor.FieldDescriptorProto) (typ string, wire string) { // TODO: Options. @@ -1682,10 +1665,10 @@ func (g *Generator) GoType(message *Descriptor, field *descriptor.FieldDescripto } func (g *Generator) RecordTypeUse(t string) { - if obj, ok := g.typeNameToObject[t]; ok { + if _, ok := g.typeNameToObject[t]; ok { // Call ObjectNamed to get the true object to record the use. - obj = g.ObjectNamed(t) - g.usedPackages[obj.PackageName()] = true + obj := g.ObjectNamed(t) + g.usedPackages[obj.GoImportPath()] = true } } @@ -1746,8 +1729,19 @@ func (g *Generator) generateMessage(message *Descriptor) { oneofTypeName := make(map[*descriptor.FieldDescriptorProto]string) // without star oneofInsertPoints := make(map[int32]int) // oneof_index => offset of g.Buffer - g.PrintComments(message.path) - g.P("type ", ccTypeName, " struct {") + comments := g.PrintComments(message.path) + + // Guarantee deprecation comments appear after user-provided comments. + if message.GetOptions().GetDeprecated() { + if comments { + // Convention: Separate deprecation comments from original + // comments with an empty line. + g.P("//") + } + g.P(deprecationComment) + } + + g.P("type ", Annotate(message.file, message.path, ccTypeName), " struct {") g.In() // allocNames finds a conflict-free variation of the given strings, @@ -1794,7 +1788,8 @@ func (g *Generator) generateMessage(message *Descriptor) { // This is the first field of a oneof we haven't seen before. // Generate the union field. - com := g.PrintComments(fmt.Sprintf("%s,%d,%d", message.path, messageOneofPath, *field.OneofIndex)) + oneofFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageOneofPath, *field.OneofIndex) + com := g.PrintComments(oneofFullPath) if com { g.P("//") } @@ -1807,7 +1802,7 @@ func (g *Generator) generateMessage(message *Descriptor) { oneofFieldName[*field.OneofIndex] = fname oneofDisc[*field.OneofIndex] = dname tag := `protobuf_oneof:"` + odp.GetName() + `"` - g.P(fname, " ", dname, " `", tag, "`") + g.P(Annotate(message.file, oneofFullPath, fname), " ", dname, " `", tag, "`") } if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE { @@ -1871,16 +1866,26 @@ func (g *Generator) generateMessage(message *Descriptor) { continue } - g.PrintComments(fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i)) - g.P(fieldName, "\t", typename, "\t`", tag, "`") + fieldDeprecated := "" + if field.GetOptions().GetDeprecated() { + fieldDeprecated = deprecationComment + } + + fieldFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i) + g.PrintComments(fieldFullPath) + g.P(Annotate(message.file, fieldFullPath, fieldName), "\t", typename, "\t`", tag, "`", fieldDeprecated) g.RecordTypeUse(field.GetTypeName()) } + g.P("XXX_NoUnkeyedLiteral\tstruct{} `json:\"-\"`") // prevent unkeyed struct literals if len(message.ExtensionRange) > 0 { - g.P(g.Pkg["proto"], ".XXX_InternalExtensions `json:\"-\"`") - } - if !message.proto3() { - g.P("XXX_unrecognized\t[]byte `json:\"-\"`") + messageset := "" + if opts := message.Options; opts != nil && opts.GetMessageSetWireFormat() { + messageset = "protobuf_messageset:\"1\" " + } + g.P(g.Pkg["proto"], ".XXX_InternalExtensions `", messageset, "json:\"-\"`") } + g.P("XXX_unrecognized\t[]byte `json:\"-\"`") + g.P("XXX_sizecache\tint32 
`json:\"-\"`") g.Out() g.P("}") @@ -1892,12 +1897,25 @@ func (g *Generator) generateMessage(message *Descriptor) { all := g.Buffer.Bytes() rem := all[ip:] g.Buffer = bytes.NewBuffer(all[:ip:ip]) // set cap so we don't scribble on rem + oldLen := g.Buffer.Len() for _, field := range message.Field { if field.OneofIndex == nil || *field.OneofIndex != oi { continue } g.P("//\t*", oneofTypeName[field]) } + // If we've inserted text, we also need to fix up affected annotations (as + // they contain offsets that may need to be changed). + offset := int32(g.Buffer.Len() - oldLen) + ip32 := int32(ip) + for _, anno := range g.annotations { + if *anno.Begin >= ip32 { + *anno.Begin += offset + } + if *anno.End >= ip32 { + *anno.End += offset + } + } g.Buffer.Write(rem) } @@ -1909,7 +1927,11 @@ func (g *Generator) generateMessage(message *Descriptor) { for m := message; m != nil; m = m.parent { indexes = append([]string{strconv.Itoa(m.index)}, indexes...) } - g.P("func (*", ccTypeName, ") Descriptor() ([]byte, []int) { return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "} }") + g.P("func (*", ccTypeName, ") Descriptor() ([]byte, []int) {") + g.In() + g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}") + g.Out() + g.P("}") // TODO: Revisit the decision to use a XXX_WellKnownType method // if we change proto.MessageName to work with multiple equivalents. if message.file.GetPackage() == "google.protobuf" && wellKnownTypes[message.GetName()] { @@ -1924,16 +1946,6 @@ func (g *Generator) generateMessage(message *Descriptor) { if opts := message.Options; opts != nil && opts.GetMessageSetWireFormat() { isMessageSet = true g.P() - g.P("func (m *", ccTypeName, ") Marshal() ([]byte, error) {") - g.In() - g.P("return ", g.Pkg["proto"], ".MarshalMessageSet(&m.XXX_InternalExtensions)") - g.Out() - g.P("}") - g.P("func (m *", ccTypeName, ") Unmarshal(buf []byte) error {") - g.In() - g.P("return ", g.Pkg["proto"], ".UnmarshalMessageSet(buf, &m.XXX_InternalExtensions)") - g.Out() - g.P("}") g.P("func (m *", ccTypeName, ") MarshalJSON() ([]byte, error) {") g.In() g.P("return ", g.Pkg["proto"], ".MarshalMessageSetJSON(&m.XXX_InternalExtensions)") @@ -1944,9 +1956,6 @@ func (g *Generator) generateMessage(message *Descriptor) { g.P("return ", g.Pkg["proto"], ".UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions)") g.Out() g.P("}") - g.P("// ensure ", ccTypeName, " satisfies proto.Marshaler and proto.Unmarshaler") - g.P("var _ ", g.Pkg["proto"], ".Marshaler = (*", ccTypeName, ")(nil)") - g.P("var _ ", g.Pkg["proto"], ".Unmarshaler = (*", ccTypeName, ")(nil)") } g.P() @@ -1954,7 +1963,7 @@ func (g *Generator) generateMessage(message *Descriptor) { g.In() for _, r := range message.ExtensionRange { end := fmt.Sprint(*r.End - 1) // make range inclusive on both ends - g.P("{", r.Start, ", ", end, "},") + g.P("{Start: ", r.Start, ", End: ", end, "},") } g.Out() g.P("}") @@ -1965,6 +1974,45 @@ func (g *Generator) generateMessage(message *Descriptor) { g.P("}") } + // TODO: It does not scale to keep adding another method for every + // operation on protos that we want to switch over to using the + // table-driven approach. Instead, we should only add a single method + // that allows getting access to the *InternalMessageInfo struct and then + // calling Unmarshal, Marshal, Merge, Size, and Discard directly on that. + + // Wrapper for table-driven marshaling and unmarshaling. 
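For reference, the `g.P` calls below expand to a set of forwarding methods plus one package-level `InternalMessageInfo` value per message. This is a sketch of the emitted shape with `Foo` standing in for any generated message type (the real emitted form appears further down in this patch, in the regenerated plugin.pb.go):

func (m *Foo) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Foo.Unmarshal(m, b)
}
func (m *Foo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Foo.Marshal(b, m, deterministic)
}
func (dst *Foo) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Foo.Merge(dst, src)
}
func (m *Foo) XXX_Size() int {
	return xxx_messageInfo_Foo.Size(m)
}
func (m *Foo) XXX_DiscardUnknown() {
	xxx_messageInfo_Foo.DiscardUnknown(m)
}

var xxx_messageInfo_Foo proto.InternalMessageInfo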
+ g.P("func (m *", ccTypeName, ") XXX_Unmarshal(b []byte) error {") + g.In() + g.P("return xxx_messageInfo_", ccTypeName, ".Unmarshal(m, b)") + g.Out() + g.P("}") + + g.P("func (m *", ccTypeName, ") XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {") + g.In() + g.P("return xxx_messageInfo_", ccTypeName, ".Marshal(b, m, deterministic)") + g.Out() + g.P("}") + + g.P("func (dst *", ccTypeName, ") XXX_Merge(src ", g.Pkg["proto"], ".Message) {") + g.In() + g.P("xxx_messageInfo_", ccTypeName, ".Merge(dst, src)") + g.Out() + g.P("}") + + g.P("func (m *", ccTypeName, ") XXX_Size() int {") // avoid name clash with "Size" field in some message + g.In() + g.P("return xxx_messageInfo_", ccTypeName, ".Size(m)") + g.Out() + g.P("}") + + g.P("func (m *", ccTypeName, ") XXX_DiscardUnknown() {") + g.In() + g.P("xxx_messageInfo_", ccTypeName, ".DiscardUnknown(m)") + g.Out() + g.P("}") + + g.P("var xxx_messageInfo_", ccTypeName, " ", g.Pkg["proto"], ".InternalMessageInfo") + // Default constants defNames := make(map[*descriptor.FieldDescriptorProto]string) for _, field := range message.Field { @@ -2036,14 +2084,17 @@ func (g *Generator) generateMessage(message *Descriptor) { g.P("}") } g.P() - for _, field := range message.Field { + var oneofTypes []string + for i, field := range message.Field { if field.OneofIndex == nil { continue } _, wiretype := g.GoType(message, field) tag := "protobuf:" + g.goTag(message, field, wiretype) - g.P("type ", oneofTypeName[field], " struct{ ", fieldNames[field], " ", fieldTypes[field], " `", tag, "` }") + fieldFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i) + g.P("type ", Annotate(message.file, fieldFullPath, oneofTypeName[field]), " struct{ ", Annotate(message.file, fieldFullPath, fieldNames[field]), " ", fieldTypes[field], " `", tag, "` }") g.RecordTypeUse(field.GetTypeName()) + oneofTypes = append(oneofTypes, oneofTypeName[field]) } g.P() for _, field := range message.Field { @@ -2055,7 +2106,8 @@ func (g *Generator) generateMessage(message *Descriptor) { g.P() for oi := range message.OneofDecl { fname := oneofFieldName[int32(oi)] - g.P("func (m *", ccTypeName, ") Get", fname, "() ", oneofDisc[int32(oi)], " {") + oneofFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageOneofPath, oi) + g.P("func (m *", ccTypeName, ") ", Annotate(message.file, oneofFullPath, "Get"+fname), "() ", oneofDisc[int32(oi)], " {") g.P("if m != nil { return m.", fname, " }") g.P("return nil") g.P("}") @@ -2063,8 +2115,7 @@ func (g *Generator) generateMessage(message *Descriptor) { g.P() // Field getters - var getters []getterSymbol - for _, field := range message.Field { + for i, field := range message.Field { oneof := field.OneofIndex != nil fname := fieldNames[field] @@ -2078,38 +2129,13 @@ func (g *Generator) generateMessage(message *Descriptor) { typename = typename[1:] star = "*" } + fieldFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i) - // Only export getter symbols for basic types, - // and for messages and enums in the same package. - // Groups are not exported. - // Foreign types can't be hoisted through a public import because - // the importer may not already be importing the defining .proto. - // As an example, imagine we have an import tree like this: - // A.proto -> B.proto -> C.proto - // If A publicly imports B, we need to generate the getters from B in A's output, - // but if one such getter returns something from C then we cannot do that - // because A is not importing C already. 
- var getter, genType bool - switch *field.Type { - case descriptor.FieldDescriptorProto_TYPE_GROUP: - getter = false - case descriptor.FieldDescriptorProto_TYPE_MESSAGE, descriptor.FieldDescriptorProto_TYPE_ENUM: - // Only export getter if its return type is in this package. - getter = g.ObjectNamed(field.GetTypeName()).PackageName() == message.PackageName() - genType = true - default: - getter = true - } - if getter { - getters = append(getters, getterSymbol{ - name: mname, - typ: typename, - typeName: field.GetTypeName(), - genType: genType, - }) + if field.GetOptions().GetDeprecated() { + g.P(deprecationComment) } - g.P("func (m *", ccTypeName, ") "+mname+"() "+typename+" {") + g.P("func (m *", ccTypeName, ") ", Annotate(message.file, fieldFullPath, mname), "() "+typename+" {") g.In() def, hasDef := defNames[field] typeDefaultIsNil := false // whether this field type's default value is a literal nil unless specified @@ -2207,8 +2233,7 @@ func (g *Generator) generateMessage(message *Descriptor) { sym: ccTypeName, hasExtensions: hasExtensions, isMessageSet: isMessageSet, - hasOneof: len(message.OneofDecl) > 0, - getters: getters, + oneofTypes: oneofTypes, } g.file.addExport(message, ms) } @@ -2428,58 +2453,49 @@ func (g *Generator) generateMessage(message *Descriptor) { } g.P("case *", oneofTypeName[field], ":") val := "x." + fieldNames[field] - var wire, varint, fixed string + var varint, fixed string switch *field.Type { case descriptor.FieldDescriptorProto_TYPE_DOUBLE: - wire = "WireFixed64" fixed = "8" case descriptor.FieldDescriptorProto_TYPE_FLOAT: - wire = "WireFixed32" fixed = "4" case descriptor.FieldDescriptorProto_TYPE_INT64, descriptor.FieldDescriptorProto_TYPE_UINT64, descriptor.FieldDescriptorProto_TYPE_INT32, descriptor.FieldDescriptorProto_TYPE_UINT32, descriptor.FieldDescriptorProto_TYPE_ENUM: - wire = "WireVarint" varint = val case descriptor.FieldDescriptorProto_TYPE_FIXED64, descriptor.FieldDescriptorProto_TYPE_SFIXED64: - wire = "WireFixed64" fixed = "8" case descriptor.FieldDescriptorProto_TYPE_FIXED32, descriptor.FieldDescriptorProto_TYPE_SFIXED32: - wire = "WireFixed32" fixed = "4" case descriptor.FieldDescriptorProto_TYPE_BOOL: - wire = "WireVarint" fixed = "1" case descriptor.FieldDescriptorProto_TYPE_STRING: - wire = "WireBytes" fixed = "len(" + val + ")" varint = fixed case descriptor.FieldDescriptorProto_TYPE_GROUP: - wire = "WireStartGroup" fixed = g.Pkg["proto"] + ".Size(" + val + ")" case descriptor.FieldDescriptorProto_TYPE_MESSAGE: - wire = "WireBytes" g.P("s := ", g.Pkg["proto"], ".Size(", val, ")") fixed = "s" varint = fixed case descriptor.FieldDescriptorProto_TYPE_BYTES: - wire = "WireBytes" fixed = "len(" + val + ")" varint = fixed case descriptor.FieldDescriptorProto_TYPE_SINT32: - wire = "WireVarint" varint = "(uint32(" + val + ") << 1) ^ uint32((int32(" + val + ") >> 31))" case descriptor.FieldDescriptorProto_TYPE_SINT64: - wire = "WireVarint" varint = "uint64(" + val + " << 1) ^ uint64((int64(" + val + ") >> 63))" default: g.Fail("unhandled oneof field type ", field.Type.String()) } - g.P("n += ", g.Pkg["proto"], ".SizeVarint(", field.Number, "<<3|", g.Pkg["proto"], ".", wire, ")") + // Tag and wire varint is known statically, + // so don't generate code for that part of the size computation. 
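A short justification of the shortcut described in the comment above and implemented just below: the tag varint encodes fieldNumber<<3 | wireType, and since the wire type is always below 8 while varint length boundaries (1<<7, 1<<14, ...) are multiples of 8, OR-ing in the wire bits can never push the tag across a length boundary, so its size really is known at generation time. An illustrative check, not part of the patch:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	for _, num := range []uint64{1, 15, 16, 2047, 2048, 1 << 28} {
		for wire := uint64(0); wire < 8; wire++ {
			if proto.SizeVarint(num<<3|wire) != proto.SizeVarint(num<<3) {
				fmt.Println("wire type changed the tag size at", num, wire) // never reached
			}
		}
	}
	fmt.Println("tag size depends only on the field number")
}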
+ tagAndWireSize := proto.SizeVarint(uint64(*field.Number << 3)) // wire doesn't affect varint size + g.P("n += ", tagAndWireSize, " // tag and wire") if varint != "" { g.P("n += ", g.Pkg["proto"], ".SizeVarint(uint64(", varint, "))") } @@ -2487,7 +2503,7 @@ func (g *Generator) generateMessage(message *Descriptor) { g.P("n += ", fixed) } if *field.Type == descriptor.FieldDescriptorProto_TYPE_GROUP { - g.P("n += ", g.Pkg["proto"], ".SizeVarint(", field.Number, "<<3|", g.Pkg["proto"], ".WireEndGroup)") + g.P("n += ", tagAndWireSize, " // tag and wire") } } g.P("case nil:") @@ -2510,6 +2526,27 @@ func (g *Generator) generateMessage(message *Descriptor) { } g.addInitf("%s.RegisterType((*%s)(nil), %q)", g.Pkg["proto"], ccTypeName, fullName) + // Register types for native map types. + for _, k := range mapFieldKeys(mapFieldTypes) { + fullName := strings.TrimPrefix(*k.TypeName, ".") + g.addInitf("%s.RegisterMapType((%s)(nil), %q)", g.Pkg["proto"], mapFieldTypes[k], fullName) + } +} + +type byTypeName []*descriptor.FieldDescriptorProto + +func (a byTypeName) Len() int { return len(a) } +func (a byTypeName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTypeName) Less(i, j int) bool { return *a[i].TypeName < *a[j].TypeName } + +// mapFieldKeys returns the keys of m in a consistent order. +func mapFieldKeys(m map[*descriptor.FieldDescriptorProto]string) []*descriptor.FieldDescriptorProto { + keys := make([]*descriptor.FieldDescriptorProto, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Sort(byTypeName(keys)) + return keys } var escapeChars = [256]byte{ @@ -2598,10 +2635,15 @@ func (g *Generator) generateExtension(ext *ExtensionDescriptor) { typeName := ext.TypeName() // Special case for proto2 message sets: If this extension is extending - // proto2_bridge.MessageSet, and its final name component is "message_set_extension", + // proto2.bridge.MessageSet, and its final name component is "message_set_extension", // then drop that last component. + // + // TODO: This should be implemented in the text formatter rather than the generator. + // In addition, the situation for when to apply this special case is implemented + // differently in other languages: + // https://github.com/google/protobuf/blob/aff10976/src/google/protobuf/text_format.cc#L1560 mset := false - if extendedType == "*proto2_bridge.MessageSet" && typeName[len(typeName)-1] == "message_set_extension" { + if extDesc.GetOptions().GetMessageSetWireFormat() && typeName[len(typeName)-1] == "message_set_extension" { typeName = typeName[:len(typeName)-1] mset = true } @@ -2868,3 +2910,14 @@ const ( // tag numbers in EnumDescriptorProto enumValuePath = 2 // value ) + +var supportTypeAliases bool + +func init() { + for _, tag := range build.Default.ReleaseTags { + if tag == "go1.9" { + supportTypeAliases = true + return + } + } +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go new file mode 100644 index 0000000..a9b6103 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go @@ -0,0 +1,117 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2017 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package remap handles tracking the locations of Go tokens in a source text
+across a rewrite by the Go formatter.
+*/
+package remap
+
+import (
+	"fmt"
+	"go/scanner"
+	"go/token"
+)
+
+// A Location represents a span of byte offsets in the source text.
+type Location struct {
+	Pos, End int // End is exclusive
+}
+
+// A Map represents a mapping between token locations in an input source text
+// and locations in the corresponding output text.
+type Map map[Location]Location
+
+// Find reports whether the specified span is recorded by m, and if so returns
+// the new location it was mapped to. If the input span was not found, the
+// returned location is the same as the input.
+func (m Map) Find(pos, end int) (Location, bool) {
+	key := Location{
+		Pos: pos,
+		End: end,
+	}
+	if loc, ok := m[key]; ok {
+		return loc, true
+	}
+	return key, false
+}
+
+func (m Map) add(opos, oend, npos, nend int) {
+	m[Location{Pos: opos, End: oend}] = Location{Pos: npos, End: nend}
+}
+
+// Compute constructs a location mapping from input to output. An error is
+// reported if any of the tokens of output cannot be mapped.
+func Compute(input, output []byte) (Map, error) {
+	itok := tokenize(input)
+	otok := tokenize(output)
+	if len(itok) != len(otok) {
+		return nil, fmt.Errorf("wrong number of tokens, %d ≠ %d", len(itok), len(otok))
+	}
+	m := make(Map)
+	for i, ti := range itok {
+		to := otok[i]
+		if ti.Token != to.Token {
+			return nil, fmt.Errorf("token %d type mismatch: %s ≠ %s", i+1, ti, to)
+		}
+		m.add(ti.pos, ti.end, to.pos, to.end)
+	}
+	return m, nil
+}
+
+// tokinfo records the span and type of a source token.
+type tokinfo struct { + pos, end int + token.Token +} + +func tokenize(src []byte) []tokinfo { + fs := token.NewFileSet() + var s scanner.Scanner + s.Init(fs.AddFile("src", fs.Base(), len(src)), src, nil, scanner.ScanComments) + var info []tokinfo + for { + pos, next, lit := s.Scan() + switch next { + case token.SEMICOLON: + continue + } + info = append(info, tokinfo{ + pos: int(pos - 1), + end: int(pos + token.Pos(len(lit)) - 1), + Token: next, + }) + if next == token.EOF { + break + } + } + return info +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go index 2660e47..faef1ab 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go @@ -130,19 +130,23 @@ func (g *grpc) GenerateImports(file *generator.FileDescriptor) { return } g.P("import (") - g.P(contextPkg, " ", strconv.Quote(path.Join(g.gen.ImportPrefix, contextPkgPath))) - g.P(grpcPkg, " ", strconv.Quote(path.Join(g.gen.ImportPrefix, grpcPkgPath))) + g.P(contextPkg, " ", generator.GoImportPath(path.Join(string(g.gen.ImportPrefix), contextPkgPath))) + g.P(grpcPkg, " ", generator.GoImportPath(path.Join(string(g.gen.ImportPrefix), grpcPkgPath))) g.P(")") g.P() } // reservedClientName records whether a client name is reserved on the client side. var reservedClientName = map[string]bool{ -// TODO: do we need any in gRPC? + // TODO: do we need any in gRPC? } func unexport(s string) string { return strings.ToLower(s[:1]) + s[1:] } +// deprecationComment is the standard comment added to deprecated +// messages, fields, enums, and enum values. +var deprecationComment = "// Deprecated: Do not use." + // generateService generates all the code for the named service. func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.ServiceDescriptorProto, index int) { path := fmt.Sprintf("6,%d", index) // 6 means service. @@ -153,12 +157,18 @@ func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.Servi fullServName = pkg + "." + fullServName } servName := generator.CamelCase(origServName) + deprecated := service.GetOptions().GetDeprecated() g.P() - g.P("// Client API for ", servName, " service") - g.P() + g.P(fmt.Sprintf(`// %sClient is the client API for %s service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.`, servName, servName)) // Client interface. + if deprecated { + g.P("//") + g.P(deprecationComment) + } g.P("type ", servName, "Client interface {") for i, method := range service.Method { g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service. @@ -174,6 +184,9 @@ func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.Servi g.P() // NewClient factory. 
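To show where the `c.cc.Invoke` and `c.cc.NewStream` changes in this hunk end up, here is a sketch of the generated constructor and a unary stub for a hypothetical `Greeter` service; the client interface, message types, and imports are placeholders and elided, and the stubs now call methods on the `ClientConn` itself instead of the package-level `grpc.Invoke` helper:

func NewGreeterClient(cc *grpc.ClientConn) GreeterClient {
	return &greeterClient{cc}
}

func (c *greeterClient) SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) {
	out := new(HelloReply)
	err := c.cc.Invoke(ctx, "/helloworld.Greeter/SayHello", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}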
+ if deprecated { + g.P(deprecationComment) + } g.P("func New", servName, "Client (cc *", grpcPkg, ".ClientConn) ", servName, "Client {") g.P("return &", unexport(servName), "Client{cc}") g.P("}") @@ -196,11 +209,13 @@ func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.Servi g.generateClientMethod(servName, fullServName, serviceDescVar, method, descExpr) } - g.P("// Server API for ", servName, " service") - g.P() - // Server interface. serverType := servName + "Server" + g.P("// ", serverType, " is the server API for ", servName, " service.") + if deprecated { + g.P("//") + g.P(deprecationComment) + } g.P("type ", serverType, " interface {") for i, method := range service.Method { g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service. @@ -210,6 +225,9 @@ func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.Servi g.P() // Server registration. + if deprecated { + g.P(deprecationComment) + } g.P("func Register", servName, "Server(s *", grpcPkg, ".Server, srv ", serverType, ") {") g.P("s.RegisterService(&", serviceDescVar, `, srv)`) g.P("}") @@ -283,11 +301,14 @@ func (g *grpc) generateClientMethod(servName, fullServName, serviceDescVar strin inType := g.typeName(method.GetInputType()) outType := g.typeName(method.GetOutputType()) + if method.GetOptions().GetDeprecated() { + g.P(deprecationComment) + } g.P("func (c *", unexport(servName), "Client) ", g.generateClientSignature(servName, method), "{") if !method.GetServerStreaming() && !method.GetClientStreaming() { g.P("out := new(", outType, ")") // TODO: Pass descExpr to Invoke. - g.P("err := ", grpcPkg, `.Invoke(ctx, "`, sname, `", in, out, c.cc, opts...)`) + g.P(`err := c.cc.Invoke(ctx, "`, sname, `", in, out, opts...)`) g.P("if err != nil { return nil, err }") g.P("return out, nil") g.P("}") @@ -295,7 +316,7 @@ func (g *grpc) generateClientMethod(servName, fullServName, serviceDescVar strin return } streamType := unexport(servName) + methName + "Client" - g.P("stream, err := ", grpcPkg, ".NewClientStream(ctx, ", descExpr, `, c.cc, "`, sname, `", opts...)`) + g.P("stream, err := c.cc.NewStream(ctx, ", descExpr, `, "`, sname, `", opts...)`) g.P("if err != nil { return nil, err }") g.P("x := &", streamType, "{stream}") if !method.GetClientStreaming() { diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/Makefile b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/Makefile deleted file mode 100644 index bc0463d..0000000 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/Makefile +++ /dev/null @@ -1,45 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. 
nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -# Not stored here, but plugin.proto is in https://github.com/google/protobuf/ -# at src/google/protobuf/compiler/plugin.proto -# Also we need to fix an import. -regenerate: - @echo WARNING! THIS RULE IS PROBABLY NOT RIGHT FOR YOUR INSTALLATION - cp $(HOME)/src/protobuf/include/google/protobuf/compiler/plugin.proto . - protoc --go_out=Mgoogle/protobuf/descriptor.proto=github.com/golang/protobuf/protoc-gen-go/descriptor:../../../../.. \ - -I$(HOME)/src/protobuf/include $(HOME)/src/protobuf/include/google/protobuf/compiler/plugin.proto - -restore: - cp plugin.pb.golden plugin.pb.go - -preserve: - cp plugin.pb.go plugin.pb.golden diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go index c608a24..61bfc10 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go @@ -37,14 +37,33 @@ type Version struct { Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should // be empty for mainline stable releases. 
- Suffix *string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` - XXX_unrecognized []byte `json:"-"` + Suffix *string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Version) Reset() { *m = Version{} } func (m *Version) String() string { return proto.CompactTextString(m) } func (*Version) ProtoMessage() {} func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *Version) Unmarshal(b []byte) error { + return xxx_messageInfo_Version.Unmarshal(m, b) +} +func (m *Version) Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Version.Marshal(b, m, deterministic) +} +func (dst *Version) XXX_Merge(src proto.Message) { + xxx_messageInfo_Version.Merge(dst, src) +} +func (m *Version) XXX_Size() int { + return xxx_messageInfo_Version.Size(m) +} +func (m *Version) XXX_DiscardUnknown() { + xxx_messageInfo_Version.DiscardUnknown(m) +} + +var xxx_messageInfo_Version proto.InternalMessageInfo func (m *Version) GetMajor() int32 { if m != nil && m.Major != nil { @@ -98,14 +117,33 @@ type CodeGeneratorRequest struct { // fully qualified. ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"` // The version number of protocol compiler. - CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` - XXX_unrecognized []byte `json:"-"` + CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *CodeGeneratorRequest) Reset() { *m = CodeGeneratorRequest{} } func (m *CodeGeneratorRequest) String() string { return proto.CompactTextString(m) } func (*CodeGeneratorRequest) ProtoMessage() {} func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (m *CodeGeneratorRequest) Unmarshal(b []byte) error { + return xxx_messageInfo_CodeGeneratorRequest.Unmarshal(m, b) +} +func (m *CodeGeneratorRequest) Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CodeGeneratorRequest.Marshal(b, m, deterministic) +} +func (dst *CodeGeneratorRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CodeGeneratorRequest.Merge(dst, src) +} +func (m *CodeGeneratorRequest) XXX_Size() int { + return xxx_messageInfo_CodeGeneratorRequest.Size(m) +} +func (m *CodeGeneratorRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CodeGeneratorRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CodeGeneratorRequest proto.InternalMessageInfo func (m *CodeGeneratorRequest) GetFileToGenerate() []string { if m != nil { @@ -145,15 +183,34 @@ type CodeGeneratorResponse struct { // problem in protoc itself -- such as the input CodeGeneratorRequest being // unparseable -- should be reported by writing a message to stderr and // exiting with a non-zero status code. 
- Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` - File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` - XXX_unrecognized []byte `json:"-"` + Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *CodeGeneratorResponse) Reset() { *m = CodeGeneratorResponse{} } func (m *CodeGeneratorResponse) String() string { return proto.CompactTextString(m) } func (*CodeGeneratorResponse) ProtoMessage() {} func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (m *CodeGeneratorResponse) Unmarshal(b []byte) error { + return xxx_messageInfo_CodeGeneratorResponse.Unmarshal(m, b) +} +func (m *CodeGeneratorResponse) Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CodeGeneratorResponse.Marshal(b, m, deterministic) +} +func (dst *CodeGeneratorResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CodeGeneratorResponse.Merge(dst, src) +} +func (m *CodeGeneratorResponse) XXX_Size() int { + return xxx_messageInfo_CodeGeneratorResponse.Size(m) +} +func (m *CodeGeneratorResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CodeGeneratorResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CodeGeneratorResponse proto.InternalMessageInfo func (m *CodeGeneratorResponse) GetError() string { if m != nil && m.Error != nil { @@ -222,14 +279,33 @@ type CodeGeneratorResponse_File struct { // If |insertion_point| is present, |name| must also be present. InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"` // The file contents. 
- Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` - XXX_unrecognized []byte `json:"-"` + Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *CodeGeneratorResponse_File) Reset() { *m = CodeGeneratorResponse_File{} } func (m *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(m) } func (*CodeGeneratorResponse_File) ProtoMessage() {} func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } +func (m *CodeGeneratorResponse_File) Unmarshal(b []byte) error { + return xxx_messageInfo_CodeGeneratorResponse_File.Unmarshal(m, b) +} +func (m *CodeGeneratorResponse_File) Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CodeGeneratorResponse_File.Marshal(b, m, deterministic) +} +func (dst *CodeGeneratorResponse_File) XXX_Merge(src proto.Message) { + xxx_messageInfo_CodeGeneratorResponse_File.Merge(dst, src) +} +func (m *CodeGeneratorResponse_File) XXX_Size() int { + return xxx_messageInfo_CodeGeneratorResponse_File.Size(m) +} +func (m *CodeGeneratorResponse_File) XXX_DiscardUnknown() { + xxx_messageInfo_CodeGeneratorResponse_File.DiscardUnknown(m) +} + +var xxx_messageInfo_CodeGeneratorResponse_File proto.InternalMessageInfo func (m *CodeGeneratorResponse_File) GetName() string { if m != nil && m.Name != nil { diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/deprecated/deprecated.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/deprecated/deprecated.pb.go new file mode 100644 index 0000000..2989e7c --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/deprecated/deprecated.pb.go @@ -0,0 +1,235 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// deprecated/deprecated.proto is a deprecated file. + +package deprecated // import "github.com/golang/protobuf/protoc-gen-go/testdata/deprecated" + +/* +package deprecated contains only deprecated messages and services. +*/ + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + "context" + + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// DeprecatedEnum contains deprecated values. +type DeprecatedEnum int32 // Deprecated: Do not use. +const ( + // DEPRECATED is the iota value of this enum. + DeprecatedEnum_DEPRECATED DeprecatedEnum = 0 // Deprecated: Do not use. 
+) + +var DeprecatedEnum_name = map[int32]string{ + 0: "DEPRECATED", +} +var DeprecatedEnum_value = map[string]int32{ + "DEPRECATED": 0, +} + +func (x DeprecatedEnum) String() string { + return proto.EnumName(DeprecatedEnum_name, int32(x)) +} +func (DeprecatedEnum) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_deprecated_9e1889ba21817fad, []int{0} +} + +// DeprecatedRequest is a request to DeprecatedCall. +// +// Deprecated: Do not use. +type DeprecatedRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeprecatedRequest) Reset() { *m = DeprecatedRequest{} } +func (m *DeprecatedRequest) String() string { return proto.CompactTextString(m) } +func (*DeprecatedRequest) ProtoMessage() {} +func (*DeprecatedRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_deprecated_9e1889ba21817fad, []int{0} +} +func (m *DeprecatedRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeprecatedRequest.Unmarshal(m, b) +} +func (m *DeprecatedRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeprecatedRequest.Marshal(b, m, deterministic) +} +func (dst *DeprecatedRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeprecatedRequest.Merge(dst, src) +} +func (m *DeprecatedRequest) XXX_Size() int { + return xxx_messageInfo_DeprecatedRequest.Size(m) +} +func (m *DeprecatedRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeprecatedRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeprecatedRequest proto.InternalMessageInfo + +// Deprecated: Do not use. +type DeprecatedResponse struct { + // DeprecatedField contains a DeprecatedEnum. + DeprecatedField DeprecatedEnum `protobuf:"varint,1,opt,name=deprecated_field,json=deprecatedField,proto3,enum=deprecated.DeprecatedEnum" json:"deprecated_field,omitempty"` // Deprecated: Do not use. + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeprecatedResponse) Reset() { *m = DeprecatedResponse{} } +func (m *DeprecatedResponse) String() string { return proto.CompactTextString(m) } +func (*DeprecatedResponse) ProtoMessage() {} +func (*DeprecatedResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_deprecated_9e1889ba21817fad, []int{1} +} +func (m *DeprecatedResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeprecatedResponse.Unmarshal(m, b) +} +func (m *DeprecatedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeprecatedResponse.Marshal(b, m, deterministic) +} +func (dst *DeprecatedResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeprecatedResponse.Merge(dst, src) +} +func (m *DeprecatedResponse) XXX_Size() int { + return xxx_messageInfo_DeprecatedResponse.Size(m) +} +func (m *DeprecatedResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DeprecatedResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DeprecatedResponse proto.InternalMessageInfo + +// Deprecated: Do not use. 
+func (m *DeprecatedResponse) GetDeprecatedField() DeprecatedEnum { + if m != nil { + return m.DeprecatedField + } + return DeprecatedEnum_DEPRECATED +} + +func init() { + proto.RegisterType((*DeprecatedRequest)(nil), "deprecated.DeprecatedRequest") + proto.RegisterType((*DeprecatedResponse)(nil), "deprecated.DeprecatedResponse") + proto.RegisterEnum("deprecated.DeprecatedEnum", DeprecatedEnum_name, DeprecatedEnum_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// DeprecatedServiceClient is the client API for DeprecatedService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// +// Deprecated: Do not use. +type DeprecatedServiceClient interface { + // DeprecatedCall takes a DeprecatedRequest and returns a DeprecatedResponse. + DeprecatedCall(ctx context.Context, in *DeprecatedRequest, opts ...grpc.CallOption) (*DeprecatedResponse, error) +} + +type deprecatedServiceClient struct { + cc *grpc.ClientConn +} + +// Deprecated: Do not use. +func NewDeprecatedServiceClient(cc *grpc.ClientConn) DeprecatedServiceClient { + return &deprecatedServiceClient{cc} +} + +// Deprecated: Do not use. +func (c *deprecatedServiceClient) DeprecatedCall(ctx context.Context, in *DeprecatedRequest, opts ...grpc.CallOption) (*DeprecatedResponse, error) { + out := new(DeprecatedResponse) + err := c.cc.Invoke(ctx, "/deprecated.DeprecatedService/DeprecatedCall", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// DeprecatedServiceServer is the server API for DeprecatedService service. +// +// Deprecated: Do not use. +type DeprecatedServiceServer interface { + // DeprecatedCall takes a DeprecatedRequest and returns a DeprecatedResponse. + DeprecatedCall(context.Context, *DeprecatedRequest) (*DeprecatedResponse, error) +} + +// Deprecated: Do not use. 
+func RegisterDeprecatedServiceServer(s *grpc.Server, srv DeprecatedServiceServer) { + s.RegisterService(&_DeprecatedService_serviceDesc, srv) +} + +func _DeprecatedService_DeprecatedCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeprecatedRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DeprecatedServiceServer).DeprecatedCall(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/deprecated.DeprecatedService/DeprecatedCall", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DeprecatedServiceServer).DeprecatedCall(ctx, req.(*DeprecatedRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _DeprecatedService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "deprecated.DeprecatedService", + HandlerType: (*DeprecatedServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "DeprecatedCall", + Handler: _DeprecatedService_DeprecatedCall_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "deprecated/deprecated.proto", +} + +func init() { + proto.RegisterFile("deprecated/deprecated.proto", fileDescriptor_deprecated_9e1889ba21817fad) +} + +var fileDescriptor_deprecated_9e1889ba21817fad = []byte{ + // 248 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x49, 0x2d, 0x28, + 0x4a, 0x4d, 0x4e, 0x2c, 0x49, 0x4d, 0xd1, 0x47, 0x30, 0xf5, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, + 0xb8, 0x10, 0x22, 0x4a, 0xe2, 0x5c, 0x82, 0x2e, 0x70, 0x5e, 0x50, 0x6a, 0x61, 0x69, 0x6a, 0x71, + 0x89, 0x15, 0x93, 0x04, 0xa3, 0x52, 0x32, 0x97, 0x10, 0xb2, 0x44, 0x71, 0x41, 0x7e, 0x5e, 0x71, + 0xaa, 0x90, 0x27, 0x97, 0x00, 0x42, 0x73, 0x7c, 0x5a, 0x66, 0x6a, 0x4e, 0x8a, 0x04, 0xa3, 0x02, + 0xa3, 0x06, 0x9f, 0x91, 0x94, 0x1e, 0x92, 0x3d, 0x08, 0x9d, 0xae, 0x79, 0xa5, 0xb9, 0x4e, 0x4c, + 0x12, 0x8c, 0x41, 0xfc, 0x08, 0x69, 0x37, 0x90, 0x36, 0x90, 0x25, 0x5a, 0x1a, 0x5c, 0x7c, 0xa8, + 0x4a, 0x85, 0x84, 0xb8, 0xb8, 0x5c, 0x5c, 0x03, 0x82, 0x5c, 0x9d, 0x1d, 0x43, 0x5c, 0x5d, 0x04, + 0x18, 0xa4, 0x98, 0x38, 0x18, 0xa5, 0x98, 0x24, 0x18, 0x8d, 0xf2, 0x90, 0xdd, 0x19, 0x9c, 0x5a, + 0x54, 0x96, 0x99, 0x9c, 0x2a, 0x14, 0x82, 0xac, 0xdd, 0x39, 0x31, 0x27, 0x47, 0x48, 0x16, 0xbb, + 0x2b, 0xa0, 0x1e, 0x93, 0x92, 0xc3, 0x25, 0x0d, 0xf1, 0x9e, 0x12, 0x73, 0x07, 0x13, 0xa3, 0x14, + 0x88, 0x70, 0x72, 0x8c, 0xb2, 0x49, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, + 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x07, 0x07, 0x5f, 0x52, 0x69, 0x1a, 0x84, 0x91, 0xac, + 0x9b, 0x9e, 0x9a, 0xa7, 0x9b, 0x9e, 0xaf, 0x5f, 0x92, 0x5a, 0x5c, 0x92, 0x92, 0x58, 0x92, 0x88, + 0x14, 0xd2, 0x3b, 0x18, 0x19, 0x93, 0xd8, 0xc0, 0xaa, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x0e, 0xf5, 0x6c, 0x87, 0x8c, 0x01, 0x00, 0x00, +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/deprecated/deprecated.proto b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/deprecated/deprecated.proto new file mode 100644 index 0000000..b314166 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/deprecated/deprecated.proto @@ -0,0 +1,69 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go 
Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +// package deprecated contains only deprecated messages and services. +package deprecated; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/deprecated"; + +option deprecated = true; // file-level deprecation + +// DeprecatedRequest is a request to DeprecatedCall. +message DeprecatedRequest { + option deprecated = true; +} + +message DeprecatedResponse { + // comment for DeprecatedResponse is omitted to guarantee deprecation + // message doesn't append unnecessary comments. + option deprecated = true; + // DeprecatedField contains a DeprecatedEnum. + DeprecatedEnum deprecated_field = 1 [deprecated=true]; +} + +// DeprecatedEnum contains deprecated values. +enum DeprecatedEnum { + option deprecated = true; + // DEPRECATED is the iota value of this enum. + DEPRECATED = 0 [deprecated=true]; +} + +// DeprecatedService is for making DeprecatedCalls +service DeprecatedService { + option deprecated = true; + + // DeprecatedCall takes a DeprecatedRequest and returns a DeprecatedResponse. + rpc DeprecatedCall(DeprecatedRequest) returns (DeprecatedResponse) { + option deprecated = true; + } +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base/extension_base.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base/extension_base.pb.go new file mode 100644 index 0000000..a08e8ed --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base/extension_base.pb.go @@ -0,0 +1,139 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: extension_base/extension_base.proto + +package extension_base // import "github.com/golang/protobuf/protoc-gen-go/testdata/extension_base" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type BaseMessage struct { + Height *int32 `protobuf:"varint,1,opt,name=height" json:"height,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BaseMessage) Reset() { *m = BaseMessage{} } +func (m *BaseMessage) String() string { return proto.CompactTextString(m) } +func (*BaseMessage) ProtoMessage() {} +func (*BaseMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_extension_base_41d3c712c9fc37fc, []int{0} +} + +var extRange_BaseMessage = []proto.ExtensionRange{ + {Start: 4, End: 9}, + {Start: 16, End: 536870911}, +} + +func (*BaseMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_BaseMessage +} +func (m *BaseMessage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BaseMessage.Unmarshal(m, b) +} +func (m *BaseMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BaseMessage.Marshal(b, m, deterministic) +} +func (dst *BaseMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_BaseMessage.Merge(dst, src) +} +func (m *BaseMessage) XXX_Size() int { + return xxx_messageInfo_BaseMessage.Size(m) +} +func (m *BaseMessage) XXX_DiscardUnknown() { + xxx_messageInfo_BaseMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_BaseMessage proto.InternalMessageInfo + +func (m *BaseMessage) GetHeight() int32 { + if m != nil && m.Height != nil { + return *m.Height + } + return 0 +} + +// Another message that may be extended, using message_set_wire_format. 
+type OldStyleMessage struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `protobuf_messageset:"1" json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OldStyleMessage) Reset() { *m = OldStyleMessage{} } +func (m *OldStyleMessage) String() string { return proto.CompactTextString(m) } +func (*OldStyleMessage) ProtoMessage() {} +func (*OldStyleMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_extension_base_41d3c712c9fc37fc, []int{1} +} + +func (m *OldStyleMessage) MarshalJSON() ([]byte, error) { + return proto.MarshalMessageSetJSON(&m.XXX_InternalExtensions) +} +func (m *OldStyleMessage) UnmarshalJSON(buf []byte) error { + return proto.UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions) +} + +var extRange_OldStyleMessage = []proto.ExtensionRange{ + {Start: 100, End: 2147483646}, +} + +func (*OldStyleMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OldStyleMessage +} +func (m *OldStyleMessage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OldStyleMessage.Unmarshal(m, b) +} +func (m *OldStyleMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OldStyleMessage.Marshal(b, m, deterministic) +} +func (dst *OldStyleMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_OldStyleMessage.Merge(dst, src) +} +func (m *OldStyleMessage) XXX_Size() int { + return xxx_messageInfo_OldStyleMessage.Size(m) +} +func (m *OldStyleMessage) XXX_DiscardUnknown() { + xxx_messageInfo_OldStyleMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_OldStyleMessage proto.InternalMessageInfo + +func init() { + proto.RegisterType((*BaseMessage)(nil), "extension_base.BaseMessage") + proto.RegisterType((*OldStyleMessage)(nil), "extension_base.OldStyleMessage") +} + +func init() { + proto.RegisterFile("extension_base/extension_base.proto", fileDescriptor_extension_base_41d3c712c9fc37fc) +} + +var fileDescriptor_extension_base_41d3c712c9fc37fc = []byte{ + // 179 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4e, 0xad, 0x28, 0x49, + 0xcd, 0x2b, 0xce, 0xcc, 0xcf, 0x8b, 0x4f, 0x4a, 0x2c, 0x4e, 0xd5, 0x47, 0xe5, 0xea, 0x15, 0x14, + 0xe5, 0x97, 0xe4, 0x0b, 0xf1, 0xa1, 0x8a, 0x2a, 0x99, 0x72, 0x71, 0x3b, 0x25, 0x16, 0xa7, 0xfa, + 0xa6, 0x16, 0x17, 0x27, 0xa6, 0xa7, 0x0a, 0x89, 0x71, 0xb1, 0x65, 0xa4, 0x66, 0xa6, 0x67, 0x94, + 0x48, 0x30, 0x2a, 0x30, 0x6a, 0xb0, 0x06, 0x41, 0x79, 0x5a, 0x2c, 0x1c, 0x2c, 0x02, 0x5c, 0x5a, + 0x1c, 0x1c, 0x02, 0x02, 0x0d, 0x0d, 0x0d, 0x0d, 0x4c, 0x4a, 0xf2, 0x5c, 0xfc, 0xfe, 0x39, 0x29, + 0xc1, 0x25, 0x95, 0x39, 0x30, 0xad, 0x5a, 0x1c, 0x1c, 0x29, 0x02, 0xff, 0xff, 0xff, 0xff, 0xcf, + 0x6e, 0xc5, 0xc4, 0xc1, 0xe8, 0xe4, 0x14, 0xe5, 0x90, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, + 0x9c, 0x9f, 0xab, 0x9f, 0x9e, 0x9f, 0x93, 0x98, 0x97, 0xae, 0x0f, 0x76, 0x42, 0x52, 0x69, 0x1a, + 0x84, 0x91, 0xac, 0x9b, 0x9e, 0x9a, 0xa7, 0x9b, 0x9e, 0xaf, 0x5f, 0x92, 0x5a, 0x5c, 0x92, 0x92, + 0x58, 0x92, 0x88, 0xe6, 0x62, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7a, 0x7f, 0xb7, 0x2a, 0xd1, + 0x00, 0x00, 0x00, +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base/extension_base.proto b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base/extension_base.proto new file mode 100644 index 0000000..0ba74de --- /dev/null +++ 
b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base/extension_base.proto @@ -0,0 +1,48 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +package extension_base; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/extension_base"; + +message BaseMessage { + optional int32 height = 1; + extensions 4 to 9; + extensions 16 to max; +} + +// Another message that may be extended, using message_set_wire_format. +message OldStyleMessage { + option message_set_wire_format = true; + extensions 100 to max; +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra/extension_extra.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra/extension_extra.pb.go new file mode 100644 index 0000000..b373216 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra/extension_extra.pb.go @@ -0,0 +1,78 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: extension_extra/extension_extra.proto + +package extension_extra // import "github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type ExtraMessage struct { + Width *int32 `protobuf:"varint,1,opt,name=width" json:"width,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtraMessage) Reset() { *m = ExtraMessage{} } +func (m *ExtraMessage) String() string { return proto.CompactTextString(m) } +func (*ExtraMessage) ProtoMessage() {} +func (*ExtraMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_extension_extra_83adf2410f49f816, []int{0} +} +func (m *ExtraMessage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtraMessage.Unmarshal(m, b) +} +func (m *ExtraMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtraMessage.Marshal(b, m, deterministic) +} +func (dst *ExtraMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtraMessage.Merge(dst, src) +} +func (m *ExtraMessage) XXX_Size() int { + return xxx_messageInfo_ExtraMessage.Size(m) +} +func (m *ExtraMessage) XXX_DiscardUnknown() { + xxx_messageInfo_ExtraMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtraMessage proto.InternalMessageInfo + +func (m *ExtraMessage) GetWidth() int32 { + if m != nil && m.Width != nil { + return *m.Width + } + return 0 +} + +func init() { + proto.RegisterType((*ExtraMessage)(nil), "extension_extra.ExtraMessage") +} + +func init() { + proto.RegisterFile("extension_extra/extension_extra.proto", fileDescriptor_extension_extra_83adf2410f49f816) +} + +var fileDescriptor_extension_extra_83adf2410f49f816 = []byte{ + // 133 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4d, 0xad, 0x28, 0x49, + 0xcd, 0x2b, 0xce, 0xcc, 0xcf, 0x8b, 0x4f, 0xad, 0x28, 0x29, 0x4a, 0xd4, 0x47, 0xe3, 0xeb, 0x15, + 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xf1, 0xa3, 0x09, 0x2b, 0xa9, 0x70, 0xf1, 0xb8, 0x82, 0x18, 0xbe, + 0xa9, 0xc5, 0xc5, 0x89, 0xe9, 0xa9, 0x42, 0x22, 0x5c, 0xac, 0xe5, 0x99, 0x29, 0x25, 0x19, 0x12, + 0x8c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x93, 0x73, 0x94, 0x63, 0x7a, 0x66, 0x49, 0x46, + 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, 0xba, 0x3e, 0xd8, 0xc4, + 0xa4, 0xd2, 0x34, 0x08, 0x23, 0x59, 0x37, 0x3d, 0x35, 0x4f, 0x37, 0x3d, 0x5f, 0xbf, 0x24, 0xb5, + 0xb8, 0x24, 0x25, 0xb1, 0x04, 0xc3, 0x05, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf1, 0xec, 0xe3, + 0xb7, 0xa3, 0x00, 0x00, 0x00, +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra/extension_extra.proto b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra/extension_extra.proto new file mode 100644 index 0000000..1dd03e7 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra/extension_extra.proto @@ -0,0 +1,40 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +package extension_extra; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra"; + +message ExtraMessage { + optional int32 width = 1; +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user/extension_user.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user/extension_user.pb.go new file mode 100644 index 0000000..c718792 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user/extension_user.pb.go @@ -0,0 +1,401 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: extension_user/extension_user.proto + +package extension_user // import "github.com/golang/protobuf/protoc-gen-go/testdata/extension_user" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import extension_base "github.com/golang/protobuf/protoc-gen-go/testdata/extension_base" +import extension_extra "github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type UserMessage struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Rank *string `protobuf:"bytes,2,opt,name=rank" json:"rank,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UserMessage) Reset() { *m = UserMessage{} } +func (m *UserMessage) String() string { return proto.CompactTextString(m) } +func (*UserMessage) ProtoMessage() {} +func (*UserMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_extension_user_af41b5e0bdfb7846, []int{0} +} +func (m *UserMessage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UserMessage.Unmarshal(m, b) +} +func (m *UserMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UserMessage.Marshal(b, m, deterministic) +} +func (dst *UserMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserMessage.Merge(dst, src) +} +func (m *UserMessage) XXX_Size() int { + return xxx_messageInfo_UserMessage.Size(m) +} +func (m *UserMessage) XXX_DiscardUnknown() { + xxx_messageInfo_UserMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_UserMessage proto.InternalMessageInfo + +func (m *UserMessage) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *UserMessage) GetRank() string { + if m != nil && m.Rank != nil { + return *m.Rank + } + return "" +} + +// Extend inside the scope of another type +type LoudMessage struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LoudMessage) Reset() { *m = LoudMessage{} } +func (m *LoudMessage) String() string { return proto.CompactTextString(m) } +func (*LoudMessage) ProtoMessage() {} +func (*LoudMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_extension_user_af41b5e0bdfb7846, []int{1} +} + +var extRange_LoudMessage = []proto.ExtensionRange{ + {Start: 100, End: 536870911}, +} + +func (*LoudMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_LoudMessage +} +func (m *LoudMessage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LoudMessage.Unmarshal(m, b) +} +func (m *LoudMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LoudMessage.Marshal(b, m, deterministic) +} +func (dst *LoudMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_LoudMessage.Merge(dst, src) +} +func (m *LoudMessage) XXX_Size() int { + return xxx_messageInfo_LoudMessage.Size(m) +} +func (m *LoudMessage) XXX_DiscardUnknown() { + xxx_messageInfo_LoudMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_LoudMessage proto.InternalMessageInfo + +var E_LoudMessage_Volume = &proto.ExtensionDesc{ + ExtendedType: (*extension_base.BaseMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 8, + Name: "extension_user.LoudMessage.volume", + Tag: "varint,8,opt,name=volume", + Filename: "extension_user/extension_user.proto", +} + +// Extend inside the scope of another type, using a message. 
+type LoginMessage struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LoginMessage) Reset() { *m = LoginMessage{} } +func (m *LoginMessage) String() string { return proto.CompactTextString(m) } +func (*LoginMessage) ProtoMessage() {} +func (*LoginMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_extension_user_af41b5e0bdfb7846, []int{2} +} +func (m *LoginMessage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LoginMessage.Unmarshal(m, b) +} +func (m *LoginMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LoginMessage.Marshal(b, m, deterministic) +} +func (dst *LoginMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_LoginMessage.Merge(dst, src) +} +func (m *LoginMessage) XXX_Size() int { + return xxx_messageInfo_LoginMessage.Size(m) +} +func (m *LoginMessage) XXX_DiscardUnknown() { + xxx_messageInfo_LoginMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_LoginMessage proto.InternalMessageInfo + +var E_LoginMessage_UserMessage = &proto.ExtensionDesc{ + ExtendedType: (*extension_base.BaseMessage)(nil), + ExtensionType: (*UserMessage)(nil), + Field: 16, + Name: "extension_user.LoginMessage.user_message", + Tag: "bytes,16,opt,name=user_message,json=userMessage", + Filename: "extension_user/extension_user.proto", +} + +type Detail struct { + Color *string `protobuf:"bytes,1,opt,name=color" json:"color,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Detail) Reset() { *m = Detail{} } +func (m *Detail) String() string { return proto.CompactTextString(m) } +func (*Detail) ProtoMessage() {} +func (*Detail) Descriptor() ([]byte, []int) { + return fileDescriptor_extension_user_af41b5e0bdfb7846, []int{3} +} +func (m *Detail) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Detail.Unmarshal(m, b) +} +func (m *Detail) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Detail.Marshal(b, m, deterministic) +} +func (dst *Detail) XXX_Merge(src proto.Message) { + xxx_messageInfo_Detail.Merge(dst, src) +} +func (m *Detail) XXX_Size() int { + return xxx_messageInfo_Detail.Size(m) +} +func (m *Detail) XXX_DiscardUnknown() { + xxx_messageInfo_Detail.DiscardUnknown(m) +} + +var xxx_messageInfo_Detail proto.InternalMessageInfo + +func (m *Detail) GetColor() string { + if m != nil && m.Color != nil { + return *m.Color + } + return "" +} + +// An extension of an extension +type Announcement struct { + Words *string `protobuf:"bytes,1,opt,name=words" json:"words,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Announcement) Reset() { *m = Announcement{} } +func (m *Announcement) String() string { return proto.CompactTextString(m) } +func (*Announcement) ProtoMessage() {} +func (*Announcement) Descriptor() ([]byte, []int) { + return fileDescriptor_extension_user_af41b5e0bdfb7846, []int{4} +} +func (m *Announcement) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Announcement.Unmarshal(m, b) +} +func (m *Announcement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Announcement.Marshal(b, m, deterministic) +} +func (dst *Announcement) XXX_Merge(src proto.Message) { + xxx_messageInfo_Announcement.Merge(dst, src) +} +func (m *Announcement) XXX_Size() int { + return 
xxx_messageInfo_Announcement.Size(m) +} +func (m *Announcement) XXX_DiscardUnknown() { + xxx_messageInfo_Announcement.DiscardUnknown(m) +} + +var xxx_messageInfo_Announcement proto.InternalMessageInfo + +func (m *Announcement) GetWords() string { + if m != nil && m.Words != nil { + return *m.Words + } + return "" +} + +var E_Announcement_LoudExt = &proto.ExtensionDesc{ + ExtendedType: (*LoudMessage)(nil), + ExtensionType: (*Announcement)(nil), + Field: 100, + Name: "extension_user.Announcement.loud_ext", + Tag: "bytes,100,opt,name=loud_ext,json=loudExt", + Filename: "extension_user/extension_user.proto", +} + +// Something that can be put in a message set. +type OldStyleParcel struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + Height *int32 `protobuf:"varint,2,opt,name=height" json:"height,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OldStyleParcel) Reset() { *m = OldStyleParcel{} } +func (m *OldStyleParcel) String() string { return proto.CompactTextString(m) } +func (*OldStyleParcel) ProtoMessage() {} +func (*OldStyleParcel) Descriptor() ([]byte, []int) { + return fileDescriptor_extension_user_af41b5e0bdfb7846, []int{5} +} +func (m *OldStyleParcel) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OldStyleParcel.Unmarshal(m, b) +} +func (m *OldStyleParcel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OldStyleParcel.Marshal(b, m, deterministic) +} +func (dst *OldStyleParcel) XXX_Merge(src proto.Message) { + xxx_messageInfo_OldStyleParcel.Merge(dst, src) +} +func (m *OldStyleParcel) XXX_Size() int { + return xxx_messageInfo_OldStyleParcel.Size(m) +} +func (m *OldStyleParcel) XXX_DiscardUnknown() { + xxx_messageInfo_OldStyleParcel.DiscardUnknown(m) +} + +var xxx_messageInfo_OldStyleParcel proto.InternalMessageInfo + +func (m *OldStyleParcel) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *OldStyleParcel) GetHeight() int32 { + if m != nil && m.Height != nil { + return *m.Height + } + return 0 +} + +var E_OldStyleParcel_MessageSetExtension = &proto.ExtensionDesc{ + ExtendedType: (*extension_base.OldStyleMessage)(nil), + ExtensionType: (*OldStyleParcel)(nil), + Field: 2001, + Name: "extension_user.OldStyleParcel", + Tag: "bytes,2001,opt,name=message_set_extension,json=messageSetExtension", + Filename: "extension_user/extension_user.proto", +} + +var E_UserMessage = &proto.ExtensionDesc{ + ExtendedType: (*extension_base.BaseMessage)(nil), + ExtensionType: (*UserMessage)(nil), + Field: 5, + Name: "extension_user.user_message", + Tag: "bytes,5,opt,name=user_message,json=userMessage", + Filename: "extension_user/extension_user.proto", +} + +var E_ExtraMessage = &proto.ExtensionDesc{ + ExtendedType: (*extension_base.BaseMessage)(nil), + ExtensionType: (*extension_extra.ExtraMessage)(nil), + Field: 9, + Name: "extension_user.extra_message", + Tag: "bytes,9,opt,name=extra_message,json=extraMessage", + Filename: "extension_user/extension_user.proto", +} + +var E_Width = &proto.ExtensionDesc{ + ExtendedType: (*extension_base.BaseMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 6, + Name: "extension_user.width", + Tag: "varint,6,opt,name=width", + Filename: "extension_user/extension_user.proto", +} + +var E_Area = &proto.ExtensionDesc{ + ExtendedType: (*extension_base.BaseMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 7, + Name: "extension_user.area", + Tag: 
"varint,7,opt,name=area", + Filename: "extension_user/extension_user.proto", +} + +var E_Detail = &proto.ExtensionDesc{ + ExtendedType: (*extension_base.BaseMessage)(nil), + ExtensionType: ([]*Detail)(nil), + Field: 17, + Name: "extension_user.detail", + Tag: "bytes,17,rep,name=detail", + Filename: "extension_user/extension_user.proto", +} + +func init() { + proto.RegisterType((*UserMessage)(nil), "extension_user.UserMessage") + proto.RegisterType((*LoudMessage)(nil), "extension_user.LoudMessage") + proto.RegisterType((*LoginMessage)(nil), "extension_user.LoginMessage") + proto.RegisterType((*Detail)(nil), "extension_user.Detail") + proto.RegisterType((*Announcement)(nil), "extension_user.Announcement") + proto.RegisterMessageSetType((*OldStyleParcel)(nil), 2001, "extension_user.OldStyleParcel") + proto.RegisterType((*OldStyleParcel)(nil), "extension_user.OldStyleParcel") + proto.RegisterExtension(E_LoudMessage_Volume) + proto.RegisterExtension(E_LoginMessage_UserMessage) + proto.RegisterExtension(E_Announcement_LoudExt) + proto.RegisterExtension(E_OldStyleParcel_MessageSetExtension) + proto.RegisterExtension(E_UserMessage) + proto.RegisterExtension(E_ExtraMessage) + proto.RegisterExtension(E_Width) + proto.RegisterExtension(E_Area) + proto.RegisterExtension(E_Detail) +} + +func init() { + proto.RegisterFile("extension_user/extension_user.proto", fileDescriptor_extension_user_af41b5e0bdfb7846) +} + +var fileDescriptor_extension_user_af41b5e0bdfb7846 = []byte{ + // 492 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x51, 0x6f, 0x94, 0x40, + 0x10, 0x0e, 0x6d, 0x8f, 0x5e, 0x87, 0x6b, 0xad, 0xa8, 0xcd, 0xa5, 0x6a, 0x25, 0x18, 0x13, 0x62, + 0xd2, 0x23, 0x62, 0x7c, 0xe1, 0x49, 0x2f, 0xde, 0x93, 0x67, 0x34, 0x54, 0x5f, 0xf4, 0x81, 0xec, + 0xc1, 0xc8, 0x91, 0xc2, 0xae, 0xd9, 0x5d, 0xec, 0xe9, 0xd3, 0xfd, 0x26, 0xff, 0x89, 0xff, 0xc8, + 0xb0, 0x2c, 0x2d, 0x87, 0xc9, 0xc5, 0xbe, 0x90, 0xfd, 0x86, 0x6f, 0xbe, 0x99, 0xfd, 0x66, 0x00, + 0x9e, 0xe2, 0x4a, 0x22, 0x15, 0x39, 0xa3, 0x71, 0x25, 0x90, 0xfb, 0x9b, 0x70, 0xf2, 0x9d, 0x33, + 0xc9, 0xec, 0xa3, 0xcd, 0xe8, 0x69, 0x27, 0x69, 0x41, 0x04, 0xfa, 0x9b, 0xb0, 0x49, 0x3a, 0x7d, + 0x76, 0x13, 0xc5, 0x95, 0xe4, 0xc4, 0xef, 0xe1, 0x86, 0xe6, 0xbe, 0x02, 0xeb, 0xb3, 0x40, 0xfe, + 0x1e, 0x85, 0x20, 0x19, 0xda, 0x36, 0xec, 0x51, 0x52, 0xe2, 0xd8, 0x70, 0x0c, 0xef, 0x20, 0x52, + 0xe7, 0x3a, 0xc6, 0x09, 0xbd, 0x1c, 0xef, 0x34, 0xb1, 0xfa, 0xec, 0xce, 0xc1, 0x9a, 0xb3, 0x2a, + 0xd5, 0x69, 0xcf, 0x87, 0xc3, 0xf4, 0x78, 0xbd, 0x5e, 0xaf, 0x77, 0x82, 0x97, 0x60, 0xfe, 0x60, + 0x45, 0x55, 0xa2, 0xfd, 0x70, 0xd2, 0xeb, 0x6b, 0x4a, 0x04, 0xea, 0x84, 0xf1, 0xd0, 0x31, 0xbc, + 0xc3, 0x48, 0x53, 0xdd, 0x4b, 0x18, 0xcd, 0x59, 0x96, 0x53, 0xfd, 0x36, 0xf8, 0x0a, 0xa3, 0xfa, + 0xa2, 0x71, 0xa9, 0xbb, 0xda, 0x2a, 0x75, 0xec, 0x18, 0x9e, 0x15, 0x74, 0x29, 0xca, 0xba, 0xce, + 0xad, 0x22, 0xab, 0xba, 0x01, 0xee, 0x19, 0x98, 0x6f, 0x51, 0x92, 0xbc, 0xb0, 0xef, 0xc3, 0x20, + 0x61, 0x05, 0xe3, 0xfa, 0xb6, 0x0d, 0x70, 0x7f, 0xc1, 0xe8, 0x0d, 0xa5, 0xac, 0xa2, 0x09, 0x96, + 0x48, 0x65, 0xcd, 0xba, 0x62, 0x3c, 0x15, 0x2d, 0x4b, 0x81, 0xe0, 0x13, 0x0c, 0x0b, 0x56, 0xa5, + 0xb5, 0x97, 0xf6, 0x3f, 0xb5, 0x3b, 0xd6, 0x8c, 0x53, 0xd5, 0xde, 0xa3, 0x3e, 0xa5, 0x5b, 0x22, + 0xda, 0xaf, 0xa5, 0x66, 0x2b, 0xe9, 0xfe, 0x36, 0xe0, 0xe8, 0x43, 0x91, 0x5e, 0xc8, 0x9f, 0x05, + 0x7e, 0x24, 0x3c, 0xc1, 0xa2, 0x33, 0x91, 0x9d, 0xeb, 0x89, 0x9c, 0x80, 0xb9, 0xc4, 0x3c, 0x5b, + 0x4a, 0x35, 0x93, 0x41, 0xa4, 0x51, 0x20, 0xe1, 0x81, 0xb6, 
0x2c, 0x16, 0x28, 0xe3, 0xeb, 0x92, + 0xf6, 0x93, 0xbe, 0x81, 0x6d, 0x91, 0xb6, 0xcb, 0x3f, 0x77, 0x54, 0x9b, 0x67, 0xfd, 0x36, 0x37, + 0x9b, 0x89, 0xee, 0x69, 0xf9, 0x0b, 0x94, 0xb3, 0x96, 0x18, 0xde, 0x6a, 0x5a, 0x83, 0xdb, 0x4d, + 0x2b, 0x8c, 0xe1, 0x50, 0xad, 0xeb, 0xff, 0xa9, 0x1f, 0x28, 0xf5, 0xc7, 0x93, 0xfe, 0xae, 0xcf, + 0xea, 0x67, 0xab, 0x3f, 0xc2, 0x0e, 0x0a, 0x5f, 0xc0, 0xe0, 0x2a, 0x4f, 0xe5, 0x72, 0xbb, 0xb0, + 0xa9, 0x7c, 0x6e, 0x98, 0xa1, 0x0f, 0x7b, 0x84, 0x23, 0xd9, 0x9e, 0xb1, 0xef, 0x18, 0xde, 0x6e, + 0xa4, 0x88, 0xe1, 0x3b, 0x30, 0xd3, 0x66, 0xe5, 0xb6, 0xa6, 0xdc, 0x75, 0x76, 0x3d, 0x2b, 0x38, + 0xe9, 0x7b, 0xd3, 0x6c, 0x6b, 0xa4, 0x25, 0xa6, 0xd3, 0x2f, 0xaf, 0xb3, 0x5c, 0x2e, 0xab, 0xc5, + 0x24, 0x61, 0xa5, 0x9f, 0xb1, 0x82, 0xd0, 0xcc, 0x57, 0x1f, 0xf3, 0xa2, 0xfa, 0xd6, 0x1c, 0x92, + 0xf3, 0x0c, 0xe9, 0x79, 0xc6, 0x7c, 0x89, 0x42, 0xa6, 0x44, 0x92, 0xde, 0x7f, 0xe5, 0x6f, 0x00, + 0x00, 0x00, 0xff, 0xff, 0xdf, 0x18, 0x64, 0x15, 0x77, 0x04, 0x00, 0x00, +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user/extension_user.proto b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user/extension_user.proto new file mode 100644 index 0000000..033c186 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user/extension_user.proto @@ -0,0 +1,102 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto2"; + +import "extension_base/extension_base.proto"; +import "extension_extra/extension_extra.proto"; + +package extension_user; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/extension_user"; + +message UserMessage { + optional string name = 1; + optional string rank = 2; +} + +// Extend with a message +extend extension_base.BaseMessage { + optional UserMessage user_message = 5; +} + +// Extend with a foreign message +extend extension_base.BaseMessage { + optional extension_extra.ExtraMessage extra_message = 9; +} + +// Extend with some primitive types +extend extension_base.BaseMessage { + optional int32 width = 6; + optional int64 area = 7; +} + +// Extend inside the scope of another type +message LoudMessage { + extend extension_base.BaseMessage { + optional uint32 volume = 8; + } + extensions 100 to max; +} + +// Extend inside the scope of another type, using a message. +message LoginMessage { + extend extension_base.BaseMessage { + optional UserMessage user_message = 16; + } +} + +// Extend with a repeated field +extend extension_base.BaseMessage { + repeated Detail detail = 17; +} + +message Detail { + optional string color = 1; +} + +// An extension of an extension +message Announcement { + optional string words = 1; + extend LoudMessage { + optional Announcement loud_ext = 100; + } +} + +// Something that can be put in a message set. +message OldStyleParcel { + extend extension_base.OldStyleMessage { + optional OldStyleParcel message_set_extension = 2001; + } + + required string name = 1; + optional int32 height = 2; +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc/grpc.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc/grpc.pb.go new file mode 100644 index 0000000..1bc0283 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc/grpc.pb.go @@ -0,0 +1,444 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc/grpc.proto + +package testing // import "github.com/golang/protobuf/protoc-gen-go/testdata/grpc" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type SimpleRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SimpleRequest) Reset() { *m = SimpleRequest{} } +func (m *SimpleRequest) String() string { return proto.CompactTextString(m) } +func (*SimpleRequest) ProtoMessage() {} +func (*SimpleRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_grpc_65bf3902e49ee873, []int{0} +} +func (m *SimpleRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SimpleRequest.Unmarshal(m, b) +} +func (m *SimpleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SimpleRequest.Marshal(b, m, deterministic) +} +func (dst *SimpleRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SimpleRequest.Merge(dst, src) +} +func (m *SimpleRequest) XXX_Size() int { + return xxx_messageInfo_SimpleRequest.Size(m) +} +func (m *SimpleRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SimpleRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SimpleRequest proto.InternalMessageInfo + +type SimpleResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SimpleResponse) Reset() { *m = SimpleResponse{} } +func (m *SimpleResponse) String() string { return proto.CompactTextString(m) } +func (*SimpleResponse) ProtoMessage() {} +func (*SimpleResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_grpc_65bf3902e49ee873, []int{1} +} +func (m *SimpleResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SimpleResponse.Unmarshal(m, b) +} +func (m *SimpleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SimpleResponse.Marshal(b, m, deterministic) +} +func (dst *SimpleResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SimpleResponse.Merge(dst, src) +} +func (m *SimpleResponse) XXX_Size() int { + return xxx_messageInfo_SimpleResponse.Size(m) +} +func (m *SimpleResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SimpleResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SimpleResponse proto.InternalMessageInfo + +type StreamMsg struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StreamMsg) Reset() { *m = StreamMsg{} } +func (m *StreamMsg) String() string { return proto.CompactTextString(m) } +func (*StreamMsg) ProtoMessage() {} +func (*StreamMsg) Descriptor() ([]byte, []int) { + return fileDescriptor_grpc_65bf3902e49ee873, []int{2} +} +func (m *StreamMsg) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StreamMsg.Unmarshal(m, b) +} +func (m *StreamMsg) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StreamMsg.Marshal(b, m, deterministic) +} +func (dst *StreamMsg) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamMsg.Merge(dst, src) +} +func (m *StreamMsg) XXX_Size() int { + return xxx_messageInfo_StreamMsg.Size(m) +} +func (m *StreamMsg) XXX_DiscardUnknown() { + xxx_messageInfo_StreamMsg.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamMsg proto.InternalMessageInfo + +type StreamMsg2 struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StreamMsg2) Reset() { *m = StreamMsg2{} } +func (m *StreamMsg2) String() string { return proto.CompactTextString(m) } +func (*StreamMsg2) 
ProtoMessage() {} +func (*StreamMsg2) Descriptor() ([]byte, []int) { + return fileDescriptor_grpc_65bf3902e49ee873, []int{3} +} +func (m *StreamMsg2) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StreamMsg2.Unmarshal(m, b) +} +func (m *StreamMsg2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StreamMsg2.Marshal(b, m, deterministic) +} +func (dst *StreamMsg2) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamMsg2.Merge(dst, src) +} +func (m *StreamMsg2) XXX_Size() int { + return xxx_messageInfo_StreamMsg2.Size(m) +} +func (m *StreamMsg2) XXX_DiscardUnknown() { + xxx_messageInfo_StreamMsg2.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamMsg2 proto.InternalMessageInfo + +func init() { + proto.RegisterType((*SimpleRequest)(nil), "grpc.testing.SimpleRequest") + proto.RegisterType((*SimpleResponse)(nil), "grpc.testing.SimpleResponse") + proto.RegisterType((*StreamMsg)(nil), "grpc.testing.StreamMsg") + proto.RegisterType((*StreamMsg2)(nil), "grpc.testing.StreamMsg2") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// TestClient is the client API for Test service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type TestClient interface { + UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) + // This RPC streams from the server only. + Downstream(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (Test_DownstreamClient, error) + // This RPC streams from the client. + Upstream(ctx context.Context, opts ...grpc.CallOption) (Test_UpstreamClient, error) + // This one streams in both directions. + Bidi(ctx context.Context, opts ...grpc.CallOption) (Test_BidiClient, error) +} + +type testClient struct { + cc *grpc.ClientConn +} + +func NewTestClient(cc *grpc.ClientConn) TestClient { + return &testClient{cc} +} + +func (c *testClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { + out := new(SimpleResponse) + err := c.cc.Invoke(ctx, "/grpc.testing.Test/UnaryCall", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *testClient) Downstream(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (Test_DownstreamClient, error) { + stream, err := c.cc.NewStream(ctx, &_Test_serviceDesc.Streams[0], "/grpc.testing.Test/Downstream", opts...) + if err != nil { + return nil, err + } + x := &testDownstreamClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Test_DownstreamClient interface { + Recv() (*StreamMsg, error) + grpc.ClientStream +} + +type testDownstreamClient struct { + grpc.ClientStream +} + +func (x *testDownstreamClient) Recv() (*StreamMsg, error) { + m := new(StreamMsg) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *testClient) Upstream(ctx context.Context, opts ...grpc.CallOption) (Test_UpstreamClient, error) { + stream, err := c.cc.NewStream(ctx, &_Test_serviceDesc.Streams[1], "/grpc.testing.Test/Upstream", opts...) 
+ if err != nil { + return nil, err + } + x := &testUpstreamClient{stream} + return x, nil +} + +type Test_UpstreamClient interface { + Send(*StreamMsg) error + CloseAndRecv() (*SimpleResponse, error) + grpc.ClientStream +} + +type testUpstreamClient struct { + grpc.ClientStream +} + +func (x *testUpstreamClient) Send(m *StreamMsg) error { + return x.ClientStream.SendMsg(m) +} + +func (x *testUpstreamClient) CloseAndRecv() (*SimpleResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(SimpleResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *testClient) Bidi(ctx context.Context, opts ...grpc.CallOption) (Test_BidiClient, error) { + stream, err := c.cc.NewStream(ctx, &_Test_serviceDesc.Streams[2], "/grpc.testing.Test/Bidi", opts...) + if err != nil { + return nil, err + } + x := &testBidiClient{stream} + return x, nil +} + +type Test_BidiClient interface { + Send(*StreamMsg) error + Recv() (*StreamMsg2, error) + grpc.ClientStream +} + +type testBidiClient struct { + grpc.ClientStream +} + +func (x *testBidiClient) Send(m *StreamMsg) error { + return x.ClientStream.SendMsg(m) +} + +func (x *testBidiClient) Recv() (*StreamMsg2, error) { + m := new(StreamMsg2) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// TestServer is the server API for Test service. +type TestServer interface { + UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) + // This RPC streams from the server only. + Downstream(*SimpleRequest, Test_DownstreamServer) error + // This RPC streams from the client. + Upstream(Test_UpstreamServer) error + // This one streams in both directions. + Bidi(Test_BidiServer) error +} + +func RegisterTestServer(s *grpc.Server, srv TestServer) { + s.RegisterService(&_Test_serviceDesc, srv) +} + +func _Test_UnaryCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SimpleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TestServer).UnaryCall(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.testing.Test/UnaryCall", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TestServer).UnaryCall(ctx, req.(*SimpleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Test_Downstream_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SimpleRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(TestServer).Downstream(m, &testDownstreamServer{stream}) +} + +type Test_DownstreamServer interface { + Send(*StreamMsg) error + grpc.ServerStream +} + +type testDownstreamServer struct { + grpc.ServerStream +} + +func (x *testDownstreamServer) Send(m *StreamMsg) error { + return x.ServerStream.SendMsg(m) +} + +func _Test_Upstream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TestServer).Upstream(&testUpstreamServer{stream}) +} + +type Test_UpstreamServer interface { + SendAndClose(*SimpleResponse) error + Recv() (*StreamMsg, error) + grpc.ServerStream +} + +type testUpstreamServer struct { + grpc.ServerStream +} + +func (x *testUpstreamServer) SendAndClose(m *SimpleResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *testUpstreamServer) Recv() (*StreamMsg, error) { + m := new(StreamMsg) + if err := 
x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Test_Bidi_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TestServer).Bidi(&testBidiServer{stream}) +} + +type Test_BidiServer interface { + Send(*StreamMsg2) error + Recv() (*StreamMsg, error) + grpc.ServerStream +} + +type testBidiServer struct { + grpc.ServerStream +} + +func (x *testBidiServer) Send(m *StreamMsg2) error { + return x.ServerStream.SendMsg(m) +} + +func (x *testBidiServer) Recv() (*StreamMsg, error) { + m := new(StreamMsg) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _Test_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.testing.Test", + HandlerType: (*TestServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "UnaryCall", + Handler: _Test_UnaryCall_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Downstream", + Handler: _Test_Downstream_Handler, + ServerStreams: true, + }, + { + StreamName: "Upstream", + Handler: _Test_Upstream_Handler, + ClientStreams: true, + }, + { + StreamName: "Bidi", + Handler: _Test_Bidi_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc/grpc.proto", +} + +func init() { proto.RegisterFile("grpc/grpc.proto", fileDescriptor_grpc_65bf3902e49ee873) } + +var fileDescriptor_grpc_65bf3902e49ee873 = []byte{ + // 244 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4f, 0x2f, 0x2a, 0x48, + 0xd6, 0x07, 0x11, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0x3c, 0x60, 0x76, 0x49, 0x6a, 0x71, + 0x49, 0x66, 0x5e, 0xba, 0x12, 0x3f, 0x17, 0x6f, 0x70, 0x66, 0x6e, 0x41, 0x4e, 0x6a, 0x50, 0x6a, + 0x61, 0x69, 0x6a, 0x71, 0x89, 0x92, 0x00, 0x17, 0x1f, 0x4c, 0xa0, 0xb8, 0x20, 0x3f, 0xaf, 0x38, + 0x55, 0x89, 0x9b, 0x8b, 0x33, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0xd7, 0xb7, 0x38, 0x5d, 0x89, 0x87, + 0x8b, 0x0b, 0xce, 0x31, 0x32, 0x9a, 0xc1, 0xc4, 0xc5, 0x12, 0x92, 0x5a, 0x5c, 0x22, 0xe4, 0xc6, + 0xc5, 0x19, 0x9a, 0x97, 0x58, 0x54, 0xe9, 0x9c, 0x98, 0x93, 0x23, 0x24, 0xad, 0x87, 0x6c, 0x85, + 0x1e, 0x8a, 0xf9, 0x52, 0x32, 0xd8, 0x25, 0x21, 0x76, 0x09, 0xb9, 0x70, 0x71, 0xb9, 0xe4, 0x97, + 0xe7, 0x15, 0x83, 0xad, 0xc0, 0x6f, 0x90, 0x38, 0x9a, 0x24, 0xcc, 0x55, 0x06, 0x8c, 0x42, 0xce, + 0x5c, 0x1c, 0xa1, 0x05, 0x50, 0x33, 0x70, 0x29, 0xc3, 0xef, 0x10, 0x0d, 0x46, 0x21, 0x5b, 0x2e, + 0x16, 0xa7, 0xcc, 0x94, 0x4c, 0xdc, 0x06, 0x48, 0xe0, 0x90, 0x30, 0xd2, 0x60, 0x34, 0x60, 0x74, + 0x72, 0x88, 0xb2, 0x4b, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, + 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x07, 0xc7, 0x40, 0x52, 0x69, 0x1a, 0x84, 0x91, 0xac, 0x9b, 0x9e, + 0x9a, 0xa7, 0x9b, 0x9e, 0xaf, 0x0f, 0x32, 0x22, 0x25, 0xb1, 0x24, 0x11, 0x1c, 0x4d, 0xd6, 0x50, + 0x03, 0x93, 0xd8, 0xc0, 0x8a, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x90, 0xb9, 0x95, 0x42, + 0xc2, 0x01, 0x00, 0x00, +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc/grpc.proto b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc/grpc.proto new file mode 100644 index 0000000..0e5c64a --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc/grpc.proto @@ -0,0 +1,61 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package grpc.testing; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/grpc;testing"; + +message SimpleRequest { +} + +message SimpleResponse { +} + +message StreamMsg { +} + +message StreamMsg2 { +} + +service Test { + rpc UnaryCall(SimpleRequest) returns (SimpleResponse); + + // This RPC streams from the server only. + rpc Downstream(SimpleRequest) returns (stream StreamMsg); + + // This RPC streams from the client. + rpc Upstream(stream StreamMsg) returns (SimpleResponse); + + // This one streams in both directions. + rpc Bidi(stream StreamMsg) returns (stream StreamMsg2); +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/a.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/a.pb.go new file mode 100644 index 0000000..d67ada6 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/a.pb.go @@ -0,0 +1,110 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: import_public/a.proto + +package import_public // import "github.com/golang/protobuf/protoc-gen-go/testdata/import_public" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import sub "github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// M from public import import_public/sub/a.proto +type M = sub.M + +// E from public import import_public/sub/a.proto +type E = sub.E + +var E_name = sub.E_name +var E_value = sub.E_value + +const E_ZERO = E(sub.E_ZERO) + +// Ignoring public import of Local from import_public/b.proto + +type Public struct { + M *sub.M `protobuf:"bytes,1,opt,name=m,proto3" json:"m,omitempty"` + E sub.E `protobuf:"varint,2,opt,name=e,proto3,enum=goproto.test.import_public.sub.E" json:"e,omitempty"` + Local *Local `protobuf:"bytes,3,opt,name=local,proto3" json:"local,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Public) Reset() { *m = Public{} } +func (m *Public) String() string { return proto.CompactTextString(m) } +func (*Public) ProtoMessage() {} +func (*Public) Descriptor() ([]byte, []int) { + return fileDescriptor_a_c0314c022b7c17d8, []int{0} +} +func (m *Public) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Public.Unmarshal(m, b) +} +func (m *Public) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Public.Marshal(b, m, deterministic) +} +func (dst *Public) XXX_Merge(src proto.Message) { + xxx_messageInfo_Public.Merge(dst, src) +} +func (m *Public) XXX_Size() int { + return xxx_messageInfo_Public.Size(m) +} +func (m *Public) XXX_DiscardUnknown() { + xxx_messageInfo_Public.DiscardUnknown(m) +} + +var xxx_messageInfo_Public proto.InternalMessageInfo + +func (m *Public) GetM() *sub.M { + if m != nil { + return m.M + } + return nil +} + +func (m *Public) GetE() sub.E { + if m != nil { + return m.E + } + return sub.E_ZERO +} + +func (m *Public) GetLocal() *Local { + if m != nil { + return m.Local + } + return nil +} + +func init() { + proto.RegisterType((*Public)(nil), "goproto.test.import_public.Public") +} + +func init() { proto.RegisterFile("import_public/a.proto", fileDescriptor_a_c0314c022b7c17d8) } + +var fileDescriptor_a_c0314c022b7c17d8 = []byte{ + // 200 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xcd, 0xcc, 0x2d, 0xc8, + 0x2f, 0x2a, 0x89, 0x2f, 0x28, 0x4d, 0xca, 0xc9, 0x4c, 0xd6, 0x4f, 0xd4, 0x2b, 0x28, 0xca, 0x2f, + 0xc9, 0x17, 0x92, 0x4a, 0xcf, 0x07, 0x33, 0xf4, 0x4a, 0x52, 0x8b, 0x4b, 0xf4, 0x50, 0xd4, 0x48, + 0x49, 0xa2, 0x6a, 0x29, 0x2e, 0x4d, 0x82, 0x69, 0x93, 0x42, 0x33, 0x2d, 0x09, 0x22, 0xac, 0xb4, + 0x98, 0x91, 0x8b, 0x2d, 0x00, 0x2c, 0x24, 0xa4, 0xcf, 0xc5, 0x98, 0x2b, 0xc1, 0xa8, 0xc0, 0xa8, + 0xc1, 0x6d, 0xa4, 0xa8, 0x87, 0xdb, 0x12, 0xbd, 0xe2, 0xd2, 0x24, 0x3d, 0xdf, 0x20, 0xc6, 0x5c, + 0x90, 0x86, 0x54, 0x09, 0x26, 0x05, 0x46, 0x0d, 0x3e, 0xc2, 0x1a, 0x5c, 0x83, 0x18, 0x53, 0x85, + 0xcc, 0xb9, 0x58, 0x73, 0xf2, 0x93, 0x13, 0x73, 0x24, 0x98, 0x09, 0xdb, 0xe2, 0x03, 0x52, 0x18, + 0x04, 0x51, 0xef, 0xe4, 0x18, 0x65, 0x9f, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, + 0xab, 0x9f, 0x9e, 0x9f, 0x93, 0x98, 0x97, 0xae, 0x0f, 0xd6, 0x9a, 0x54, 0x9a, 0x06, 0x61, 0x24, + 0xeb, 0xa6, 0xa7, 0xe6, 0xe9, 0xa6, 0xe7, 0xeb, 0x83, 0xcc, 0x4a, 0x49, 0x2c, 0x49, 0xd4, 0x47, + 0x31, 0x2f, 0x80, 0x21, 0x80, 0x31, 0x89, 0x0d, 0xac, 0xd2, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, + 0x70, 0xc5, 0xc3, 0x79, 0x5a, 0x01, 0x00, 0x00, +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/a.proto 
b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/a.proto new file mode 100644 index 0000000..957ad89 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/a.proto @@ -0,0 +1,45 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package goproto.test.import_public; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/import_public"; + +import public "import_public/sub/a.proto"; // Different Go package. +import public "import_public/b.proto"; // Same Go package. + +message Public { + goproto.test.import_public.sub.M m = 1; + goproto.test.import_public.sub.E e = 2; + Local local = 3; +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/b.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/b.pb.go new file mode 100644 index 0000000..24569ab --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/b.pb.go @@ -0,0 +1,87 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: import_public/b.proto + +package import_public // import "github.com/golang/protobuf/protoc-gen-go/testdata/import_public" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import sub "github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Local struct { + M *sub.M `protobuf:"bytes,1,opt,name=m,proto3" json:"m,omitempty"` + E sub.E `protobuf:"varint,2,opt,name=e,proto3,enum=goproto.test.import_public.sub.E" json:"e,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Local) Reset() { *m = Local{} } +func (m *Local) String() string { return proto.CompactTextString(m) } +func (*Local) ProtoMessage() {} +func (*Local) Descriptor() ([]byte, []int) { + return fileDescriptor_b_7f20a805fad67bd0, []int{0} +} +func (m *Local) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Local.Unmarshal(m, b) +} +func (m *Local) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Local.Marshal(b, m, deterministic) +} +func (dst *Local) XXX_Merge(src proto.Message) { + xxx_messageInfo_Local.Merge(dst, src) +} +func (m *Local) XXX_Size() int { + return xxx_messageInfo_Local.Size(m) +} +func (m *Local) XXX_DiscardUnknown() { + xxx_messageInfo_Local.DiscardUnknown(m) +} + +var xxx_messageInfo_Local proto.InternalMessageInfo + +func (m *Local) GetM() *sub.M { + if m != nil { + return m.M + } + return nil +} + +func (m *Local) GetE() sub.E { + if m != nil { + return m.E + } + return sub.E_ZERO +} + +func init() { + proto.RegisterType((*Local)(nil), "goproto.test.import_public.Local") +} + +func init() { proto.RegisterFile("import_public/b.proto", fileDescriptor_b_7f20a805fad67bd0) } + +var fileDescriptor_b_7f20a805fad67bd0 = []byte{ + // 174 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xcd, 0xcc, 0x2d, 0xc8, + 0x2f, 0x2a, 0x89, 0x2f, 0x28, 0x4d, 0xca, 0xc9, 0x4c, 0xd6, 0x4f, 0xd2, 0x2b, 0x28, 0xca, 0x2f, + 0xc9, 0x17, 0x92, 0x4a, 0xcf, 0x07, 0x33, 0xf4, 0x4a, 0x52, 0x8b, 0x4b, 0xf4, 0x50, 0xd4, 0x48, + 0x49, 0xa2, 0x6a, 0x29, 0x2e, 0x4d, 0xd2, 0x4f, 0x84, 0x68, 0x53, 0xca, 0xe4, 0x62, 0xf5, 0xc9, + 0x4f, 0x4e, 0xcc, 0x11, 0xd2, 0xe7, 0x62, 0xcc, 0x95, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x36, 0x52, + 0xd4, 0xc3, 0x6d, 0x96, 0x5e, 0x71, 0x69, 0x92, 0x9e, 0x6f, 0x10, 0x63, 0x2e, 0x48, 0x43, 0xaa, + 0x04, 0x93, 0x02, 0xa3, 0x06, 0x1f, 0x61, 0x0d, 0xae, 0x41, 0x8c, 0xa9, 0x4e, 0x8e, 0x51, 0xf6, + 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89, 0x79, + 0xe9, 0xfa, 0x60, 0x6d, 0x49, 0xa5, 0x69, 0x10, 0x46, 0xb2, 0x6e, 0x7a, 0x6a, 0x9e, 0x6e, 0x7a, + 0xbe, 0x3e, 0xc8, 0x9c, 0x94, 0xc4, 0x92, 0x44, 0x7d, 0x14, 0xb3, 0x92, 0xd8, 0xc0, 0xaa, 0x8c, + 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xd6, 0x2b, 0x5f, 0x8e, 0x04, 0x01, 0x00, 0x00, +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/b.proto b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/b.proto new file mode 100644 index 0000000..1dbca3e --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/b.proto @@ -0,0 +1,43 @@ +// Go support for Protocol 
Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package goproto.test.import_public; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/import_public"; + +import "import_public/sub/a.proto"; + +message Local { + goproto.test.import_public.sub.M m = 1; + goproto.test.import_public.sub.E e = 2; +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/a.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/a.pb.go new file mode 100644 index 0000000..be667c9 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/a.pb.go @@ -0,0 +1,100 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: import_public/sub/a.proto + +package sub // import "github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type E int32 + +const ( + E_ZERO E = 0 +) + +var E_name = map[int32]string{ + 0: "ZERO", +} +var E_value = map[string]int32{ + "ZERO": 0, +} + +func (x E) String() string { + return proto.EnumName(E_name, int32(x)) +} +func (E) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_a_91ca0264a534463a, []int{0} +} + +type M struct { + // Field using a type in the same Go package, but a different source file. + M2 *M2 `protobuf:"bytes,1,opt,name=m2,proto3" json:"m2,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *M) Reset() { *m = M{} } +func (m *M) String() string { return proto.CompactTextString(m) } +func (*M) ProtoMessage() {} +func (*M) Descriptor() ([]byte, []int) { + return fileDescriptor_a_91ca0264a534463a, []int{0} +} +func (m *M) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_M.Unmarshal(m, b) +} +func (m *M) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_M.Marshal(b, m, deterministic) +} +func (dst *M) XXX_Merge(src proto.Message) { + xxx_messageInfo_M.Merge(dst, src) +} +func (m *M) XXX_Size() int { + return xxx_messageInfo_M.Size(m) +} +func (m *M) XXX_DiscardUnknown() { + xxx_messageInfo_M.DiscardUnknown(m) +} + +var xxx_messageInfo_M proto.InternalMessageInfo + +func (m *M) GetM2() *M2 { + if m != nil { + return m.M2 + } + return nil +} + +func init() { + proto.RegisterType((*M)(nil), "goproto.test.import_public.sub.M") + proto.RegisterEnum("goproto.test.import_public.sub.E", E_name, E_value) +} + +func init() { proto.RegisterFile("import_public/sub/a.proto", fileDescriptor_a_91ca0264a534463a) } + +var fileDescriptor_a_91ca0264a534463a = []byte{ + // 172 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0xcc, 0x2d, 0xc8, + 0x2f, 0x2a, 0x89, 0x2f, 0x28, 0x4d, 0xca, 0xc9, 0x4c, 0xd6, 0x2f, 0x2e, 0x4d, 0xd2, 0x4f, 0xd4, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0x4b, 0xcf, 0x07, 0x33, 0xf4, 0x4a, 0x52, 0x8b, 0x4b, + 0xf4, 0x50, 0xd4, 0xe9, 0x15, 0x97, 0x26, 0x49, 0x61, 0xd1, 0x9a, 0x04, 0xd1, 0xaa, 0x64, 0xce, + 0xc5, 0xe8, 0x2b, 0x64, 0xc4, 0xc5, 0x94, 0x6b, 0x24, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x6d, 0xa4, + 0xa4, 0x87, 0xdf, 0x30, 0x3d, 0x5f, 0xa3, 0x20, 0xa6, 0x5c, 0x23, 0x2d, 0x5e, 0x2e, 0x46, 0x57, + 0x21, 0x0e, 0x2e, 0x96, 0x28, 0xd7, 0x20, 0x7f, 0x01, 0x06, 0x27, 0xd7, 0x28, 0xe7, 0xf4, 0xcc, + 0x92, 0x8c, 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0xfd, 0xf4, 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0x7d, + 0xb0, 0x39, 0x49, 0xa5, 0x69, 0x10, 0x46, 0xb2, 0x6e, 0x7a, 0x6a, 0x9e, 0x6e, 0x7a, 0xbe, 0x3e, + 0xc8, 0xe0, 0x94, 0xc4, 0x92, 0x44, 0x7d, 0x0c, 0x67, 0x25, 0xb1, 0x81, 0x55, 0x1a, 0x03, 0x02, + 0x00, 0x00, 0xff, 0xff, 0x81, 0xcc, 0x07, 0x7d, 0xed, 0x00, 0x00, 0x00, +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/a.proto b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/a.proto new file mode 100644 index 0000000..4494c81 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/a.proto @@ -0,0 +1,47 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package goproto.test.import_public.sub; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub"; + +import "import_public/sub/b.proto"; + +message M { + // Field using a type in the same Go package, but a different source file. + M2 m2 = 1; +} + +enum E { + ZERO = 0; +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/b.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/b.pb.go new file mode 100644 index 0000000..d57a3bb --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/b.pb.go @@ -0,0 +1,67 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: import_public/sub/b.proto + +package sub // import "github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type M2 struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *M2) Reset() { *m = M2{} } +func (m *M2) String() string { return proto.CompactTextString(m) } +func (*M2) ProtoMessage() {} +func (*M2) Descriptor() ([]byte, []int) { + return fileDescriptor_b_eba25180453d86b4, []int{0} +} +func (m *M2) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_M2.Unmarshal(m, b) +} +func (m *M2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_M2.Marshal(b, m, deterministic) +} +func (dst *M2) XXX_Merge(src proto.Message) { + xxx_messageInfo_M2.Merge(dst, src) +} +func (m *M2) XXX_Size() int { + return xxx_messageInfo_M2.Size(m) +} +func (m *M2) XXX_DiscardUnknown() { + xxx_messageInfo_M2.DiscardUnknown(m) +} + +var xxx_messageInfo_M2 proto.InternalMessageInfo + +func init() { + proto.RegisterType((*M2)(nil), "goproto.test.import_public.sub.M2") +} + +func init() { proto.RegisterFile("import_public/sub/b.proto", fileDescriptor_b_eba25180453d86b4) } + +var fileDescriptor_b_eba25180453d86b4 = []byte{ + // 127 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0xcc, 0x2d, 0xc8, + 0x2f, 0x2a, 0x89, 0x2f, 0x28, 0x4d, 0xca, 0xc9, 0x4c, 0xd6, 0x2f, 0x2e, 0x4d, 0xd2, 0x4f, 0xd2, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0x4b, 0xcf, 0x07, 0x33, 0xf4, 0x4a, 0x52, 0x8b, 0x4b, + 0xf4, 0x50, 0xd4, 0xe9, 0x15, 0x97, 0x26, 0x29, 0xb1, 0x70, 0x31, 0xf9, 0x1a, 0x39, 0xb9, 0x46, + 0x39, 0xa7, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xe7, 0x24, + 0xe6, 0xa5, 0xeb, 0x83, 0xf5, 0x25, 0x95, 0xa6, 0x41, 0x18, 0xc9, 0xba, 0xe9, 0xa9, 0x79, 0xba, + 0xe9, 0xf9, 0xfa, 0x20, 0x83, 0x52, 0x12, 0x4b, 0x12, 0xf5, 0x31, 0x2c, 0x4d, 0x62, 0x03, 0xab, + 0x34, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x64, 0x42, 0xe4, 0xa8, 0x90, 0x00, 0x00, 0x00, +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/b.proto b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/b.proto new file mode 100644 index 0000000..c7299e0 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/b.proto @@ -0,0 +1,39 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package goproto.test.import_public.sub; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub"; + +message M2 { +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/fmt/m.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/fmt/m.pb.go new file mode 100644 index 0000000..ca312d6 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/fmt/m.pb.go @@ -0,0 +1,66 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: imports/fmt/m.proto + +package fmt // import "github.com/golang/protobuf/protoc-gen-go/testdata/imports/fmt" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type M struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *M) Reset() { *m = M{} } +func (m *M) String() string { return proto.CompactTextString(m) } +func (*M) ProtoMessage() {} +func (*M) Descriptor() ([]byte, []int) { + return fileDescriptor_m_867dd34c461422b8, []int{0} +} +func (m *M) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_M.Unmarshal(m, b) +} +func (m *M) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_M.Marshal(b, m, deterministic) +} +func (dst *M) XXX_Merge(src proto.Message) { + xxx_messageInfo_M.Merge(dst, src) +} +func (m *M) XXX_Size() int { + return xxx_messageInfo_M.Size(m) +} +func (m *M) XXX_DiscardUnknown() { + xxx_messageInfo_M.DiscardUnknown(m) +} + +var xxx_messageInfo_M proto.InternalMessageInfo + +func init() { + proto.RegisterType((*M)(nil), "fmt.M") +} + +func init() { proto.RegisterFile("imports/fmt/m.proto", fileDescriptor_m_867dd34c461422b8) } + +var fileDescriptor_m_867dd34c461422b8 = []byte{ + // 109 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xce, 0xcc, 0x2d, 0xc8, + 0x2f, 0x2a, 0x29, 0xd6, 0x4f, 0xcb, 0x2d, 0xd1, 0xcf, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, + 0x62, 0x4e, 0xcb, 0x2d, 0x51, 0x62, 0xe6, 0x62, 0xf4, 0x75, 0xb2, 0x8f, 0xb2, 0x4d, 0xcf, 0x2c, + 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x07, + 0x2b, 0x4a, 0x2a, 0x4d, 0x83, 0x30, 0x92, 0x75, 0xd3, 0x53, 0xf3, 0x74, 0xd3, 0xf3, 0xf5, 0x4b, + 0x52, 0x8b, 0x4b, 0x52, 0x12, 0x4b, 0x12, 0xf5, 0x91, 0x8c, 0x4c, 0x62, 0x03, 0xab, 0x31, 0x06, + 0x04, 0x00, 0x00, 0xff, 0xff, 0xc4, 0xc9, 0xee, 0xbe, 0x68, 0x00, 0x00, 0x00, +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/fmt/m.proto b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/fmt/m.proto new file mode 100644 index 0000000..142d8cf --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/fmt/m.proto @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; +package fmt; +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/imports/fmt"; +message M {} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m1.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m1.pb.go new file mode 100644 index 0000000..963f7c7 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m1.pb.go @@ -0,0 +1,130 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: imports/test_a_1/m1.proto + +package test_a_1 // import "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type E1 int32 + +const ( + E1_E1_ZERO E1 = 0 +) + +var E1_name = map[int32]string{ + 0: "E1_ZERO", +} +var E1_value = map[string]int32{ + "E1_ZERO": 0, +} + +func (x E1) String() string { + return proto.EnumName(E1_name, int32(x)) +} +func (E1) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_m1_56a2598431d21e61, []int{0} +} + +type M1 struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *M1) Reset() { *m = M1{} } +func (m *M1) String() string { return proto.CompactTextString(m) } +func (*M1) ProtoMessage() {} +func (*M1) Descriptor() ([]byte, []int) { + return fileDescriptor_m1_56a2598431d21e61, []int{0} +} +func (m *M1) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_M1.Unmarshal(m, b) +} +func (m *M1) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_M1.Marshal(b, m, deterministic) +} +func (dst *M1) XXX_Merge(src proto.Message) { + xxx_messageInfo_M1.Merge(dst, src) +} +func (m *M1) XXX_Size() int { + return xxx_messageInfo_M1.Size(m) +} +func (m *M1) XXX_DiscardUnknown() { + xxx_messageInfo_M1.DiscardUnknown(m) +} + +var xxx_messageInfo_M1 proto.InternalMessageInfo + +type M1_1 struct { + M1 *M1 `protobuf:"bytes,1,opt,name=m1,proto3" json:"m1,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *M1_1) Reset() { *m = M1_1{} } +func (m *M1_1) String() string { return proto.CompactTextString(m) } +func (*M1_1) ProtoMessage() {} +func (*M1_1) Descriptor() ([]byte, []int) { + return fileDescriptor_m1_56a2598431d21e61, []int{1} +} +func (m *M1_1) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_M1_1.Unmarshal(m, b) +} +func (m *M1_1) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_M1_1.Marshal(b, m, deterministic) +} +func (dst *M1_1) XXX_Merge(src proto.Message) { + xxx_messageInfo_M1_1.Merge(dst, src) +} +func (m *M1_1) XXX_Size() int { + return xxx_messageInfo_M1_1.Size(m) +} +func (m *M1_1) XXX_DiscardUnknown() { + xxx_messageInfo_M1_1.DiscardUnknown(m) +} + +var xxx_messageInfo_M1_1 proto.InternalMessageInfo + +func (m *M1_1) GetM1() *M1 { + if m != nil { + return m.M1 + } + return nil +} + +func init() { + proto.RegisterType((*M1)(nil), "test.a.M1") + proto.RegisterType((*M1_1)(nil), "test.a.M1_1") + proto.RegisterEnum("test.a.E1", E1_name, E1_value) +} + +func init() { proto.RegisterFile("imports/test_a_1/m1.proto", fileDescriptor_m1_56a2598431d21e61) } + +var fileDescriptor_m1_56a2598431d21e61 = []byte{ + // 165 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0xcc, 0x2d, 0xc8, + 0x2f, 0x2a, 0x29, 0xd6, 0x2f, 0x49, 0x2d, 0x2e, 0x89, 0x4f, 0x8c, 0x37, 0xd4, 0xcf, 0x35, 0xd4, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0x09, 0xe9, 0x25, 0x2a, 0xb1, 0x70, 0x31, 0xf9, + 0x1a, 0x2a, 0x29, 0x71, 0xb1, 0xf8, 0x1a, 0xc6, 0x1b, 0x0a, 0x49, 0x71, 0x31, 0xe5, 0x1a, 0x4a, + 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x1b, 0x71, 0xe9, 0x41, 0x94, 0xe8, 0xf9, 0x1a, 0x06, 0x31, 0xe5, + 0x1a, 0x6a, 0x09, 0x72, 0x31, 0xb9, 0x1a, 0x0a, 0x71, 0x73, 0xb1, 0xbb, 0x1a, 0xc6, 0x47, 0xb9, + 0x06, 0xf9, 0x0b, 0x30, 0x38, 0xb9, 0x44, 0x39, 0xa5, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, + 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, 0xeb, 0x83, 0xcd, 0x4f, 0x2a, 0x4d, 0x83, + 0x30, 0x92, 0x75, 
0xd3, 0x53, 0xf3, 0x74, 0xd3, 0xf3, 0xc1, 0x4e, 0x48, 0x49, 0x2c, 0x49, 0xd4, + 0x47, 0x77, 0x53, 0x12, 0x1b, 0x58, 0xa1, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0xcc, 0xae, 0xc9, + 0xcd, 0xae, 0x00, 0x00, 0x00, +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m1.proto b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m1.proto new file mode 100644 index 0000000..da54c1e --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m1.proto @@ -0,0 +1,44 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; +package test.a; +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1"; + +message M1 {} + +message M1_1 { + M1 m1 = 1; +} + +enum E1 { + E1_ZERO = 0; +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m2.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m2.pb.go new file mode 100644 index 0000000..1b629bf --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m2.pb.go @@ -0,0 +1,67 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: imports/test_a_1/m2.proto + +package test_a_1 // import "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type M2 struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *M2) Reset() { *m = M2{} } +func (m *M2) String() string { return proto.CompactTextString(m) } +func (*M2) ProtoMessage() {} +func (*M2) Descriptor() ([]byte, []int) { + return fileDescriptor_m2_ccd6356c045a9ac3, []int{0} +} +func (m *M2) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_M2.Unmarshal(m, b) +} +func (m *M2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_M2.Marshal(b, m, deterministic) +} +func (dst *M2) XXX_Merge(src proto.Message) { + xxx_messageInfo_M2.Merge(dst, src) +} +func (m *M2) XXX_Size() int { + return xxx_messageInfo_M2.Size(m) +} +func (m *M2) XXX_DiscardUnknown() { + xxx_messageInfo_M2.DiscardUnknown(m) +} + +var xxx_messageInfo_M2 proto.InternalMessageInfo + +func init() { + proto.RegisterType((*M2)(nil), "test.a.M2") +} + +func init() { proto.RegisterFile("imports/test_a_1/m2.proto", fileDescriptor_m2_ccd6356c045a9ac3) } + +var fileDescriptor_m2_ccd6356c045a9ac3 = []byte{ + // 114 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0xcc, 0x2d, 0xc8, + 0x2f, 0x2a, 0x29, 0xd6, 0x2f, 0x49, 0x2d, 0x2e, 0x89, 0x4f, 0x8c, 0x37, 0xd4, 0xcf, 0x35, 0xd2, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0x09, 0xe9, 0x25, 0x2a, 0xb1, 0x70, 0x31, 0xf9, + 0x1a, 0x39, 0xb9, 0x44, 0x39, 0xa5, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, + 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, 0xeb, 0x83, 0x15, 0x26, 0x95, 0xa6, 0x41, 0x18, 0xc9, 0xba, + 0xe9, 0xa9, 0x79, 0xba, 0xe9, 0xf9, 0x60, 0xb3, 0x52, 0x12, 0x4b, 0x12, 0xf5, 0xd1, 0x0d, 0x4f, + 0x62, 0x03, 0x2b, 0x34, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xe3, 0xe0, 0x7e, 0xc0, 0x77, 0x00, + 0x00, 0x00, +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m2.proto b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m2.proto new file mode 100644 index 0000000..49499dc --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m2.proto @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; +package test.a; +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1"; +message M2 {} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m3.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m3.pb.go new file mode 100644 index 0000000..e3895d2 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m3.pb.go @@ -0,0 +1,67 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: imports/test_a_2/m3.proto + +package test_a_2 // import "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type M3 struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *M3) Reset() { *m = M3{} } +func (m *M3) String() string { return proto.CompactTextString(m) } +func (*M3) ProtoMessage() {} +func (*M3) Descriptor() ([]byte, []int) { + return fileDescriptor_m3_de310e87d08d4216, []int{0} +} +func (m *M3) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_M3.Unmarshal(m, b) +} +func (m *M3) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_M3.Marshal(b, m, deterministic) +} +func (dst *M3) XXX_Merge(src proto.Message) { + xxx_messageInfo_M3.Merge(dst, src) +} +func (m *M3) XXX_Size() int { + return xxx_messageInfo_M3.Size(m) +} +func (m *M3) XXX_DiscardUnknown() { + xxx_messageInfo_M3.DiscardUnknown(m) +} + +var xxx_messageInfo_M3 proto.InternalMessageInfo + +func init() { + proto.RegisterType((*M3)(nil), "test.a.M3") +} + +func init() { proto.RegisterFile("imports/test_a_2/m3.proto", fileDescriptor_m3_de310e87d08d4216) } + +var fileDescriptor_m3_de310e87d08d4216 = []byte{ + // 114 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0xcc, 0x2d, 0xc8, + 0x2f, 0x2a, 0x29, 0xd6, 0x2f, 0x49, 0x2d, 0x2e, 0x89, 0x4f, 0x8c, 0x37, 0xd2, 0xcf, 0x35, 0xd6, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0x09, 0xe9, 0x25, 0x2a, 0xb1, 0x70, 0x31, 0xf9, + 0x1a, 0x3b, 0xb9, 0x44, 0x39, 0xa5, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, + 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, 0xeb, 0x83, 0x15, 0x26, 0x95, 0xa6, 0x41, 0x18, 0xc9, 0xba, + 0xe9, 0xa9, 0x79, 0xba, 0xe9, 0xf9, 0x60, 0xb3, 0x52, 0x12, 0x4b, 0x12, 0xf5, 0xd1, 0x0d, 0x4f, + 0x62, 0x03, 0x2b, 0x34, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x23, 0x86, 0x27, 0x47, 0x77, 0x00, + 0x00, 0x00, +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m3.proto b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m3.proto new file mode 100644 index 0000000..5e811ef --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m3.proto @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; +package test.a; +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2"; +message M3 {} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m4.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m4.pb.go new file mode 100644 index 0000000..65a3bad --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m4.pb.go @@ -0,0 +1,67 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: imports/test_a_2/m4.proto + +package test_a_2 // import "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type M4 struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *M4) Reset() { *m = M4{} } +func (m *M4) String() string { return proto.CompactTextString(m) } +func (*M4) ProtoMessage() {} +func (*M4) Descriptor() ([]byte, []int) { + return fileDescriptor_m4_da12b386229f3791, []int{0} +} +func (m *M4) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_M4.Unmarshal(m, b) +} +func (m *M4) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_M4.Marshal(b, m, deterministic) +} +func (dst *M4) XXX_Merge(src proto.Message) { + xxx_messageInfo_M4.Merge(dst, src) +} +func (m *M4) XXX_Size() int { + return xxx_messageInfo_M4.Size(m) +} +func (m *M4) XXX_DiscardUnknown() { + xxx_messageInfo_M4.DiscardUnknown(m) +} + +var xxx_messageInfo_M4 proto.InternalMessageInfo + +func init() { + proto.RegisterType((*M4)(nil), "test.a.M4") +} + +func init() { proto.RegisterFile("imports/test_a_2/m4.proto", fileDescriptor_m4_da12b386229f3791) } + +var fileDescriptor_m4_da12b386229f3791 = []byte{ + // 114 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0xcc, 0x2d, 0xc8, + 0x2f, 0x2a, 0x29, 0xd6, 0x2f, 0x49, 0x2d, 0x2e, 0x89, 0x4f, 0x8c, 0x37, 0xd2, 0xcf, 0x35, 0xd1, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0x09, 0xe9, 0x25, 0x2a, 0xb1, 0x70, 0x31, 0xf9, + 0x9a, 0x38, 0xb9, 0x44, 0x39, 0xa5, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, + 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, 0xeb, 0x83, 0x15, 0x26, 0x95, 0xa6, 0x41, 0x18, 0xc9, 0xba, + 0xe9, 0xa9, 0x79, 0xba, 0xe9, 0xf9, 0x60, 0xb3, 0x52, 0x12, 0x4b, 0x12, 0xf5, 0xd1, 0x0d, 0x4f, + 0x62, 0x03, 0x2b, 0x34, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x58, 0xcb, 0x10, 0xc8, 0x77, 0x00, + 0x00, 0x00, +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m4.proto b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m4.proto new file mode 100644 index 0000000..8f8fe3e --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m4.proto @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; +package test.a; +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2"; +message M4 {} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m1.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m1.pb.go new file mode 100644 index 0000000..831f414 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m1.pb.go @@ -0,0 +1,67 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: imports/test_b_1/m1.proto + +package beta // import "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type M1 struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *M1) Reset() { *m = M1{} } +func (m *M1) String() string { return proto.CompactTextString(m) } +func (*M1) ProtoMessage() {} +func (*M1) Descriptor() ([]byte, []int) { + return fileDescriptor_m1_aff127b054aec649, []int{0} +} +func (m *M1) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_M1.Unmarshal(m, b) +} +func (m *M1) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_M1.Marshal(b, m, deterministic) +} +func (dst *M1) XXX_Merge(src proto.Message) { + xxx_messageInfo_M1.Merge(dst, src) +} +func (m *M1) XXX_Size() int { + return xxx_messageInfo_M1.Size(m) +} +func (m *M1) XXX_DiscardUnknown() { + xxx_messageInfo_M1.DiscardUnknown(m) +} + +var xxx_messageInfo_M1 proto.InternalMessageInfo + +func init() { + proto.RegisterType((*M1)(nil), "test.b.part1.M1") +} + +func init() { proto.RegisterFile("imports/test_b_1/m1.proto", fileDescriptor_m1_aff127b054aec649) } + +var fileDescriptor_m1_aff127b054aec649 = []byte{ + // 125 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0xcc, 0x2d, 0xc8, + 0x2f, 0x2a, 0x29, 0xd6, 0x2f, 0x49, 0x2d, 0x2e, 0x89, 0x4f, 0x8a, 0x37, 0xd4, 0xcf, 0x35, 0xd4, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x01, 0x09, 0xe9, 0x25, 0xe9, 0x15, 0x24, 0x16, 0x95, + 0x18, 0x2a, 0xb1, 0x70, 0x31, 0xf9, 0x1a, 0x3a, 0x79, 0x46, 0xb9, 0xa7, 0x67, 0x96, 0x64, 0x94, + 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, 0xeb, 0x83, 0x95, 0x27, + 0x95, 0xa6, 0x41, 0x18, 0xc9, 0xba, 0xe9, 0xa9, 0x79, 0xba, 0xe9, 0xf9, 0x60, 0x13, 0x53, 0x12, + 0x4b, 0x12, 0xf5, 0xd1, 0xad, 0xb0, 0x4e, 0x4a, 0x2d, 0x49, 0x4c, 0x62, 0x03, 0xab, 0x36, 0x06, + 0x04, 0x00, 0x00, 0xff, 0xff, 0x4a, 0xf1, 0x3b, 0x7f, 0x82, 0x00, 0x00, 0x00, +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m1.proto b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m1.proto new file mode 100644 index 0000000..2c35ec4 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m1.proto @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; +package test.b.part1; +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1;beta"; +message M1 {} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m2.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m2.pb.go new file mode 100644 index 0000000..bc74105 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m2.pb.go @@ -0,0 +1,67 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: imports/test_b_1/m2.proto + +package beta // import "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type M2 struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *M2) Reset() { *m = M2{} } +func (m *M2) String() string { return proto.CompactTextString(m) } +func (*M2) ProtoMessage() {} +func (*M2) Descriptor() ([]byte, []int) { + return fileDescriptor_m2_0c59cab35ba1b0d8, []int{0} +} +func (m *M2) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_M2.Unmarshal(m, b) +} +func (m *M2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_M2.Marshal(b, m, deterministic) +} +func (dst *M2) XXX_Merge(src proto.Message) { + xxx_messageInfo_M2.Merge(dst, src) +} +func (m *M2) XXX_Size() int { + return xxx_messageInfo_M2.Size(m) +} +func (m *M2) XXX_DiscardUnknown() { + xxx_messageInfo_M2.DiscardUnknown(m) +} + +var xxx_messageInfo_M2 proto.InternalMessageInfo + +func init() { + proto.RegisterType((*M2)(nil), "test.b.part2.M2") +} + +func init() { proto.RegisterFile("imports/test_b_1/m2.proto", fileDescriptor_m2_0c59cab35ba1b0d8) } + +var fileDescriptor_m2_0c59cab35ba1b0d8 = []byte{ + // 125 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0xcc, 0x2d, 0xc8, + 0x2f, 0x2a, 0x29, 0xd6, 0x2f, 0x49, 0x2d, 0x2e, 0x89, 0x4f, 0x8a, 0x37, 0xd4, 0xcf, 0x35, 0xd2, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x01, 0x09, 0xe9, 0x25, 0xe9, 0x15, 0x24, 0x16, 0x95, + 0x18, 0x29, 0xb1, 0x70, 0x31, 0xf9, 0x1a, 0x39, 0x79, 0x46, 0xb9, 0xa7, 0x67, 0x96, 0x64, 0x94, + 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, 0xeb, 0x83, 0x95, 0x27, + 0x95, 0xa6, 0x41, 0x18, 0xc9, 0xba, 0xe9, 0xa9, 0x79, 0xba, 0xe9, 0xf9, 0x60, 0x13, 0x53, 0x12, + 0x4b, 0x12, 0xf5, 0xd1, 0xad, 0xb0, 0x4e, 0x4a, 0x2d, 0x49, 0x4c, 0x62, 0x03, 0xab, 0x36, 0x06, + 0x04, 0x00, 0x00, 0xff, 0xff, 0x44, 0x29, 0xbe, 0x6d, 0x82, 0x00, 0x00, 0x00, +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m2.proto b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m2.proto new file mode 100644 index 0000000..13723be --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m2.proto @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; +package test.b.part2; +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1;beta"; +message M2 {} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m1.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m1.pb.go new file mode 100644 index 0000000..4f79694 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m1.pb.go @@ -0,0 +1,80 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: imports/test_import_a1m1.proto + +package imports // import "github.com/golang/protobuf/protoc-gen-go/testdata/imports" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import test_a_1 "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type A1M1 struct { + F *test_a_1.M1 `protobuf:"bytes,1,opt,name=f,proto3" json:"f,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *A1M1) Reset() { *m = A1M1{} } +func (m *A1M1) String() string { return proto.CompactTextString(m) } +func (*A1M1) ProtoMessage() {} +func (*A1M1) Descriptor() ([]byte, []int) { + return fileDescriptor_test_import_a1m1_d7f2b5c638a69f6e, []int{0} +} +func (m *A1M1) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_A1M1.Unmarshal(m, b) +} +func (m *A1M1) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_A1M1.Marshal(b, m, deterministic) +} +func (dst *A1M1) XXX_Merge(src proto.Message) { + xxx_messageInfo_A1M1.Merge(dst, src) +} +func (m *A1M1) XXX_Size() int { + return xxx_messageInfo_A1M1.Size(m) +} +func (m *A1M1) XXX_DiscardUnknown() { + xxx_messageInfo_A1M1.DiscardUnknown(m) +} + +var xxx_messageInfo_A1M1 proto.InternalMessageInfo + +func (m *A1M1) GetF() *test_a_1.M1 { + if m != nil { + return m.F + } + return nil +} + +func init() { + proto.RegisterType((*A1M1)(nil), "test.A1M1") +} + +func init() { + proto.RegisterFile("imports/test_import_a1m1.proto", fileDescriptor_test_import_a1m1_d7f2b5c638a69f6e) +} + +var fileDescriptor_test_import_a1m1_d7f2b5c638a69f6e = []byte{ + // 149 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcb, 0xcc, 0x2d, 0xc8, + 0x2f, 0x2a, 0x29, 0xd6, 0x2f, 0x49, 0x2d, 0x2e, 0x89, 0x87, 0x70, 0xe2, 0x13, 0x0d, 0x73, 0x0d, + 0xf5, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, 0x58, 0x40, 0xe2, 0x52, 0x92, 0x28, 0xaa, 0x12, 0xe3, + 0x0d, 0xf5, 0x61, 0x0a, 0x94, 0x14, 0xb8, 0x58, 0x1c, 0x0d, 0x7d, 0x0d, 0x85, 0x24, 0xb8, 0x18, + 0xd3, 0x24, 0x18, 0x15, 0x18, 0x35, 0xb8, 0x8d, 0xb8, 0xf4, 0x40, 0xca, 0xf4, 0x12, 0xf5, 0x7c, + 0x0d, 0x83, 0x18, 0xd3, 0x9c, 0xac, 0xa3, 0x2c, 0xd3, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, + 0xf3, 0x73, 0xf5, 0xd3, 0xf3, 0x73, 0x12, 0xf3, 0xd2, 0xf5, 0xc1, 0x9a, 0x93, 0x4a, 0xd3, 0x20, + 0x8c, 0x64, 0xdd, 0xf4, 0xd4, 0x3c, 0xdd, 0xf4, 0x7c, 0xb0, 0xf9, 0x29, 0x89, 0x25, 0x89, 0xfa, + 0x50, 0x0b, 0x93, 0xd8, 0xc0, 0xf2, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x84, 0x2f, 0x18, + 0x23, 0xa8, 0x00, 0x00, 0x00, +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m1.proto b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m1.proto new file mode 100644 index 0000000..abf07f2 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m1.proto @@ -0,0 +1,42 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package test; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/imports"; + +import "imports/test_a_1/m1.proto"; + +message A1M1 { + test.a.M1 f = 1; +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m2.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m2.pb.go new file mode 100644 index 0000000..f5aa2e8 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m2.pb.go @@ -0,0 +1,80 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: imports/test_import_a1m2.proto + +package imports // import "github.com/golang/protobuf/protoc-gen-go/testdata/imports" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import test_a_1 "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type A1M2 struct { + F *test_a_1.M2 `protobuf:"bytes,1,opt,name=f,proto3" json:"f,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *A1M2) Reset() { *m = A1M2{} } +func (m *A1M2) String() string { return proto.CompactTextString(m) } +func (*A1M2) ProtoMessage() {} +func (*A1M2) Descriptor() ([]byte, []int) { + return fileDescriptor_test_import_a1m2_9a3281ce9464e116, []int{0} +} +func (m *A1M2) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_A1M2.Unmarshal(m, b) +} +func (m *A1M2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_A1M2.Marshal(b, m, deterministic) +} +func (dst *A1M2) XXX_Merge(src proto.Message) { + xxx_messageInfo_A1M2.Merge(dst, src) +} +func (m *A1M2) XXX_Size() int { + return xxx_messageInfo_A1M2.Size(m) +} +func (m *A1M2) XXX_DiscardUnknown() { + xxx_messageInfo_A1M2.DiscardUnknown(m) +} + +var xxx_messageInfo_A1M2 proto.InternalMessageInfo + +func (m *A1M2) GetF() *test_a_1.M2 { + if m != nil { + return m.F + } + return nil +} + +func init() { + proto.RegisterType((*A1M2)(nil), "test.A1M2") +} + +func init() { + proto.RegisterFile("imports/test_import_a1m2.proto", fileDescriptor_test_import_a1m2_9a3281ce9464e116) +} + +var fileDescriptor_test_import_a1m2_9a3281ce9464e116 = []byte{ + // 149 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcb, 0xcc, 0x2d, 0xc8, + 0x2f, 0x2a, 0x29, 0xd6, 0x2f, 0x49, 0x2d, 0x2e, 0x89, 0x87, 0x70, 0xe2, 0x13, 0x0d, 0x73, 0x8d, + 0xf4, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, 0x58, 0x40, 0xe2, 0x52, 0x92, 0x28, 0xaa, 0x12, 0xe3, + 0x0d, 0xf5, 0x61, 0x0a, 0x94, 0x14, 0xb8, 0x58, 0x1c, 0x0d, 0x7d, 0x8d, 0x84, 0x24, 0xb8, 0x18, + 0xd3, 0x24, 0x18, 0x15, 0x18, 0x35, 0xb8, 0x8d, 0xb8, 0xf4, 0x40, 0xca, 0xf4, 0x12, 0xf5, 0x7c, + 0x8d, 0x82, 0x18, 0xd3, 0x9c, 0xac, 0xa3, 0x2c, 0xd3, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, + 0xf3, 0x73, 0xf5, 0xd3, 0xf3, 0x73, 0x12, 0xf3, 0xd2, 0xf5, 0xc1, 0x9a, 0x93, 0x4a, 0xd3, 0x20, + 0x8c, 0x64, 0xdd, 0xf4, 0xd4, 0x3c, 0xdd, 0xf4, 0x7c, 0xb0, 0xf9, 0x29, 0x89, 0x25, 0x89, 0xfa, + 0x50, 0x0b, 0x93, 0xd8, 0xc0, 0xf2, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1f, 0x88, 0xfb, + 0xea, 0xa8, 0x00, 0x00, 0x00, +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m2.proto b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m2.proto new file mode 100644 index 0000000..5c53950 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m2.proto @@ -0,0 +1,42 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package test; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/imports"; + +import "imports/test_a_1/m2.proto"; + +message A1M2 { + test.a.M2 f = 1; +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_all.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_all.pb.go new file mode 100644 index 0000000..4f9fd04 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_all.pb.go @@ -0,0 +1,138 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: imports/test_import_all.proto + +package imports // import "github.com/golang/protobuf/protoc-gen-go/testdata/imports" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import fmt1 "github.com/golang/protobuf/protoc-gen-go/testdata/imports/fmt" +import test_a_1 "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1" +import test_a_2 "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2" +import test_b_1 "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type All struct { + Am1 *test_a_1.M1 `protobuf:"bytes,1,opt,name=am1,proto3" json:"am1,omitempty"` + Am2 *test_a_1.M2 `protobuf:"bytes,2,opt,name=am2,proto3" json:"am2,omitempty"` + Am3 *test_a_2.M3 `protobuf:"bytes,3,opt,name=am3,proto3" json:"am3,omitempty"` + Am4 *test_a_2.M4 `protobuf:"bytes,4,opt,name=am4,proto3" json:"am4,omitempty"` + Bm1 *test_b_1.M1 `protobuf:"bytes,5,opt,name=bm1,proto3" json:"bm1,omitempty"` + Bm2 *test_b_1.M2 `protobuf:"bytes,6,opt,name=bm2,proto3" json:"bm2,omitempty"` + Fmt *fmt1.M `protobuf:"bytes,7,opt,name=fmt,proto3" json:"fmt,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *All) Reset() { *m = All{} } +func (m *All) String() string { return proto.CompactTextString(m) } +func (*All) ProtoMessage() {} +func (*All) Descriptor() ([]byte, []int) { + return fileDescriptor_test_import_all_b41dc4592e4a4f3b, []int{0} +} +func (m *All) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_All.Unmarshal(m, b) +} +func (m *All) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_All.Marshal(b, m, deterministic) +} +func (dst *All) XXX_Merge(src proto.Message) { + xxx_messageInfo_All.Merge(dst, src) +} +func (m *All) XXX_Size() int { + return xxx_messageInfo_All.Size(m) +} +func (m *All) XXX_DiscardUnknown() { + xxx_messageInfo_All.DiscardUnknown(m) +} + +var xxx_messageInfo_All proto.InternalMessageInfo + +func (m *All) GetAm1() *test_a_1.M1 { + if m != nil { + return m.Am1 + } + return nil +} + +func (m *All) GetAm2() *test_a_1.M2 { + if m != nil { + return m.Am2 + } + return nil +} + +func (m *All) GetAm3() *test_a_2.M3 { + if m != nil { + return m.Am3 + } + return nil +} + +func (m *All) GetAm4() *test_a_2.M4 { + if m != nil { + return m.Am4 + } + return nil +} + +func (m *All) GetBm1() *test_b_1.M1 { + if m != nil { + return m.Bm1 + } + return nil +} + +func (m *All) GetBm2() *test_b_1.M2 { + if m != nil { + return m.Bm2 + } + return nil +} + +func (m *All) GetFmt() *fmt1.M { + if m != nil { + return m.Fmt + } + return nil +} + +func init() { + proto.RegisterType((*All)(nil), "test.All") +} + +func init() { + proto.RegisterFile("imports/test_import_all.proto", fileDescriptor_test_import_all_b41dc4592e4a4f3b) +} + +var fileDescriptor_test_import_all_b41dc4592e4a4f3b = []byte{ + // 258 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0xd0, 0xb1, 0x4e, 0xc3, 0x30, + 0x10, 0x06, 0x60, 0x15, 0x97, 0x20, 0x99, 0x05, 0x85, 0xc5, 0x20, 0x90, 0x50, 0x27, 0x96, 0xda, + 0xb2, 0x9d, 0x05, 0x31, 0xc1, 0xde, 0xa5, 0x23, 0x4b, 0x64, 0x97, 0xc6, 0x54, 0xf2, 0xd5, 0x51, + 0x7a, 0x7d, 0x5e, 0x5e, 0x05, 0xd9, 0x07, 0x12, 0x84, 0x66, 0x4b, 0xfe, 0xef, 0xb7, 0xce, 0x3e, + 0x7e, 0xbf, 0x83, 0x3e, 0x0d, 0x78, 0x50, 0xb8, 0x3d, 0x60, 0x4b, 0x3f, 0xad, 0x8b, 0x51, 0xf6, + 0x43, 0xc2, 0x54, 0xcf, 0x73, 0x7c, 0x7b, 0xf3, 0xa7, 0xe4, 0x5a, 0xad, 0x40, 0x53, 0xe1, 0x14, + 0x99, 0x09, 0x32, 0x0a, 0xec, 0x34, 0x35, 0x27, 0xc9, 0x4f, 0xcf, 0xf2, 0xbf, 0x67, 0x5d, 0xff, + 0x50, 0x07, 0xa8, 0x80, 0xc2, 0xc5, 0xe7, 0x8c, 0xb3, 0x97, 0x18, 0xeb, 0x3b, 0xce, 0x1c, 0x68, + 0x31, 0x7b, 0x98, 0x3d, 0x5e, 0x1a, 0x2e, 0xf3, 0x69, 0xe9, 0xe4, 0x4a, 0xaf, 0x73, 0x4c, 0x6a, + 0xc4, 0xd9, 0x48, 0x4d, 0x56, 0x43, 0x6a, 0x05, 0x1b, 0xa9, 0xcd, 0x6a, 0x49, 0x1b, 0x31, 0x1f, + 0x69, 0x93, 0xb5, 0xa9, 0x17, 0x9c, 0x79, 0xd0, 0xe2, 0xbc, 0xe8, 0x15, 
0xa9, 0x97, 0xbd, 0x1b, + 0x50, 0x97, 0xe9, 0x1e, 0x34, 0x75, 0x8c, 0xa8, 0xfe, 0x77, 0x4c, 0xb9, 0x83, 0x07, 0x53, 0x0b, + 0xce, 0x3a, 0x40, 0x71, 0x51, 0x3a, 0x95, 0xec, 0x00, 0xe5, 0x6a, 0x9d, 0xa3, 0xd7, 0xe7, 0xb7, + 0xa7, 0xb0, 0xc3, 0x8f, 0xa3, 0x97, 0x9b, 0x04, 0x2a, 0xa4, 0xe8, 0xf6, 0x41, 0x95, 0xc7, 0xfb, + 0x63, 0x47, 0x1f, 0x9b, 0x65, 0xd8, 0xee, 0x97, 0x21, 0x95, 0xa5, 0xbd, 0x3b, 0x74, 0xea, 0x7b, + 0x55, 0xbe, 0x2a, 0x6e, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0x95, 0x39, 0xa3, 0x82, 0x03, 0x02, + 0x00, 0x00, +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_all.proto b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_all.proto new file mode 100644 index 0000000..582d722 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_all.proto @@ -0,0 +1,58 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package test; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/imports"; + +// test_a_1/m*.proto are in the same Go package and proto package. +// test_a_*/*.proto are in different Go packages, but the same proto package. +// test_b_1/*.proto are in the same Go package, but different proto packages. +// fmt/m.proto has a package name which conflicts with "fmt". 
+import "imports/test_a_1/m1.proto"; +import "imports/test_a_1/m2.proto"; +import "imports/test_a_2/m3.proto"; +import "imports/test_a_2/m4.proto"; +import "imports/test_b_1/m1.proto"; +import "imports/test_b_1/m2.proto"; +import "imports/fmt/m.proto"; + +message All { + test.a.M1 am1 = 1; + test.a.M2 am2 = 2; + test.a.M3 am3 = 3; + test.a.M4 am4 = 4; + test.b.part1.M1 bm1 = 5; + test.b.part2.M2 bm2 = 6; + fmt.M fmt = 7; +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.pb.go new file mode 100644 index 0000000..da0fdf8 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.pb.go @@ -0,0 +1,96 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: multi/multi1.proto + +package multitest // import "github.com/golang/protobuf/protoc-gen-go/testdata/multi" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Multi1 struct { + Multi2 *Multi2 `protobuf:"bytes,1,req,name=multi2" json:"multi2,omitempty"` + Color *Multi2_Color `protobuf:"varint,2,opt,name=color,enum=multitest.Multi2_Color" json:"color,omitempty"` + HatType *Multi3_HatType `protobuf:"varint,3,opt,name=hat_type,json=hatType,enum=multitest.Multi3_HatType" json:"hat_type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Multi1) Reset() { *m = Multi1{} } +func (m *Multi1) String() string { return proto.CompactTextString(m) } +func (*Multi1) ProtoMessage() {} +func (*Multi1) Descriptor() ([]byte, []int) { + return fileDescriptor_multi1_08e50c6822e808b8, []int{0} +} +func (m *Multi1) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Multi1.Unmarshal(m, b) +} +func (m *Multi1) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Multi1.Marshal(b, m, deterministic) +} +func (dst *Multi1) XXX_Merge(src proto.Message) { + xxx_messageInfo_Multi1.Merge(dst, src) +} +func (m *Multi1) XXX_Size() int { + return xxx_messageInfo_Multi1.Size(m) +} +func (m *Multi1) XXX_DiscardUnknown() { + xxx_messageInfo_Multi1.DiscardUnknown(m) +} + +var xxx_messageInfo_Multi1 proto.InternalMessageInfo + +func (m *Multi1) GetMulti2() *Multi2 { + if m != nil { + return m.Multi2 + } + return nil +} + +func (m *Multi1) GetColor() Multi2_Color { + if m != nil && m.Color != nil { + return *m.Color + } + return Multi2_BLUE +} + +func (m *Multi1) GetHatType() Multi3_HatType { + if m != nil && m.HatType != nil { + return *m.HatType + } + return Multi3_FEDORA +} + +func init() { + proto.RegisterType((*Multi1)(nil), "multitest.Multi1") +} + +func init() { proto.RegisterFile("multi/multi1.proto", fileDescriptor_multi1_08e50c6822e808b8) } + +var fileDescriptor_multi1_08e50c6822e808b8 = []byte{ + // 200 bytes of a gzipped FileDescriptorProto + 
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xca, 0x2d, 0xcd, 0x29, + 0xc9, 0xd4, 0x07, 0x93, 0x86, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0x9c, 0x60, 0x5e, 0x49, + 0x6a, 0x71, 0x89, 0x14, 0xb2, 0xb4, 0x11, 0x44, 0x1a, 0x45, 0xcc, 0x18, 0x22, 0xa6, 0x34, 0x83, + 0x91, 0x8b, 0xcd, 0x17, 0x6c, 0x86, 0x90, 0x26, 0x17, 0x1b, 0x44, 0xb9, 0x04, 0xa3, 0x02, 0x93, + 0x06, 0xb7, 0x91, 0xa0, 0x1e, 0xdc, 0x38, 0x3d, 0xb0, 0x12, 0xa3, 0x20, 0xa8, 0x02, 0x21, 0x5d, + 0x2e, 0xd6, 0xe4, 0xfc, 0x9c, 0xfc, 0x22, 0x09, 0x26, 0x05, 0x46, 0x0d, 0x3e, 0x23, 0x71, 0x0c, + 0x95, 0x7a, 0xce, 0x20, 0xe9, 0x20, 0x88, 0x2a, 0x21, 0x13, 0x2e, 0x8e, 0x8c, 0xc4, 0x92, 0xf8, + 0x92, 0xca, 0x82, 0x54, 0x09, 0x66, 0xb0, 0x0e, 0x49, 0x74, 0x1d, 0xc6, 0x7a, 0x1e, 0x89, 0x25, + 0x21, 0x95, 0x05, 0xa9, 0x41, 0xec, 0x19, 0x10, 0x86, 0x93, 0x73, 0x94, 0x63, 0x7a, 0x66, 0x49, + 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, 0xba, 0x3e, 0xd8, + 0xd5, 0x49, 0xa5, 0x69, 0x10, 0x46, 0xb2, 0x6e, 0x7a, 0x6a, 0x9e, 0x6e, 0x7a, 0xbe, 0x3e, 0xc8, + 0xa0, 0x94, 0xc4, 0x92, 0x44, 0x88, 0xe7, 0xac, 0xe1, 0x86, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, + 0x60, 0x7d, 0xfc, 0x9f, 0x27, 0x01, 0x00, 0x00, +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.proto b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.proto new file mode 100644 index 0000000..d3a3204 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.proto @@ -0,0 +1,46 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto2"; + +import "multi/multi2.proto"; +import "multi/multi3.proto"; + +package multitest; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/multi;multitest"; + +message Multi1 { + required Multi2 multi2 = 1; + optional Multi2.Color color = 2; + optional Multi3.HatType hat_type = 3; +} + diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.pb.go new file mode 100644 index 0000000..b66ce79 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.pb.go @@ -0,0 +1,128 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: multi/multi2.proto + +package multitest // import "github.com/golang/protobuf/protoc-gen-go/testdata/multi" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Multi2_Color int32 + +const ( + Multi2_BLUE Multi2_Color = 1 + Multi2_GREEN Multi2_Color = 2 + Multi2_RED Multi2_Color = 3 +) + +var Multi2_Color_name = map[int32]string{ + 1: "BLUE", + 2: "GREEN", + 3: "RED", +} +var Multi2_Color_value = map[string]int32{ + "BLUE": 1, + "GREEN": 2, + "RED": 3, +} + +func (x Multi2_Color) Enum() *Multi2_Color { + p := new(Multi2_Color) + *p = x + return p +} +func (x Multi2_Color) String() string { + return proto.EnumName(Multi2_Color_name, int32(x)) +} +func (x *Multi2_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Multi2_Color_value, data, "Multi2_Color") + if err != nil { + return err + } + *x = Multi2_Color(value) + return nil +} +func (Multi2_Color) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_multi2_c47490ad66d93e67, []int{0, 0} +} + +type Multi2 struct { + RequiredValue *int32 `protobuf:"varint,1,req,name=required_value,json=requiredValue" json:"required_value,omitempty"` + Color *Multi2_Color `protobuf:"varint,2,opt,name=color,enum=multitest.Multi2_Color" json:"color,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Multi2) Reset() { *m = Multi2{} } +func (m *Multi2) String() string { return proto.CompactTextString(m) } +func (*Multi2) ProtoMessage() {} +func (*Multi2) Descriptor() ([]byte, []int) { + return fileDescriptor_multi2_c47490ad66d93e67, []int{0} +} +func (m *Multi2) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Multi2.Unmarshal(m, b) +} +func (m *Multi2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Multi2.Marshal(b, m, deterministic) +} +func (dst *Multi2) XXX_Merge(src proto.Message) { + xxx_messageInfo_Multi2.Merge(dst, src) +} +func (m *Multi2) XXX_Size() int { + return xxx_messageInfo_Multi2.Size(m) +} +func (m *Multi2) XXX_DiscardUnknown() { + xxx_messageInfo_Multi2.DiscardUnknown(m) +} + +var xxx_messageInfo_Multi2 
proto.InternalMessageInfo + +func (m *Multi2) GetRequiredValue() int32 { + if m != nil && m.RequiredValue != nil { + return *m.RequiredValue + } + return 0 +} + +func (m *Multi2) GetColor() Multi2_Color { + if m != nil && m.Color != nil { + return *m.Color + } + return Multi2_BLUE +} + +func init() { + proto.RegisterType((*Multi2)(nil), "multitest.Multi2") + proto.RegisterEnum("multitest.Multi2_Color", Multi2_Color_name, Multi2_Color_value) +} + +func init() { proto.RegisterFile("multi/multi2.proto", fileDescriptor_multi2_c47490ad66d93e67) } + +var fileDescriptor_multi2_c47490ad66d93e67 = []byte{ + // 202 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xca, 0x2d, 0xcd, 0x29, + 0xc9, 0xd4, 0x07, 0x93, 0x46, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0x9c, 0x60, 0x5e, 0x49, + 0x6a, 0x71, 0x89, 0x52, 0x2b, 0x23, 0x17, 0x9b, 0x2f, 0x58, 0x4e, 0x48, 0x95, 0x8b, 0xaf, 0x28, + 0xb5, 0xb0, 0x34, 0xb3, 0x28, 0x35, 0x25, 0xbe, 0x2c, 0x31, 0xa7, 0x34, 0x55, 0x82, 0x51, 0x81, + 0x49, 0x83, 0x35, 0x88, 0x17, 0x26, 0x1a, 0x06, 0x12, 0x14, 0xd2, 0xe5, 0x62, 0x4d, 0xce, 0xcf, + 0xc9, 0x2f, 0x92, 0x60, 0x52, 0x60, 0xd4, 0xe0, 0x33, 0x12, 0xd7, 0x83, 0x1b, 0xa6, 0x07, 0x31, + 0x48, 0xcf, 0x19, 0x24, 0x1d, 0x04, 0x51, 0xa5, 0xa4, 0xca, 0xc5, 0x0a, 0xe6, 0x0b, 0x71, 0x70, + 0xb1, 0x38, 0xf9, 0x84, 0xba, 0x0a, 0x30, 0x0a, 0x71, 0x72, 0xb1, 0xba, 0x07, 0xb9, 0xba, 0xfa, + 0x09, 0x30, 0x09, 0xb1, 0x73, 0x31, 0x07, 0xb9, 0xba, 0x08, 0x30, 0x3b, 0x39, 0x47, 0x39, 0xa6, + 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, + 0xeb, 0x83, 0x5d, 0x9b, 0x54, 0x9a, 0x06, 0x61, 0x24, 0xeb, 0xa6, 0xa7, 0xe6, 0xe9, 0xa6, 0xe7, + 0xeb, 0x83, 0xec, 0x4a, 0x49, 0x2c, 0x49, 0x84, 0x78, 0xca, 0x1a, 0x6e, 0x3f, 0x20, 0x00, 0x00, + 0xff, 0xff, 0x49, 0x3b, 0x52, 0x44, 0xec, 0x00, 0x00, 0x00, +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.proto b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.proto new file mode 100644 index 0000000..ec5b431 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.proto @@ -0,0 +1,48 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +package multitest; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/multi;multitest"; + +message Multi2 { + required int32 required_value = 1; + + enum Color { + BLUE = 1; + GREEN = 2; + RED = 3; + }; + optional Color color = 2; +} + diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.pb.go new file mode 100644 index 0000000..f03c350 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.pb.go @@ -0,0 +1,115 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: multi/multi3.proto + +package multitest // import "github.com/golang/protobuf/protoc-gen-go/testdata/multi" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Multi3_HatType int32 + +const ( + Multi3_FEDORA Multi3_HatType = 1 + Multi3_FEZ Multi3_HatType = 2 +) + +var Multi3_HatType_name = map[int32]string{ + 1: "FEDORA", + 2: "FEZ", +} +var Multi3_HatType_value = map[string]int32{ + "FEDORA": 1, + "FEZ": 2, +} + +func (x Multi3_HatType) Enum() *Multi3_HatType { + p := new(Multi3_HatType) + *p = x + return p +} +func (x Multi3_HatType) String() string { + return proto.EnumName(Multi3_HatType_name, int32(x)) +} +func (x *Multi3_HatType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Multi3_HatType_value, data, "Multi3_HatType") + if err != nil { + return err + } + *x = Multi3_HatType(value) + return nil +} +func (Multi3_HatType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_multi3_d55a72b4628b7875, []int{0, 0} +} + +type Multi3 struct { + HatType *Multi3_HatType `protobuf:"varint,1,opt,name=hat_type,json=hatType,enum=multitest.Multi3_HatType" json:"hat_type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Multi3) Reset() { *m = Multi3{} } +func (m *Multi3) String() string { return proto.CompactTextString(m) } +func (*Multi3) ProtoMessage() {} +func (*Multi3) Descriptor() ([]byte, []int) { + return fileDescriptor_multi3_d55a72b4628b7875, []int{0} +} +func (m *Multi3) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Multi3.Unmarshal(m, b) +} +func (m *Multi3) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Multi3.Marshal(b, m, deterministic) +} +func (dst *Multi3) XXX_Merge(src proto.Message) { + xxx_messageInfo_Multi3.Merge(dst, src) +} +func (m *Multi3) XXX_Size() int { + return xxx_messageInfo_Multi3.Size(m) +} +func (m *Multi3) XXX_DiscardUnknown() { + xxx_messageInfo_Multi3.DiscardUnknown(m) +} + +var xxx_messageInfo_Multi3 proto.InternalMessageInfo + +func (m *Multi3) GetHatType() Multi3_HatType { + if m != nil && m.HatType != nil { + return *m.HatType + } + return Multi3_FEDORA +} + +func init() { + proto.RegisterType((*Multi3)(nil), "multitest.Multi3") + proto.RegisterEnum("multitest.Multi3_HatType", Multi3_HatType_name, Multi3_HatType_value) +} + +func init() { proto.RegisterFile("multi/multi3.proto", fileDescriptor_multi3_d55a72b4628b7875) } + +var fileDescriptor_multi3_d55a72b4628b7875 = []byte{ + // 170 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xca, 0x2d, 0xcd, 0x29, + 0xc9, 0xd4, 0x07, 0x93, 0xc6, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0x9c, 0x60, 0x5e, 0x49, + 0x6a, 0x71, 0x89, 0x52, 0x1c, 0x17, 0x9b, 0x2f, 0x58, 0x4a, 0xc8, 0x84, 0x8b, 0x23, 0x23, 0xb1, + 0x24, 0xbe, 0xa4, 0xb2, 0x20, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0xcf, 0x48, 0x52, 0x0f, 0xae, + 0x4e, 0x0f, 0xa2, 0x48, 0xcf, 0x23, 0xb1, 0x24, 0xa4, 0xb2, 0x20, 0x35, 0x88, 0x3d, 0x03, 0xc2, + 0x50, 0x92, 0xe3, 0x62, 0x87, 0x8a, 0x09, 0x71, 0x71, 0xb1, 0xb9, 0xb9, 0xba, 0xf8, 0x07, 0x39, + 0x0a, 0x30, 0x0a, 0xb1, 0x73, 0x31, 0xbb, 0xb9, 0x46, 0x09, 0x30, 0x39, 0x39, 0x47, 0x39, 0xa6, + 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, + 0xeb, 0x83, 0x5d, 0x91, 0x54, 0x9a, 0x06, 0x61, 0x24, 0xeb, 0xa6, 0xa7, 0xe6, 0xe9, 0xa6, 0xe7, + 0xeb, 0x83, 0x2c, 0x4a, 0x49, 0x2c, 0x49, 0x84, 0x38, 0xd6, 0x1a, 0x6e, 0x39, 0x20, 0x00, 0x00, + 0xff, 0xff, 0xd5, 0xa4, 0x1a, 0x0e, 0xc4, 0x00, 0x00, 0x00, +} diff --git 
a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.proto b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.proto new file mode 100644 index 0000000..8690b88 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.proto @@ -0,0 +1,45 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +package multitest; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/multi;multitest"; + +message Multi3 { + enum HatType { + FEDORA = 1; + FEZ = 2; + }; + optional HatType hat_type = 1; +} + diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go new file mode 100644 index 0000000..8cf6a69 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go @@ -0,0 +1,1174 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: my_test/test.proto + +package test // import "github.com/golang/protobuf/protoc-gen-go/testdata/my_test" + +/* +This package holds interesting messages. +*/ + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/golang/protobuf/protoc-gen-go/testdata/multi" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type HatType int32 + +const ( + // deliberately skipping 0 + HatType_FEDORA HatType = 1 + HatType_FEZ HatType = 2 +) + +var HatType_name = map[int32]string{ + 1: "FEDORA", + 2: "FEZ", +} +var HatType_value = map[string]int32{ + "FEDORA": 1, + "FEZ": 2, +} + +func (x HatType) Enum() *HatType { + p := new(HatType) + *p = x + return p +} +func (x HatType) String() string { + return proto.EnumName(HatType_name, int32(x)) +} +func (x *HatType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(HatType_value, data, "HatType") + if err != nil { + return err + } + *x = HatType(value) + return nil +} +func (HatType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_test_2309d445eee26af7, []int{0} +} + +// This enum represents days of the week. +type Days int32 + +const ( + Days_MONDAY Days = 1 + Days_TUESDAY Days = 2 + Days_LUNDI Days = 1 +) + +var Days_name = map[int32]string{ + 1: "MONDAY", + 2: "TUESDAY", + // Duplicate value: 1: "LUNDI", +} +var Days_value = map[string]int32{ + "MONDAY": 1, + "TUESDAY": 2, + "LUNDI": 1, +} + +func (x Days) Enum() *Days { + p := new(Days) + *p = x + return p +} +func (x Days) String() string { + return proto.EnumName(Days_name, int32(x)) +} +func (x *Days) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Days_value, data, "Days") + if err != nil { + return err + } + *x = Days(value) + return nil +} +func (Days) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_test_2309d445eee26af7, []int{1} +} + +type Request_Color int32 + +const ( + Request_RED Request_Color = 0 + Request_GREEN Request_Color = 1 + Request_BLUE Request_Color = 2 +) + +var Request_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var Request_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x Request_Color) Enum() *Request_Color { + p := new(Request_Color) + *p = x + return p +} +func (x Request_Color) String() string { + return proto.EnumName(Request_Color_name, int32(x)) +} +func (x *Request_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Request_Color_value, data, "Request_Color") + if err != nil { + return err + } + *x = Request_Color(value) + return nil +} +func (Request_Color) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_test_2309d445eee26af7, []int{0, 0} +} + +type Reply_Entry_Game int32 + +const ( + Reply_Entry_FOOTBALL Reply_Entry_Game = 1 + Reply_Entry_TENNIS Reply_Entry_Game = 2 +) + +var Reply_Entry_Game_name = map[int32]string{ + 1: "FOOTBALL", + 2: "TENNIS", +} +var Reply_Entry_Game_value = map[string]int32{ + "FOOTBALL": 1, + "TENNIS": 2, +} + +func (x Reply_Entry_Game) Enum() *Reply_Entry_Game { + p := new(Reply_Entry_Game) + *p = x + return p +} +func (x Reply_Entry_Game) String() string { + return proto.EnumName(Reply_Entry_Game_name, int32(x)) +} +func (x *Reply_Entry_Game) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Reply_Entry_Game_value, data, "Reply_Entry_Game") + if err != nil { + return err + } + *x = Reply_Entry_Game(value) + return nil +} +func (Reply_Entry_Game) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_test_2309d445eee26af7, []int{1, 0, 0} +} + +// This is a message that might be sent somewhere. 
+type Request struct { + Key []int64 `protobuf:"varint,1,rep,name=key" json:"key,omitempty"` + // optional imp.ImportedMessage imported_message = 2; + Hue *Request_Color `protobuf:"varint,3,opt,name=hue,enum=my.test.Request_Color" json:"hue,omitempty"` + Hat *HatType `protobuf:"varint,4,opt,name=hat,enum=my.test.HatType,def=1" json:"hat,omitempty"` + // optional imp.ImportedMessage.Owner owner = 6; + Deadline *float32 `protobuf:"fixed32,7,opt,name=deadline,def=inf" json:"deadline,omitempty"` + Somegroup *Request_SomeGroup `protobuf:"group,8,opt,name=SomeGroup,json=somegroup" json:"somegroup,omitempty"` + // This is a map field. It will generate map[int32]string. + NameMapping map[int32]string `protobuf:"bytes,14,rep,name=name_mapping,json=nameMapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // This is a map field whose value type is a message. + MsgMapping map[int64]*Reply `protobuf:"bytes,15,rep,name=msg_mapping,json=msgMapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Reset_ *int32 `protobuf:"varint,12,opt,name=reset" json:"reset,omitempty"` + // This field should not conflict with any getters. + GetKey_ *string `protobuf:"bytes,16,opt,name=get_key,json=getKey" json:"get_key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} +func (*Request) Descriptor() ([]byte, []int) { + return fileDescriptor_test_2309d445eee26af7, []int{0} +} +func (m *Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Request.Unmarshal(m, b) +} +func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Request.Marshal(b, m, deterministic) +} +func (dst *Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Request.Merge(dst, src) +} +func (m *Request) XXX_Size() int { + return xxx_messageInfo_Request.Size(m) +} +func (m *Request) XXX_DiscardUnknown() { + xxx_messageInfo_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Request proto.InternalMessageInfo + +const Default_Request_Hat HatType = HatType_FEDORA + +var Default_Request_Deadline float32 = float32(math.Inf(1)) + +func (m *Request) GetKey() []int64 { + if m != nil { + return m.Key + } + return nil +} + +func (m *Request) GetHue() Request_Color { + if m != nil && m.Hue != nil { + return *m.Hue + } + return Request_RED +} + +func (m *Request) GetHat() HatType { + if m != nil && m.Hat != nil { + return *m.Hat + } + return Default_Request_Hat +} + +func (m *Request) GetDeadline() float32 { + if m != nil && m.Deadline != nil { + return *m.Deadline + } + return Default_Request_Deadline +} + +func (m *Request) GetSomegroup() *Request_SomeGroup { + if m != nil { + return m.Somegroup + } + return nil +} + +func (m *Request) GetNameMapping() map[int32]string { + if m != nil { + return m.NameMapping + } + return nil +} + +func (m *Request) GetMsgMapping() map[int64]*Reply { + if m != nil { + return m.MsgMapping + } + return nil +} + +func (m *Request) GetReset_() int32 { + if m != nil && m.Reset_ != nil { + return *m.Reset_ + } + return 0 +} + +func (m *Request) GetGetKey_() string { + if m != nil && m.GetKey_ != nil { + return *m.GetKey_ + } + return "" +} + +type Request_SomeGroup struct { + GroupField *int32 
`protobuf:"varint,9,opt,name=group_field,json=groupField" json:"group_field,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Request_SomeGroup) Reset() { *m = Request_SomeGroup{} } +func (m *Request_SomeGroup) String() string { return proto.CompactTextString(m) } +func (*Request_SomeGroup) ProtoMessage() {} +func (*Request_SomeGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_test_2309d445eee26af7, []int{0, 0} +} +func (m *Request_SomeGroup) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Request_SomeGroup.Unmarshal(m, b) +} +func (m *Request_SomeGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Request_SomeGroup.Marshal(b, m, deterministic) +} +func (dst *Request_SomeGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_Request_SomeGroup.Merge(dst, src) +} +func (m *Request_SomeGroup) XXX_Size() int { + return xxx_messageInfo_Request_SomeGroup.Size(m) +} +func (m *Request_SomeGroup) XXX_DiscardUnknown() { + xxx_messageInfo_Request_SomeGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_Request_SomeGroup proto.InternalMessageInfo + +func (m *Request_SomeGroup) GetGroupField() int32 { + if m != nil && m.GroupField != nil { + return *m.GroupField + } + return 0 +} + +type Reply struct { + Found []*Reply_Entry `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"` + CompactKeys []int32 `protobuf:"varint,2,rep,packed,name=compact_keys,json=compactKeys" json:"compact_keys,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Reply) Reset() { *m = Reply{} } +func (m *Reply) String() string { return proto.CompactTextString(m) } +func (*Reply) ProtoMessage() {} +func (*Reply) Descriptor() ([]byte, []int) { + return fileDescriptor_test_2309d445eee26af7, []int{1} +} + +var extRange_Reply = []proto.ExtensionRange{ + {Start: 100, End: 536870911}, +} + +func (*Reply) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_Reply +} +func (m *Reply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Reply.Unmarshal(m, b) +} +func (m *Reply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Reply.Marshal(b, m, deterministic) +} +func (dst *Reply) XXX_Merge(src proto.Message) { + xxx_messageInfo_Reply.Merge(dst, src) +} +func (m *Reply) XXX_Size() int { + return xxx_messageInfo_Reply.Size(m) +} +func (m *Reply) XXX_DiscardUnknown() { + xxx_messageInfo_Reply.DiscardUnknown(m) +} + +var xxx_messageInfo_Reply proto.InternalMessageInfo + +func (m *Reply) GetFound() []*Reply_Entry { + if m != nil { + return m.Found + } + return nil +} + +func (m *Reply) GetCompactKeys() []int32 { + if m != nil { + return m.CompactKeys + } + return nil +} + +type Reply_Entry struct { + KeyThatNeeds_1234Camel_CasIng *int64 `protobuf:"varint,1,req,name=key_that_needs_1234camel_CasIng,json=keyThatNeeds1234camelCasIng" json:"key_that_needs_1234camel_CasIng,omitempty"` + Value *int64 `protobuf:"varint,2,opt,name=value,def=7" json:"value,omitempty"` + XMyFieldName_2 *int64 `protobuf:"varint,3,opt,name=_my_field_name_2,json=MyFieldName2" json:"_my_field_name_2,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Reply_Entry) Reset() { *m = Reply_Entry{} } +func (m *Reply_Entry) String() string { return 
proto.CompactTextString(m) } +func (*Reply_Entry) ProtoMessage() {} +func (*Reply_Entry) Descriptor() ([]byte, []int) { + return fileDescriptor_test_2309d445eee26af7, []int{1, 0} +} +func (m *Reply_Entry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Reply_Entry.Unmarshal(m, b) +} +func (m *Reply_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Reply_Entry.Marshal(b, m, deterministic) +} +func (dst *Reply_Entry) XXX_Merge(src proto.Message) { + xxx_messageInfo_Reply_Entry.Merge(dst, src) +} +func (m *Reply_Entry) XXX_Size() int { + return xxx_messageInfo_Reply_Entry.Size(m) +} +func (m *Reply_Entry) XXX_DiscardUnknown() { + xxx_messageInfo_Reply_Entry.DiscardUnknown(m) +} + +var xxx_messageInfo_Reply_Entry proto.InternalMessageInfo + +const Default_Reply_Entry_Value int64 = 7 + +func (m *Reply_Entry) GetKeyThatNeeds_1234Camel_CasIng() int64 { + if m != nil && m.KeyThatNeeds_1234Camel_CasIng != nil { + return *m.KeyThatNeeds_1234Camel_CasIng + } + return 0 +} + +func (m *Reply_Entry) GetValue() int64 { + if m != nil && m.Value != nil { + return *m.Value + } + return Default_Reply_Entry_Value +} + +func (m *Reply_Entry) GetXMyFieldName_2() int64 { + if m != nil && m.XMyFieldName_2 != nil { + return *m.XMyFieldName_2 + } + return 0 +} + +type OtherBase struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OtherBase) Reset() { *m = OtherBase{} } +func (m *OtherBase) String() string { return proto.CompactTextString(m) } +func (*OtherBase) ProtoMessage() {} +func (*OtherBase) Descriptor() ([]byte, []int) { + return fileDescriptor_test_2309d445eee26af7, []int{2} +} + +var extRange_OtherBase = []proto.ExtensionRange{ + {Start: 100, End: 536870911}, +} + +func (*OtherBase) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OtherBase +} +func (m *OtherBase) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OtherBase.Unmarshal(m, b) +} +func (m *OtherBase) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OtherBase.Marshal(b, m, deterministic) +} +func (dst *OtherBase) XXX_Merge(src proto.Message) { + xxx_messageInfo_OtherBase.Merge(dst, src) +} +func (m *OtherBase) XXX_Size() int { + return xxx_messageInfo_OtherBase.Size(m) +} +func (m *OtherBase) XXX_DiscardUnknown() { + xxx_messageInfo_OtherBase.DiscardUnknown(m) +} + +var xxx_messageInfo_OtherBase proto.InternalMessageInfo + +func (m *OtherBase) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +type ReplyExtensions struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReplyExtensions) Reset() { *m = ReplyExtensions{} } +func (m *ReplyExtensions) String() string { return proto.CompactTextString(m) } +func (*ReplyExtensions) ProtoMessage() {} +func (*ReplyExtensions) Descriptor() ([]byte, []int) { + return fileDescriptor_test_2309d445eee26af7, []int{3} +} +func (m *ReplyExtensions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReplyExtensions.Unmarshal(m, b) +} +func (m *ReplyExtensions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReplyExtensions.Marshal(b, m, deterministic) +} +func (dst *ReplyExtensions) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_ReplyExtensions.Merge(dst, src) +} +func (m *ReplyExtensions) XXX_Size() int { + return xxx_messageInfo_ReplyExtensions.Size(m) +} +func (m *ReplyExtensions) XXX_DiscardUnknown() { + xxx_messageInfo_ReplyExtensions.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplyExtensions proto.InternalMessageInfo + +var E_ReplyExtensions_Time = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*float64)(nil), + Field: 101, + Name: "my.test.ReplyExtensions.time", + Tag: "fixed64,101,opt,name=time", + Filename: "my_test/test.proto", +} + +var E_ReplyExtensions_Carrot = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*ReplyExtensions)(nil), + Field: 105, + Name: "my.test.ReplyExtensions.carrot", + Tag: "bytes,105,opt,name=carrot", + Filename: "my_test/test.proto", +} + +var E_ReplyExtensions_Donut = &proto.ExtensionDesc{ + ExtendedType: (*OtherBase)(nil), + ExtensionType: (*ReplyExtensions)(nil), + Field: 101, + Name: "my.test.ReplyExtensions.donut", + Tag: "bytes,101,opt,name=donut", + Filename: "my_test/test.proto", +} + +type OtherReplyExtensions struct { + Key *int32 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OtherReplyExtensions) Reset() { *m = OtherReplyExtensions{} } +func (m *OtherReplyExtensions) String() string { return proto.CompactTextString(m) } +func (*OtherReplyExtensions) ProtoMessage() {} +func (*OtherReplyExtensions) Descriptor() ([]byte, []int) { + return fileDescriptor_test_2309d445eee26af7, []int{4} +} +func (m *OtherReplyExtensions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OtherReplyExtensions.Unmarshal(m, b) +} +func (m *OtherReplyExtensions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OtherReplyExtensions.Marshal(b, m, deterministic) +} +func (dst *OtherReplyExtensions) XXX_Merge(src proto.Message) { + xxx_messageInfo_OtherReplyExtensions.Merge(dst, src) +} +func (m *OtherReplyExtensions) XXX_Size() int { + return xxx_messageInfo_OtherReplyExtensions.Size(m) +} +func (m *OtherReplyExtensions) XXX_DiscardUnknown() { + xxx_messageInfo_OtherReplyExtensions.DiscardUnknown(m) +} + +var xxx_messageInfo_OtherReplyExtensions proto.InternalMessageInfo + +func (m *OtherReplyExtensions) GetKey() int32 { + if m != nil && m.Key != nil { + return *m.Key + } + return 0 +} + +type OldReply struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `protobuf_messageset:"1" json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OldReply) Reset() { *m = OldReply{} } +func (m *OldReply) String() string { return proto.CompactTextString(m) } +func (*OldReply) ProtoMessage() {} +func (*OldReply) Descriptor() ([]byte, []int) { + return fileDescriptor_test_2309d445eee26af7, []int{5} +} + +func (m *OldReply) MarshalJSON() ([]byte, error) { + return proto.MarshalMessageSetJSON(&m.XXX_InternalExtensions) +} +func (m *OldReply) UnmarshalJSON(buf []byte) error { + return proto.UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions) +} + +var extRange_OldReply = []proto.ExtensionRange{ + {Start: 100, End: 2147483646}, +} + +func (*OldReply) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OldReply +} +func (m *OldReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OldReply.Unmarshal(m, b) +} +func (m *OldReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 
+ return xxx_messageInfo_OldReply.Marshal(b, m, deterministic) +} +func (dst *OldReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_OldReply.Merge(dst, src) +} +func (m *OldReply) XXX_Size() int { + return xxx_messageInfo_OldReply.Size(m) +} +func (m *OldReply) XXX_DiscardUnknown() { + xxx_messageInfo_OldReply.DiscardUnknown(m) +} + +var xxx_messageInfo_OldReply proto.InternalMessageInfo + +type Communique struct { + MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry,json=makeMeCry" json:"make_me_cry,omitempty"` + // This is a oneof, called "union". + // + // Types that are valid to be assigned to Union: + // *Communique_Number + // *Communique_Name + // *Communique_Data + // *Communique_TempC + // *Communique_Height + // *Communique_Today + // *Communique_Maybe + // *Communique_Delta_ + // *Communique_Msg + // *Communique_Somegroup + Union isCommunique_Union `protobuf_oneof:"union"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Communique) Reset() { *m = Communique{} } +func (m *Communique) String() string { return proto.CompactTextString(m) } +func (*Communique) ProtoMessage() {} +func (*Communique) Descriptor() ([]byte, []int) { + return fileDescriptor_test_2309d445eee26af7, []int{6} +} +func (m *Communique) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Communique.Unmarshal(m, b) +} +func (m *Communique) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Communique.Marshal(b, m, deterministic) +} +func (dst *Communique) XXX_Merge(src proto.Message) { + xxx_messageInfo_Communique.Merge(dst, src) +} +func (m *Communique) XXX_Size() int { + return xxx_messageInfo_Communique.Size(m) +} +func (m *Communique) XXX_DiscardUnknown() { + xxx_messageInfo_Communique.DiscardUnknown(m) +} + +var xxx_messageInfo_Communique proto.InternalMessageInfo + +type isCommunique_Union interface { + isCommunique_Union() +} + +type Communique_Number struct { + Number int32 `protobuf:"varint,5,opt,name=number,oneof"` +} +type Communique_Name struct { + Name string `protobuf:"bytes,6,opt,name=name,oneof"` +} +type Communique_Data struct { + Data []byte `protobuf:"bytes,7,opt,name=data,oneof"` +} +type Communique_TempC struct { + TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,json=tempC,oneof"` +} +type Communique_Height struct { + Height float32 `protobuf:"fixed32,9,opt,name=height,oneof"` +} +type Communique_Today struct { + Today Days `protobuf:"varint,10,opt,name=today,enum=my.test.Days,oneof"` +} +type Communique_Maybe struct { + Maybe bool `protobuf:"varint,11,opt,name=maybe,oneof"` +} +type Communique_Delta_ struct { + Delta int32 `protobuf:"zigzag32,12,opt,name=delta,oneof"` +} +type Communique_Msg struct { + Msg *Reply `protobuf:"bytes,16,opt,name=msg,oneof"` +} +type Communique_Somegroup struct { + Somegroup *Communique_SomeGroup `protobuf:"group,14,opt,name=SomeGroup,json=somegroup,oneof"` +} + +func (*Communique_Number) isCommunique_Union() {} +func (*Communique_Name) isCommunique_Union() {} +func (*Communique_Data) isCommunique_Union() {} +func (*Communique_TempC) isCommunique_Union() {} +func (*Communique_Height) isCommunique_Union() {} +func (*Communique_Today) isCommunique_Union() {} +func (*Communique_Maybe) isCommunique_Union() {} +func (*Communique_Delta_) isCommunique_Union() {} +func (*Communique_Msg) isCommunique_Union() {} +func (*Communique_Somegroup) isCommunique_Union() {} + +func (m *Communique) GetUnion() isCommunique_Union { + if m != nil { + 
return m.Union + } + return nil +} + +func (m *Communique) GetMakeMeCry() bool { + if m != nil && m.MakeMeCry != nil { + return *m.MakeMeCry + } + return false +} + +func (m *Communique) GetNumber() int32 { + if x, ok := m.GetUnion().(*Communique_Number); ok { + return x.Number + } + return 0 +} + +func (m *Communique) GetName() string { + if x, ok := m.GetUnion().(*Communique_Name); ok { + return x.Name + } + return "" +} + +func (m *Communique) GetData() []byte { + if x, ok := m.GetUnion().(*Communique_Data); ok { + return x.Data + } + return nil +} + +func (m *Communique) GetTempC() float64 { + if x, ok := m.GetUnion().(*Communique_TempC); ok { + return x.TempC + } + return 0 +} + +func (m *Communique) GetHeight() float32 { + if x, ok := m.GetUnion().(*Communique_Height); ok { + return x.Height + } + return 0 +} + +func (m *Communique) GetToday() Days { + if x, ok := m.GetUnion().(*Communique_Today); ok { + return x.Today + } + return Days_MONDAY +} + +func (m *Communique) GetMaybe() bool { + if x, ok := m.GetUnion().(*Communique_Maybe); ok { + return x.Maybe + } + return false +} + +func (m *Communique) GetDelta() int32 { + if x, ok := m.GetUnion().(*Communique_Delta_); ok { + return x.Delta + } + return 0 +} + +func (m *Communique) GetMsg() *Reply { + if x, ok := m.GetUnion().(*Communique_Msg); ok { + return x.Msg + } + return nil +} + +func (m *Communique) GetSomegroup() *Communique_SomeGroup { + if x, ok := m.GetUnion().(*Communique_Somegroup); ok { + return x.Somegroup + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Communique) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Communique_OneofMarshaler, _Communique_OneofUnmarshaler, _Communique_OneofSizer, []interface{}{ + (*Communique_Number)(nil), + (*Communique_Name)(nil), + (*Communique_Data)(nil), + (*Communique_TempC)(nil), + (*Communique_Height)(nil), + (*Communique_Today)(nil), + (*Communique_Maybe)(nil), + (*Communique_Delta_)(nil), + (*Communique_Msg)(nil), + (*Communique_Somegroup)(nil), + } +} + +func _Communique_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Communique) + // union + switch x := m.Union.(type) { + case *Communique_Number: + b.EncodeVarint(5<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Number)) + case *Communique_Name: + b.EncodeVarint(6<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Name) + case *Communique_Data: + b.EncodeVarint(7<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Data) + case *Communique_TempC: + b.EncodeVarint(8<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.TempC)) + case *Communique_Height: + b.EncodeVarint(9<<3 | proto.WireFixed32) + b.EncodeFixed32(uint64(math.Float32bits(x.Height))) + case *Communique_Today: + b.EncodeVarint(10<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Today)) + case *Communique_Maybe: + t := uint64(0) + if x.Maybe { + t = 1 + } + b.EncodeVarint(11<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *Communique_Delta_: + b.EncodeVarint(12<<3 | proto.WireVarint) + b.EncodeZigzag32(uint64(x.Delta)) + case *Communique_Msg: + b.EncodeVarint(16<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Msg); err != nil { + return err + } + case *Communique_Somegroup: + b.EncodeVarint(14<<3 | proto.WireStartGroup) + if err := b.Marshal(x.Somegroup); err != nil { + return err + } + b.EncodeVarint(14<<3 | proto.WireEndGroup) + case nil: + default: 
+ return fmt.Errorf("Communique.Union has unexpected type %T", x) + } + return nil +} + +func _Communique_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Communique) + switch tag { + case 5: // union.number + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Number{int32(x)} + return true, err + case 6: // union.name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &Communique_Name{x} + return true, err + case 7: // union.data + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Union = &Communique_Data{x} + return true, err + case 8: // union.temp_c + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Union = &Communique_TempC{math.Float64frombits(x)} + return true, err + case 9: // union.height + if wire != proto.WireFixed32 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed32() + m.Union = &Communique_Height{math.Float32frombits(uint32(x))} + return true, err + case 10: // union.today + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Today{Days(x)} + return true, err + case 11: // union.maybe + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Maybe{x != 0} + return true, err + case 12: // union.delta + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeZigzag32() + m.Union = &Communique_Delta_{int32(x)} + return true, err + case 16: // union.msg + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Reply) + err := b.DecodeMessage(msg) + m.Union = &Communique_Msg{msg} + return true, err + case 14: // union.somegroup + if wire != proto.WireStartGroup { + return true, proto.ErrInternalBadWireType + } + msg := new(Communique_SomeGroup) + err := b.DecodeGroup(msg) + m.Union = &Communique_Somegroup{msg} + return true, err + default: + return false, nil + } +} + +func _Communique_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Communique) + // union + switch x := m.Union.(type) { + case *Communique_Number: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.Number)) + case *Communique_Name: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Name))) + n += len(x.Name) + case *Communique_Data: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Data))) + n += len(x.Data) + case *Communique_TempC: + n += 1 // tag and wire + n += 8 + case *Communique_Height: + n += 1 // tag and wire + n += 4 + case *Communique_Today: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.Today)) + case *Communique_Maybe: + n += 1 // tag and wire + n += 1 + case *Communique_Delta_: + n += 1 // tag and wire + n += proto.SizeVarint(uint64((uint32(x.Delta) << 1) ^ uint32((int32(x.Delta) >> 31)))) + case *Communique_Msg: + s := proto.Size(x.Msg) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Communique_Somegroup: + n += 1 // tag and wire + n += proto.Size(x.Somegroup) + n += 1 // tag and wire + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Communique_SomeGroup struct { + Member *string 
`protobuf:"bytes,15,opt,name=member" json:"member,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Communique_SomeGroup) Reset() { *m = Communique_SomeGroup{} } +func (m *Communique_SomeGroup) String() string { return proto.CompactTextString(m) } +func (*Communique_SomeGroup) ProtoMessage() {} +func (*Communique_SomeGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_test_2309d445eee26af7, []int{6, 0} +} +func (m *Communique_SomeGroup) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Communique_SomeGroup.Unmarshal(m, b) +} +func (m *Communique_SomeGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Communique_SomeGroup.Marshal(b, m, deterministic) +} +func (dst *Communique_SomeGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_Communique_SomeGroup.Merge(dst, src) +} +func (m *Communique_SomeGroup) XXX_Size() int { + return xxx_messageInfo_Communique_SomeGroup.Size(m) +} +func (m *Communique_SomeGroup) XXX_DiscardUnknown() { + xxx_messageInfo_Communique_SomeGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_Communique_SomeGroup proto.InternalMessageInfo + +func (m *Communique_SomeGroup) GetMember() string { + if m != nil && m.Member != nil { + return *m.Member + } + return "" +} + +type Communique_Delta struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Communique_Delta) Reset() { *m = Communique_Delta{} } +func (m *Communique_Delta) String() string { return proto.CompactTextString(m) } +func (*Communique_Delta) ProtoMessage() {} +func (*Communique_Delta) Descriptor() ([]byte, []int) { + return fileDescriptor_test_2309d445eee26af7, []int{6, 1} +} +func (m *Communique_Delta) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Communique_Delta.Unmarshal(m, b) +} +func (m *Communique_Delta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Communique_Delta.Marshal(b, m, deterministic) +} +func (dst *Communique_Delta) XXX_Merge(src proto.Message) { + xxx_messageInfo_Communique_Delta.Merge(dst, src) +} +func (m *Communique_Delta) XXX_Size() int { + return xxx_messageInfo_Communique_Delta.Size(m) +} +func (m *Communique_Delta) XXX_DiscardUnknown() { + xxx_messageInfo_Communique_Delta.DiscardUnknown(m) +} + +var xxx_messageInfo_Communique_Delta proto.InternalMessageInfo + +var E_Tag = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*string)(nil), + Field: 103, + Name: "my.test.tag", + Tag: "bytes,103,opt,name=tag", + Filename: "my_test/test.proto", +} + +var E_Donut = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*OtherReplyExtensions)(nil), + Field: 106, + Name: "my.test.donut", + Tag: "bytes,106,opt,name=donut", + Filename: "my_test/test.proto", +} + +func init() { + proto.RegisterType((*Request)(nil), "my.test.Request") + proto.RegisterMapType((map[int64]*Reply)(nil), "my.test.Request.MsgMappingEntry") + proto.RegisterMapType((map[int32]string)(nil), "my.test.Request.NameMappingEntry") + proto.RegisterType((*Request_SomeGroup)(nil), "my.test.Request.SomeGroup") + proto.RegisterType((*Reply)(nil), "my.test.Reply") + proto.RegisterType((*Reply_Entry)(nil), "my.test.Reply.Entry") + proto.RegisterType((*OtherBase)(nil), "my.test.OtherBase") + proto.RegisterType((*ReplyExtensions)(nil), "my.test.ReplyExtensions") + proto.RegisterType((*OtherReplyExtensions)(nil), 
"my.test.OtherReplyExtensions") + proto.RegisterType((*OldReply)(nil), "my.test.OldReply") + proto.RegisterType((*Communique)(nil), "my.test.Communique") + proto.RegisterType((*Communique_SomeGroup)(nil), "my.test.Communique.SomeGroup") + proto.RegisterType((*Communique_Delta)(nil), "my.test.Communique.Delta") + proto.RegisterEnum("my.test.HatType", HatType_name, HatType_value) + proto.RegisterEnum("my.test.Days", Days_name, Days_value) + proto.RegisterEnum("my.test.Request_Color", Request_Color_name, Request_Color_value) + proto.RegisterEnum("my.test.Reply_Entry_Game", Reply_Entry_Game_name, Reply_Entry_Game_value) + proto.RegisterExtension(E_ReplyExtensions_Time) + proto.RegisterExtension(E_ReplyExtensions_Carrot) + proto.RegisterExtension(E_ReplyExtensions_Donut) + proto.RegisterExtension(E_Tag) + proto.RegisterExtension(E_Donut) +} + +func init() { proto.RegisterFile("my_test/test.proto", fileDescriptor_test_2309d445eee26af7) } + +var fileDescriptor_test_2309d445eee26af7 = []byte{ + // 1033 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x55, 0xdd, 0x6e, 0xe3, 0x44, + 0x14, 0xce, 0xd8, 0x71, 0x7e, 0x4e, 0x42, 0x6b, 0x46, 0x55, 0x6b, 0x05, 0xed, 0xd6, 0x04, 0x8a, + 0x4c, 0xc5, 0xa6, 0xda, 0x80, 0xc4, 0x2a, 0x88, 0xd5, 0x36, 0x3f, 0x6d, 0xaa, 0x6d, 0x12, 0x69, + 0xda, 0x5e, 0xb0, 0x37, 0xd6, 0x34, 0x9e, 0x3a, 0xa6, 0x19, 0x3b, 0x6b, 0x8f, 0x11, 0xbe, 0xeb, + 0x53, 0xc0, 0x6b, 0x70, 0xcf, 0x0b, 0xf1, 0x16, 0x45, 0x33, 0x0e, 0x49, 0xda, 0xa0, 0xbd, 0xb1, + 0x7c, 0xce, 0xf9, 0xce, 0xe7, 0x39, 0x3f, 0xfe, 0x06, 0x30, 0xcf, 0x5c, 0xc1, 0x12, 0x71, 0x22, + 0x1f, 0xad, 0x45, 0x1c, 0x89, 0x08, 0x97, 0x79, 0xd6, 0x92, 0x66, 0x03, 0xf3, 0x74, 0x2e, 0x82, + 0x13, 0xf5, 0x7c, 0x9d, 0x07, 0x9b, 0xff, 0x14, 0xa1, 0x4c, 0xd8, 0xc7, 0x94, 0x25, 0x02, 0x9b, + 0xa0, 0xdf, 0xb3, 0xcc, 0x42, 0xb6, 0xee, 0xe8, 0x44, 0xbe, 0x62, 0x07, 0xf4, 0x59, 0xca, 0x2c, + 0xdd, 0x46, 0xce, 0x4e, 0x7b, 0xbf, 0xb5, 0x24, 0x6a, 0x2d, 0x13, 0x5a, 0xbd, 0x68, 0x1e, 0xc5, + 0x44, 0x42, 0xf0, 0x31, 0xe8, 0x33, 0x2a, 0xac, 0xa2, 0x42, 0x9a, 0x2b, 0xe4, 0x90, 0x8a, 0xeb, + 0x6c, 0xc1, 0x3a, 0xa5, 0xb3, 0x41, 0x7f, 0x42, 0x4e, 0x89, 0x04, 0xe1, 0x43, 0xa8, 0x78, 0x8c, + 0x7a, 0xf3, 0x20, 0x64, 0x56, 0xd9, 0x46, 0x8e, 0xd6, 0xd1, 0x83, 0xf0, 0x8e, 0xac, 0x9c, 0xf8, + 0x0d, 0x54, 0x93, 0x88, 0x33, 0x3f, 0x8e, 0xd2, 0x85, 0x55, 0xb1, 0x91, 0x03, 0xed, 0xc6, 0xd6, + 0xc7, 0xaf, 0x22, 0xce, 0xce, 0x25, 0x82, 0xac, 0xc1, 0xb8, 0x0f, 0xf5, 0x90, 0x72, 0xe6, 0x72, + 0xba, 0x58, 0x04, 0xa1, 0x6f, 0xed, 0xd8, 0xba, 0x53, 0x6b, 0x7f, 0xb9, 0x95, 0x3c, 0xa6, 0x9c, + 0x8d, 0x72, 0xcc, 0x20, 0x14, 0x71, 0x46, 0x6a, 0xe1, 0xda, 0x83, 0x4f, 0xa1, 0xc6, 0x13, 0x7f, + 0x45, 0xb2, 0xab, 0x48, 0xec, 0x2d, 0x92, 0x51, 0xe2, 0x3f, 0xe1, 0x00, 0xbe, 0x72, 0xe0, 0x3d, + 0x30, 0x62, 0x96, 0x30, 0x61, 0xd5, 0x6d, 0xe4, 0x18, 0x24, 0x37, 0xf0, 0x01, 0x94, 0x7d, 0x26, + 0x5c, 0xd9, 0x65, 0xd3, 0x46, 0x4e, 0x95, 0x94, 0x7c, 0x26, 0xde, 0xb3, 0xac, 0xf1, 0x1d, 0x54, + 0x57, 0xf5, 0xe0, 0x43, 0xa8, 0xa9, 0x6a, 0xdc, 0xbb, 0x80, 0xcd, 0x3d, 0xab, 0xaa, 0x18, 0x40, + 0xb9, 0xce, 0xa4, 0xa7, 0xf1, 0x16, 0xcc, 0xe7, 0x05, 0xac, 0x87, 0x27, 0xc1, 0x6a, 0x78, 0x7b, + 0x60, 0xfc, 0x46, 0xe7, 0x29, 0xb3, 0x34, 0xf5, 0xa9, 0xdc, 0xe8, 0x68, 0x6f, 0x50, 0x63, 0x04, + 0xbb, 0xcf, 0xce, 0xbe, 0x99, 0x8e, 0xf3, 0xf4, 0xaf, 0x37, 0xd3, 0x6b, 0xed, 0x9d, 0x8d, 0xf2, + 0x17, 0xf3, 0x6c, 0x83, 0xae, 0x79, 0x04, 0x86, 0xda, 0x04, 0x5c, 0x06, 0x9d, 0x0c, 0xfa, 0x66, + 0x01, 0x57, 0xc1, 0x38, 0x27, 0x83, 0xc1, 0xd8, 0x44, 
0xb8, 0x02, 0xc5, 0xee, 0xe5, 0xcd, 0xc0, + 0xd4, 0x9a, 0x7f, 0x6a, 0x60, 0xa8, 0x5c, 0x7c, 0x0c, 0xc6, 0x5d, 0x94, 0x86, 0x9e, 0x5a, 0xb5, + 0x5a, 0x7b, 0xef, 0x29, 0x75, 0x2b, 0xef, 0x66, 0x0e, 0xc1, 0x47, 0x50, 0x9f, 0x46, 0x7c, 0x41, + 0xa7, 0xaa, 0x6d, 0x89, 0xa5, 0xd9, 0xba, 0x63, 0x74, 0x35, 0x13, 0x91, 0xda, 0xd2, 0xff, 0x9e, + 0x65, 0x49, 0xe3, 0x2f, 0x04, 0x46, 0x5e, 0x49, 0x1f, 0x0e, 0xef, 0x59, 0xe6, 0x8a, 0x19, 0x15, + 0x6e, 0xc8, 0x98, 0x97, 0xb8, 0xaf, 0xdb, 0xdf, 0xff, 0x30, 0xa5, 0x9c, 0xcd, 0xdd, 0x1e, 0x4d, + 0x2e, 0x42, 0xdf, 0x42, 0xb6, 0xe6, 0xe8, 0xe4, 0x8b, 0x7b, 0x96, 0x5d, 0xcf, 0xa8, 0x18, 0x4b, + 0xd0, 0x0a, 0x93, 0x43, 0xf0, 0xc1, 0x66, 0xf5, 0x7a, 0x07, 0xfd, 0xb8, 0x2c, 0x18, 0x7f, 0x03, + 0xa6, 0xcb, 0xb3, 0x7c, 0x34, 0xae, 0xda, 0xb5, 0xb6, 0xfa, 0x3f, 0x74, 0x52, 0x1f, 0x65, 0x6a, + 0x3c, 0x72, 0x34, 0xed, 0xa6, 0x0d, 0xc5, 0x73, 0xca, 0x19, 0xae, 0x43, 0xe5, 0x6c, 0x32, 0xb9, + 0xee, 0x9e, 0x5e, 0x5e, 0x9a, 0x08, 0x03, 0x94, 0xae, 0x07, 0xe3, 0xf1, 0xc5, 0x95, 0xa9, 0x1d, + 0x57, 0x2a, 0x9e, 0xf9, 0xf0, 0xf0, 0xf0, 0xa0, 0x35, 0xbf, 0x85, 0xea, 0x44, 0xcc, 0x58, 0xdc, + 0xa5, 0x09, 0xc3, 0x18, 0x8a, 0x92, 0x56, 0x8d, 0xa2, 0x4a, 0xd4, 0xfb, 0x06, 0xf4, 0x6f, 0x04, + 0xbb, 0xaa, 0x4b, 0x83, 0xdf, 0x05, 0x0b, 0x93, 0x20, 0x0a, 0x93, 0x76, 0x13, 0x8a, 0x22, 0xe0, + 0x0c, 0x3f, 0x1b, 0x91, 0xc5, 0x6c, 0xe4, 0x20, 0xa2, 0x62, 0xed, 0x77, 0x50, 0x9a, 0xd2, 0x38, + 0x8e, 0xc4, 0x16, 0x2a, 0x50, 0xe3, 0xb5, 0x9e, 0x7a, 0xd7, 0xec, 0x64, 0x99, 0xd7, 0xee, 0x82, + 0xe1, 0x45, 0x61, 0x2a, 0x30, 0x5e, 0x41, 0x57, 0x87, 0x56, 0x9f, 0xfa, 0x14, 0x49, 0x9e, 0xda, + 0x74, 0x60, 0x4f, 0xe5, 0x3c, 0x0b, 0x6f, 0x2f, 0x6f, 0xd3, 0x82, 0xca, 0x64, 0xee, 0x29, 0x9c, + 0xaa, 0xfe, 0xf1, 0xf1, 0xf1, 0xb1, 0xdc, 0xd1, 0x2a, 0xa8, 0xf9, 0x87, 0x0e, 0xd0, 0x8b, 0x38, + 0x4f, 0xc3, 0xe0, 0x63, 0xca, 0xf0, 0x4b, 0xa8, 0x71, 0x7a, 0xcf, 0x5c, 0xce, 0xdc, 0x69, 0x9c, + 0x53, 0x54, 0x48, 0x55, 0xba, 0x46, 0xac, 0x17, 0x67, 0xd8, 0x82, 0x52, 0x98, 0xf2, 0x5b, 0x16, + 0x5b, 0x86, 0x64, 0x1f, 0x16, 0xc8, 0xd2, 0xc6, 0x7b, 0xcb, 0x46, 0x97, 0x64, 0xa3, 0x87, 0x85, + 0xbc, 0xd5, 0xd2, 0xeb, 0x51, 0x41, 0x95, 0x30, 0xd5, 0xa5, 0x57, 0x5a, 0xf8, 0x00, 0x4a, 0x82, + 0xf1, 0x85, 0x3b, 0x55, 0x72, 0x84, 0x86, 0x05, 0x62, 0x48, 0xbb, 0x27, 0xe9, 0x67, 0x2c, 0xf0, + 0x67, 0x42, 0xfd, 0xa6, 0x9a, 0xa4, 0xcf, 0x6d, 0x7c, 0x04, 0x86, 0x88, 0x3c, 0x9a, 0x59, 0xa0, + 0x34, 0xf1, 0xb3, 0x55, 0x6f, 0xfa, 0x34, 0x4b, 0x14, 0x81, 0x8c, 0xe2, 0x7d, 0x30, 0x38, 0xcd, + 0x6e, 0x99, 0x55, 0x93, 0x27, 0x97, 0x7e, 0x65, 0x4a, 0xbf, 0xc7, 0xe6, 0x82, 0x2a, 0x01, 0xf9, + 0x5c, 0xfa, 0x95, 0x89, 0x9b, 0xa0, 0xf3, 0xc4, 0x57, 0xf2, 0xb1, 0xf5, 0x53, 0x0e, 0x0b, 0x44, + 0x06, 0xf1, 0xcf, 0x9b, 0xfa, 0xb9, 0xa3, 0xf4, 0xf3, 0xc5, 0x0a, 0xb9, 0xee, 0xdd, 0x5a, 0x42, + 0x87, 0x85, 0x0d, 0x11, 0x6d, 0x7c, 0xb5, 0x29, 0x46, 0xfb, 0x50, 0xe2, 0x4c, 0xf5, 0x6f, 0x37, + 0x57, 0xac, 0xdc, 0x6a, 0x94, 0xc1, 0xe8, 0xcb, 0x03, 0x75, 0xcb, 0x60, 0xa4, 0x61, 0x10, 0x85, + 0xc7, 0x2f, 0xa1, 0xbc, 0x94, 0x7b, 0xb9, 0xe6, 0xb9, 0xe0, 0x9b, 0x48, 0x8a, 0xc2, 0xd9, 0xe0, + 0x83, 0xa9, 0x1d, 0xb7, 0xa0, 0x28, 0x4b, 0x97, 0xc1, 0xd1, 0x64, 0xdc, 0x3f, 0xfd, 0xc5, 0x44, + 0xb8, 0x06, 0xe5, 0xeb, 0x9b, 0xc1, 0x95, 0x34, 0x34, 0xa9, 0x1a, 0x97, 0x37, 0xe3, 0xfe, 0x85, + 0x89, 0x1a, 0x9a, 0x89, 0x3a, 0x36, 0xe8, 0x82, 0xfa, 0x5b, 0xfb, 0xea, 0xab, 0x63, 0xc8, 0x50, + 0xa7, 0xf7, 0xdf, 0x4a, 0x3e, 0xc7, 0xfc, 0xaa, 0xba, 0xf3, 0xe2, 0xe9, 0xa2, 0xfe, 0xff, 0x4e, + 0x76, 0xdf, 0x7d, 0x78, 0xeb, 0x07, 0x62, 0x96, 0xde, 0xb6, 0xa6, 0x11, 0x3f, 
0xf1, 0xa3, 0x39, + 0x0d, 0xfd, 0x13, 0x75, 0x39, 0xde, 0xa6, 0x77, 0xf9, 0xcb, 0xf4, 0x95, 0xcf, 0xc2, 0x57, 0x7e, + 0xa4, 0x6e, 0x55, 0xb9, 0x0f, 0x27, 0xcb, 0x6b, 0xf6, 0x27, 0xf9, 0xf8, 0x37, 0x00, 0x00, 0xff, + 0xff, 0x12, 0xd5, 0x46, 0x00, 0x75, 0x07, 0x00, 0x00, +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.proto b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.proto new file mode 100644 index 0000000..1ef3fd0 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.proto @@ -0,0 +1,158 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +// This package holds interesting messages. +package my.test; // dotted package name + +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/my_test;test"; + +//import "imp.proto"; +import "multi/multi1.proto"; // unused import + +enum HatType { + // deliberately skipping 0 + FEDORA = 1; + FEZ = 2; +} + +// This enum represents days of the week. +enum Days { + option allow_alias = true; + + MONDAY = 1; + TUESDAY = 2; + LUNDI = 1; // same value as MONDAY +} + +// This is a message that might be sent somewhere. +message Request { + enum Color { + RED = 0; + GREEN = 1; + BLUE = 2; + } + repeated int64 key = 1; +// optional imp.ImportedMessage imported_message = 2; + optional Color hue = 3; // no default + optional HatType hat = 4 [default=FEDORA]; +// optional imp.ImportedMessage.Owner owner = 6; + optional float deadline = 7 [default=inf]; + optional group SomeGroup = 8 { + optional int32 group_field = 9; + } + + // These foreign types are in imp2.proto, + // which is publicly imported by imp.proto. 
+// optional imp.PubliclyImportedMessage pub = 10; +// optional imp.PubliclyImportedEnum pub_enum = 13 [default=HAIR]; + + + // This is a map field. It will generate map[int32]string. + map<int32, string> name_mapping = 14; + // This is a map field whose value type is a message. + map<int64, Reply> msg_mapping = 15; + + optional int32 reset = 12; + // This field should not conflict with any getters. + optional string get_key = 16; +} + +message Reply { + message Entry { + required int64 key_that_needs_1234camel_CasIng = 1; + optional int64 value = 2 [default=7]; + optional int64 _my_field_name_2 = 3; + enum Game { + FOOTBALL = 1; + TENNIS = 2; + } + } + repeated Entry found = 1; + repeated int32 compact_keys = 2 [packed=true]; + extensions 100 to max; +} + +message OtherBase { + optional string name = 1; + extensions 100 to max; +} + +message ReplyExtensions { + extend Reply { + optional double time = 101; + optional ReplyExtensions carrot = 105; + } + extend OtherBase { + optional ReplyExtensions donut = 101; + } +} + +message OtherReplyExtensions { + optional int32 key = 1; +} + +// top-level extension +extend Reply { + optional string tag = 103; + optional OtherReplyExtensions donut = 106; +// optional imp.ImportedMessage elephant = 107; // extend with message from another file. +} + +message OldReply { + // Extensions will be encoded in MessageSet wire format. + option message_set_wire_format = true; + extensions 100 to max; +} + +message Communique { + optional bool make_me_cry = 1; + + // This is a oneof, called "union". + oneof union { + int32 number = 5; + string name = 6; + bytes data = 7; + double temp_c = 8; + float height = 9; + Days today = 10; + bool maybe = 11; + sint32 delta = 12; // name will conflict with Delta below + Reply msg = 16; // requires two bytes to encode field tag + group SomeGroup = 14 { + optional string member = 15; + } + } + + message Delta {} +} + diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3/proto3.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3/proto3.pb.go new file mode 100644 index 0000000..1ad010a --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3/proto3.pb.go @@ -0,0 +1,196 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: proto3/proto3.proto + +package proto3 // import "github.com/golang/protobuf/protoc-gen-go/testdata/proto3" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Request_Flavour int32 + +const ( + Request_SWEET Request_Flavour = 0 + Request_SOUR Request_Flavour = 1 + Request_UMAMI Request_Flavour = 2 + Request_GOPHERLICIOUS Request_Flavour = 3 +) + +var Request_Flavour_name = map[int32]string{ + 0: "SWEET", + 1: "SOUR", + 2: "UMAMI", + 3: "GOPHERLICIOUS", +} +var Request_Flavour_value = map[string]int32{ + "SWEET": 0, + "SOUR": 1, + "UMAMI": 2, + "GOPHERLICIOUS": 3, +} + +func (x Request_Flavour) String() string { + return proto.EnumName(Request_Flavour_name, int32(x)) +} +func (Request_Flavour) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_proto3_a752e09251f17e01, []int{0, 0} +} + +type Request struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Key []int64 `protobuf:"varint,2,rep,packed,name=key,proto3" json:"key,omitempty"` + Taste Request_Flavour `protobuf:"varint,3,opt,name=taste,proto3,enum=proto3.Request_Flavour" json:"taste,omitempty"` + Book *Book `protobuf:"bytes,4,opt,name=book,proto3" json:"book,omitempty"` + Unpacked []int64 `protobuf:"varint,5,rep,name=unpacked,proto3" json:"unpacked,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} +func (*Request) Descriptor() ([]byte, []int) { + return fileDescriptor_proto3_a752e09251f17e01, []int{0} +} +func (m *Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Request.Unmarshal(m, b) +} +func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Request.Marshal(b, m, deterministic) +} +func (dst *Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Request.Merge(dst, src) +} +func (m *Request) XXX_Size() int { + return xxx_messageInfo_Request.Size(m) +} +func (m *Request) XXX_DiscardUnknown() { + xxx_messageInfo_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Request proto.InternalMessageInfo + +func (m *Request) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Request) GetKey() []int64 { + if m != nil { + return m.Key + } + return nil +} + +func (m *Request) GetTaste() Request_Flavour { + if m != nil { + return m.Taste + } + return Request_SWEET +} + +func (m *Request) GetBook() *Book { + if m != nil { + return m.Book + } + return nil +} + +func (m *Request) GetUnpacked() []int64 { + if m != nil { + return m.Unpacked + } + return nil +} + +type Book struct { + Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"` + RawData []byte `protobuf:"bytes,2,opt,name=raw_data,json=rawData,proto3" json:"raw_data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Book) Reset() { *m = Book{} } +func (m *Book) String() string { return proto.CompactTextString(m) } +func (*Book) ProtoMessage() {} +func (*Book) Descriptor() ([]byte, []int) { + return fileDescriptor_proto3_a752e09251f17e01, []int{1} +} +func (m *Book) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Book.Unmarshal(m, b) +} +func (m *Book) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Book.Marshal(b, m, deterministic) +} +func (dst *Book) XXX_Merge(src proto.Message) { + xxx_messageInfo_Book.Merge(dst, src) +} +func (m *Book) 
XXX_Size() int { + return xxx_messageInfo_Book.Size(m) +} +func (m *Book) XXX_DiscardUnknown() { + xxx_messageInfo_Book.DiscardUnknown(m) +} + +var xxx_messageInfo_Book proto.InternalMessageInfo + +func (m *Book) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *Book) GetRawData() []byte { + if m != nil { + return m.RawData + } + return nil +} + +func init() { + proto.RegisterType((*Request)(nil), "proto3.Request") + proto.RegisterType((*Book)(nil), "proto3.Book") + proto.RegisterEnum("proto3.Request_Flavour", Request_Flavour_name, Request_Flavour_value) +} + +func init() { proto.RegisterFile("proto3/proto3.proto", fileDescriptor_proto3_a752e09251f17e01) } + +var fileDescriptor_proto3_a752e09251f17e01 = []byte{ + // 306 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x3c, 0x90, 0xcf, 0x4e, 0xf2, 0x40, + 0x14, 0xc5, 0x99, 0xfe, 0xf9, 0x80, 0xfb, 0xa1, 0x19, 0xaf, 0x26, 0x8e, 0x1b, 0x33, 0x61, 0xd5, + 0x0d, 0x25, 0xc1, 0x85, 0xc6, 0xb8, 0x11, 0x45, 0x25, 0x91, 0x60, 0x06, 0x89, 0x89, 0x1b, 0x33, + 0x85, 0xb1, 0x92, 0x42, 0x07, 0xcb, 0x54, 0xe2, 0xcb, 0xfa, 0x2c, 0xa6, 0x9d, 0xe2, 0xea, 0x9e, + 0x7b, 0xe7, 0xe4, 0x77, 0x32, 0x07, 0x0e, 0xd7, 0x99, 0x36, 0xfa, 0xac, 0x6b, 0x47, 0x58, 0x0e, + 0xfc, 0x67, 0xb7, 0xf6, 0x0f, 0x81, 0xba, 0x50, 0x9f, 0xb9, 0xda, 0x18, 0x44, 0xf0, 0x52, 0xb9, + 0x52, 0x8c, 0x70, 0x12, 0x34, 0x45, 0xa9, 0x91, 0x82, 0x9b, 0xa8, 0x6f, 0xe6, 0x70, 0x37, 0x70, + 0x45, 0x21, 0xb1, 0x03, 0xbe, 0x91, 0x1b, 0xa3, 0x98, 0xcb, 0x49, 0xb0, 0xdf, 0x3b, 0x0e, 0x2b, + 0x6e, 0x45, 0x09, 0xef, 0x96, 0xf2, 0x4b, 0xe7, 0x99, 0xb0, 0x2e, 0xe4, 0xe0, 0x45, 0x5a, 0x27, + 0xcc, 0xe3, 0x24, 0xf8, 0xdf, 0x6b, 0xed, 0xdc, 0x7d, 0xad, 0x13, 0x51, 0xbe, 0xe0, 0x29, 0x34, + 0xf2, 0x74, 0x2d, 0x67, 0x89, 0x9a, 0x33, 0xbf, 0xc8, 0xe9, 0x3b, 0xb4, 0x26, 0xfe, 0x6e, 0xed, + 0x2b, 0xa8, 0x57, 0x4c, 0x6c, 0x82, 0x3f, 0x79, 0x19, 0x0c, 0x9e, 0x69, 0x0d, 0x1b, 0xe0, 0x4d, + 0xc6, 0x53, 0x41, 0x49, 0x71, 0x9c, 0x8e, 0xae, 0x47, 0x43, 0xea, 0xe0, 0x01, 0xec, 0xdd, 0x8f, + 0x9f, 0x1e, 0x06, 0xe2, 0x71, 0x78, 0x33, 0x1c, 0x4f, 0x27, 0xd4, 0x6d, 0x9f, 0x83, 0x57, 0x64, + 0xe1, 0x11, 0xf8, 0x66, 0x61, 0x96, 0xbb, 0xdf, 0xd9, 0x05, 0x4f, 0xa0, 0x91, 0xc9, 0xed, 0xdb, + 0x5c, 0x1a, 0xc9, 0x1c, 0x4e, 0x82, 0x96, 0xa8, 0x67, 0x72, 0x7b, 0x2b, 0x8d, 0xec, 0x5f, 0xbe, + 0x5e, 0xc4, 0x0b, 0xf3, 0x91, 0x47, 0xe1, 0x4c, 0xaf, 0xba, 0xb1, 0x5e, 0xca, 0x34, 0xb6, 0x1d, + 0x46, 0xf9, 0xbb, 0x15, 0xb3, 0x4e, 0xac, 0xd2, 0x4e, 0xac, 0xbb, 0x46, 0x6d, 0x4c, 0xc1, 0xa8, + 0x3a, 0x8e, 0xaa, 0x76, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xec, 0x71, 0xee, 0xdb, 0x7b, 0x01, + 0x00, 0x00, +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3/proto3.proto b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3/proto3.proto new file mode 100644 index 0000000..79954e4 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3/proto3.proto @@ -0,0 +1,55 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package proto3; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/proto3"; + +message Request { + enum Flavour { + SWEET = 0; + SOUR = 1; + UMAMI = 2; + GOPHERLICIOUS = 3; + } + string name = 1; + repeated int64 key = 2; + Flavour taste = 3; + Book book = 4; + repeated int64 unpacked = 5 [packed=false]; +} + +message Book { + string title = 1; + bytes raw_data = 2; +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/any.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/any.go index b2af97f..70276e8 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/any.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/any.go @@ -130,10 +130,12 @@ func UnmarshalAny(any *any.Any, pb proto.Message) error { // Is returns true if any value contains a given message type. func Is(any *any.Any, pb proto.Message) bool { - aname, err := AnyMessageName(any) - if err != nil { + // The following is equivalent to AnyMessageName(any) == proto.MessageName(pb), + // but it avoids scanning TypeUrl for the slash. 
+ if any == nil { return false } - - return aname == proto.MessageName(pb) + name := proto.MessageName(pb) + prefix := len(any.TypeUrl) - len(name) + return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name } diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go index f346017..e3c56d3 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -1,16 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/any.proto -/* -Package any is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/any.proto - -It has these top-level messages: - Any -*/ -package any +package any // import "github.com/golang/protobuf/ptypes/any" import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -130,16 +121,38 @@ type Any struct { // Schemes other than `http`, `https` (or the empty scheme) might be // used with implementation specific semantics. // - TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"` + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` // Must be a valid serialized protocol buffer of the above specified type. - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { + return fileDescriptor_any_744b9ca530f228db, []int{0} +} +func (*Any) XXX_WellKnownType() string { return "Any" } +func (m *Any) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Any.Unmarshal(m, b) +} +func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Any.Marshal(b, m, deterministic) +} +func (dst *Any) XXX_Merge(src proto.Message) { + xxx_messageInfo_Any.Merge(dst, src) +} +func (m *Any) XXX_Size() int { + return xxx_messageInfo_Any.Size(m) +} +func (m *Any) XXX_DiscardUnknown() { + xxx_messageInfo_Any.DiscardUnknown(m) } -func (m *Any) Reset() { *m = Any{} } -func (m *Any) String() string { return proto.CompactTextString(m) } -func (*Any) ProtoMessage() {} -func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Any) XXX_WellKnownType() string { return "Any" } +var xxx_messageInfo_Any proto.InternalMessageInfo func (m *Any) GetTypeUrl() string { if m != nil { @@ -159,9 +172,9 @@ func init() { proto.RegisterType((*Any)(nil), "google.protobuf.Any") } -func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor0) } +func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_any_744b9ca530f228db) } -var fileDescriptor0 = []byte{ +var fileDescriptor_any_744b9ca530f228db = []byte{ // 185 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 
0xd3, 0x4f, 0xcc, 0xab, 0xd4, diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go index b2410a0..a7beb2c 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -1,16 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/duration.proto -/* -Package duration is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/duration.proto - -It has these top-level messages: - Duration -*/ -package duration +package duration // import "github.com/golang/protobuf/ptypes/duration" import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -91,21 +82,43 @@ type Duration struct { // Signed seconds of the span of time. Must be from -315,576,000,000 // to +315,576,000,000 inclusive. Note: these bounds are computed from: // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years - Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` // Signed fractions of a second at nanosecond resolution of the span // of time. Durations less than one second are represented with a 0 // `seconds` field and a positive or negative `nanos` field. For durations // of one second or more, a non-zero value for the `nanos` field must be // of the same sign as the `seconds` field. Must be from -999,999,999 // to +999,999,999 inclusive. 
- Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Duration) Reset() { *m = Duration{} } -func (m *Duration) String() string { return proto.CompactTextString(m) } -func (*Duration) ProtoMessage() {} -func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Duration) XXX_WellKnownType() string { return "Duration" } +func (m *Duration) Reset() { *m = Duration{} } +func (m *Duration) String() string { return proto.CompactTextString(m) } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { + return fileDescriptor_duration_e7d612259e3f0613, []int{0} +} +func (*Duration) XXX_WellKnownType() string { return "Duration" } +func (m *Duration) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Duration.Unmarshal(m, b) +} +func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Duration.Marshal(b, m, deterministic) +} +func (dst *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(dst, src) +} +func (m *Duration) XXX_Size() int { + return xxx_messageInfo_Duration.Size(m) +} +func (m *Duration) XXX_DiscardUnknown() { + xxx_messageInfo_Duration.DiscardUnknown(m) +} + +var xxx_messageInfo_Duration proto.InternalMessageInfo func (m *Duration) GetSeconds() int64 { if m != nil { @@ -125,9 +138,11 @@ func init() { proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") } -func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor0) } +func init() { + proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_duration_e7d612259e3f0613) +} -var fileDescriptor0 = []byte{ +var fileDescriptor_duration_e7d612259e3f0613 = []byte{ // 190 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go index e877b72..a69b403 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go @@ -1,16 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/empty.proto -/* -Package empty is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/empty.proto - -It has these top-level messages: - Empty -*/ -package empty +package empty // import "github.com/golang/protobuf/ptypes/empty" import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -37,21 +28,43 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // // The JSON representation for `Empty` is empty JSON object `{}`. 
type Empty struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { + return fileDescriptor_empty_39e6d6db0632e5b2, []int{0} +} +func (*Empty) XXX_WellKnownType() string { return "Empty" } +func (m *Empty) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Empty.Unmarshal(m, b) +} +func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Empty.Marshal(b, m, deterministic) +} +func (dst *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(dst, src) +} +func (m *Empty) XXX_Size() int { + return xxx_messageInfo_Empty.Size(m) +} +func (m *Empty) XXX_DiscardUnknown() { + xxx_messageInfo_Empty.DiscardUnknown(m) } -func (m *Empty) Reset() { *m = Empty{} } -func (m *Empty) String() string { return proto.CompactTextString(m) } -func (*Empty) ProtoMessage() {} -func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Empty) XXX_WellKnownType() string { return "Empty" } +var xxx_messageInfo_Empty proto.InternalMessageInfo func init() { proto.RegisterType((*Empty)(nil), "google.protobuf.Empty") } -func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor0) } +func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor_empty_39e6d6db0632e5b2) } -var fileDescriptor0 = []byte{ +var fileDescriptor_empty_39e6d6db0632e5b2 = []byte{ // 148 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28, diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/regen.sh b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/regen.sh deleted file mode 100644 index b50a941..0000000 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/regen.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash -e -# -# This script fetches and rebuilds the "well-known types" protocol buffers. -# To run this you will need protoc and goprotobuf installed; -# see https://github.com/golang/protobuf for instructions. -# You also need Go and Git installed. - -PKG=github.com/golang/protobuf/ptypes -UPSTREAM=https://github.com/google/protobuf -UPSTREAM_SUBDIR=src/google/protobuf -PROTO_FILES=(any duration empty struct timestamp wrappers) - -function die() { - echo 1>&2 $* - exit 1 -} - -# Sanity check that the right tools are accessible. -for tool in go git protoc protoc-gen-go; do - q=$(which $tool) || die "didn't find $tool" - echo 1>&2 "$tool: $q" -done - -tmpdir=$(mktemp -d -t regen-wkt.XXXXXX) -trap 'rm -rf $tmpdir' EXIT - -echo -n 1>&2 "finding package dir... " -pkgdir=$(go list -f '{{.Dir}}' $PKG) -echo 1>&2 $pkgdir -base=$(echo $pkgdir | sed "s,/$PKG\$,,") -echo 1>&2 "base: $base" -cd "$base" - -echo 1>&2 "fetching latest protos... " -git clone -q $UPSTREAM $tmpdir - -for file in ${PROTO_FILES[@]}; do - echo 1>&2 "* $file" - protoc --go_out=. 
-I$tmpdir/src $tmpdir/src/google/protobuf/$file.proto || die - cp $tmpdir/src/google/protobuf/$file.proto $PKG/$file -done - -echo 1>&2 "All OK" diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go index 4cfe608..2ac3e65 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go @@ -1,18 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/struct.proto -/* -Package structpb is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/struct.proto - -It has these top-level messages: - Struct - Value - ListValue -*/ -package structpb +package structpb // import "github.com/golang/protobuf/ptypes/struct" import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -50,8 +39,10 @@ var NullValue_value = map[string]int32{ func (x NullValue) String() string { return proto.EnumName(NullValue_name, int32(x)) } -func (NullValue) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (NullValue) XXX_WellKnownType() string { return "NullValue" } +func (NullValue) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_struct_3a5a94e0c7801b27, []int{0} +} +func (NullValue) XXX_WellKnownType() string { return "NullValue" } // `Struct` represents a structured data value, consisting of fields // which map to dynamically typed values. In some languages, `Struct` @@ -63,14 +54,36 @@ func (NullValue) XXX_WellKnownType() string { return "NullValue" } // The JSON representation for `Struct` is JSON object. type Struct struct { // Unordered map of dynamically typed values. 
- Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Struct) Reset() { *m = Struct{} } -func (m *Struct) String() string { return proto.CompactTextString(m) } -func (*Struct) ProtoMessage() {} -func (*Struct) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Struct) XXX_WellKnownType() string { return "Struct" } +func (m *Struct) Reset() { *m = Struct{} } +func (m *Struct) String() string { return proto.CompactTextString(m) } +func (*Struct) ProtoMessage() {} +func (*Struct) Descriptor() ([]byte, []int) { + return fileDescriptor_struct_3a5a94e0c7801b27, []int{0} +} +func (*Struct) XXX_WellKnownType() string { return "Struct" } +func (m *Struct) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Struct.Unmarshal(m, b) +} +func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Struct.Marshal(b, m, deterministic) +} +func (dst *Struct) XXX_Merge(src proto.Message) { + xxx_messageInfo_Struct.Merge(dst, src) +} +func (m *Struct) XXX_Size() int { + return xxx_messageInfo_Struct.Size(m) +} +func (m *Struct) XXX_DiscardUnknown() { + xxx_messageInfo_Struct.DiscardUnknown(m) +} + +var xxx_messageInfo_Struct proto.InternalMessageInfo func (m *Struct) GetFields() map[string]*Value { if m != nil { @@ -95,36 +108,58 @@ type Value struct { // *Value_BoolValue // *Value_StructValue // *Value_ListValue - Kind isValue_Kind `protobuf_oneof:"kind"` + Kind isValue_Kind `protobuf_oneof:"kind"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Value) Reset() { *m = Value{} } +func (m *Value) String() string { return proto.CompactTextString(m) } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { + return fileDescriptor_struct_3a5a94e0c7801b27, []int{1} +} +func (*Value) XXX_WellKnownType() string { return "Value" } +func (m *Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Value.Unmarshal(m, b) +} +func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Value.Marshal(b, m, deterministic) +} +func (dst *Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Value.Merge(dst, src) +} +func (m *Value) XXX_Size() int { + return xxx_messageInfo_Value.Size(m) +} +func (m *Value) XXX_DiscardUnknown() { + xxx_messageInfo_Value.DiscardUnknown(m) } -func (m *Value) Reset() { *m = Value{} } -func (m *Value) String() string { return proto.CompactTextString(m) } -func (*Value) ProtoMessage() {} -func (*Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } -func (*Value) XXX_WellKnownType() string { return "Value" } +var xxx_messageInfo_Value proto.InternalMessageInfo type isValue_Kind interface { isValue_Kind() } type Value_NullValue struct { - NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,enum=google.protobuf.NullValue,oneof"` + NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` } type Value_NumberValue struct { - NumberValue float64 
`protobuf:"fixed64,2,opt,name=number_value,json=numberValue,oneof"` + NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"` } type Value_StringValue struct { - StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,oneof"` + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"` } type Value_BoolValue struct { - BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,oneof"` + BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"` } type Value_StructValue struct { - StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,oneof"` + StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"` } type Value_ListValue struct { - ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,oneof"` + ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"` } func (*Value_NullValue) isValue_Kind() {} @@ -289,26 +324,26 @@ func _Value_OneofSizer(msg proto.Message) (n int) { // kind switch x := m.Kind.(type) { case *Value_NullValue: - n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += 1 // tag and wire n += proto.SizeVarint(uint64(x.NullValue)) case *Value_NumberValue: - n += proto.SizeVarint(2<<3 | proto.WireFixed64) + n += 1 // tag and wire n += 8 case *Value_StringValue: - n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += 1 // tag and wire n += proto.SizeVarint(uint64(len(x.StringValue))) n += len(x.StringValue) case *Value_BoolValue: - n += proto.SizeVarint(4<<3 | proto.WireVarint) + n += 1 // tag and wire n += 1 case *Value_StructValue: s := proto.Size(x.StructValue) - n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += 1 // tag and wire n += proto.SizeVarint(uint64(s)) n += s case *Value_ListValue: s := proto.Size(x.ListValue) - n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += 1 // tag and wire n += proto.SizeVarint(uint64(s)) n += s case nil: @@ -323,14 +358,36 @@ func _Value_OneofSizer(msg proto.Message) (n int) { // The JSON representation for `ListValue` is JSON array. type ListValue struct { // Repeated field of dynamically typed values. 
- Values []*Value `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"` + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ListValue) Reset() { *m = ListValue{} } -func (m *ListValue) String() string { return proto.CompactTextString(m) } -func (*ListValue) ProtoMessage() {} -func (*ListValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } -func (*ListValue) XXX_WellKnownType() string { return "ListValue" } +func (m *ListValue) Reset() { *m = ListValue{} } +func (m *ListValue) String() string { return proto.CompactTextString(m) } +func (*ListValue) ProtoMessage() {} +func (*ListValue) Descriptor() ([]byte, []int) { + return fileDescriptor_struct_3a5a94e0c7801b27, []int{2} +} +func (*ListValue) XXX_WellKnownType() string { return "ListValue" } +func (m *ListValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListValue.Unmarshal(m, b) +} +func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListValue.Marshal(b, m, deterministic) +} +func (dst *ListValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListValue.Merge(dst, src) +} +func (m *ListValue) XXX_Size() int { + return xxx_messageInfo_ListValue.Size(m) +} +func (m *ListValue) XXX_DiscardUnknown() { + xxx_messageInfo_ListValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ListValue proto.InternalMessageInfo func (m *ListValue) GetValues() []*Value { if m != nil { @@ -341,14 +398,17 @@ func (m *ListValue) GetValues() []*Value { func init() { proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") + proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry") proto.RegisterType((*Value)(nil), "google.protobuf.Value") proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) } -func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor0) } +func init() { + proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_struct_3a5a94e0c7801b27) +} -var fileDescriptor0 = []byte{ +var fileDescriptor_struct_3a5a94e0c7801b27 = []byte{ // 417 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40, 0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09, diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go index e23e4a2..8e76ae9 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -1,16 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/timestamp.proto -/* -Package timestamp is a generated protocol buffer package. 
- -It is generated from these files: - google/protobuf/timestamp.proto - -It has these top-level messages: - Timestamp -*/ -package timestamp +package timestamp // import "github.com/golang/protobuf/ptypes/timestamp" import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -101,7 +92,7 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) // with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one // can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--) // to obtain a formatter capable of generating timestamps in this format. // // @@ -109,19 +100,41 @@ type Timestamp struct { // Represents seconds of UTC time since Unix epoch // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to // 9999-12-31T23:59:59Z inclusive. - Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` // Non-negative fractions of a second at nanosecond resolution. Negative // second values with fractions must still have non-negative nanos values // that count forward in time. Must be from 0 to 999,999,999 // inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (m *Timestamp) String() string { return proto.CompactTextString(m) } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_timestamp_b826e8e5fba671a8, []int{0} +} +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Timestamp.Unmarshal(m, b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) +} +func (dst *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(dst, src) +} +func (m *Timestamp) XXX_Size() int { + return xxx_messageInfo_Timestamp.Size(m) +} +func (m *Timestamp) XXX_DiscardUnknown() { + xxx_messageInfo_Timestamp.DiscardUnknown(m) +} + +var xxx_messageInfo_Timestamp proto.InternalMessageInfo func (m *Timestamp) GetSeconds() int64 { if m != nil { @@ -141,9 +154,11 @@ func init() { proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") } -func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor0) } +func init() { + proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_timestamp_b826e8e5fba671a8) +} -var fileDescriptor0 = []byte{ +var fileDescriptor_timestamp_b826e8e5fba671a8 = []byte{ // 191 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 
0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto index b7cbd17..06750ab 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto @@ -114,7 +114,7 @@ option objc_class_prefix = "GPB"; // to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) // with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one // can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--) // to obtain a formatter capable of generating timestamps in this format. // // diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go index 0ed59bf..0f0fa83 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go @@ -1,24 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/wrappers.proto -/* -Package wrappers is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/wrappers.proto - -It has these top-level messages: - DoubleValue - FloatValue - Int64Value - UInt64Value - Int32Value - UInt32Value - BoolValue - StringValue - BytesValue -*/ -package wrappers +package wrappers // import "github.com/golang/protobuf/ptypes/wrappers" import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -40,14 +23,36 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // The JSON representation for `DoubleValue` is JSON number. type DoubleValue struct { // The double value. 
- Value float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *DoubleValue) Reset() { *m = DoubleValue{} } -func (m *DoubleValue) String() string { return proto.CompactTextString(m) } -func (*DoubleValue) ProtoMessage() {} -func (*DoubleValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } +func (m *DoubleValue) Reset() { *m = DoubleValue{} } +func (m *DoubleValue) String() string { return proto.CompactTextString(m) } +func (*DoubleValue) ProtoMessage() {} +func (*DoubleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{0} +} +func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } +func (m *DoubleValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DoubleValue.Unmarshal(m, b) +} +func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic) +} +func (dst *DoubleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleValue.Merge(dst, src) +} +func (m *DoubleValue) XXX_Size() int { + return xxx_messageInfo_DoubleValue.Size(m) +} +func (m *DoubleValue) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleValue.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleValue proto.InternalMessageInfo func (m *DoubleValue) GetValue() float64 { if m != nil { @@ -61,14 +66,36 @@ func (m *DoubleValue) GetValue() float64 { // The JSON representation for `FloatValue` is JSON number. type FloatValue struct { // The float value. - Value float32 `protobuf:"fixed32,1,opt,name=value" json:"value,omitempty"` + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *FloatValue) Reset() { *m = FloatValue{} } -func (m *FloatValue) String() string { return proto.CompactTextString(m) } -func (*FloatValue) ProtoMessage() {} -func (*FloatValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } -func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } +func (m *FloatValue) Reset() { *m = FloatValue{} } +func (m *FloatValue) String() string { return proto.CompactTextString(m) } +func (*FloatValue) ProtoMessage() {} +func (*FloatValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{1} +} +func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } +func (m *FloatValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FloatValue.Unmarshal(m, b) +} +func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic) +} +func (dst *FloatValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_FloatValue.Merge(dst, src) +} +func (m *FloatValue) XXX_Size() int { + return xxx_messageInfo_FloatValue.Size(m) +} +func (m *FloatValue) XXX_DiscardUnknown() { + xxx_messageInfo_FloatValue.DiscardUnknown(m) +} + +var xxx_messageInfo_FloatValue proto.InternalMessageInfo func (m *FloatValue) GetValue() float32 { if m != nil { @@ -82,14 +109,36 @@ func (m *FloatValue) GetValue() float32 { // The JSON representation for `Int64Value` is JSON string. 
type Int64Value struct { // The int64 value. - Value int64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Int64Value) Reset() { *m = Int64Value{} } -func (m *Int64Value) String() string { return proto.CompactTextString(m) } -func (*Int64Value) ProtoMessage() {} -func (*Int64Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } -func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" } +func (m *Int64Value) Reset() { *m = Int64Value{} } +func (m *Int64Value) String() string { return proto.CompactTextString(m) } +func (*Int64Value) ProtoMessage() {} +func (*Int64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{2} +} +func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" } +func (m *Int64Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Int64Value.Unmarshal(m, b) +} +func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic) +} +func (dst *Int64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int64Value.Merge(dst, src) +} +func (m *Int64Value) XXX_Size() int { + return xxx_messageInfo_Int64Value.Size(m) +} +func (m *Int64Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int64Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int64Value proto.InternalMessageInfo func (m *Int64Value) GetValue() int64 { if m != nil { @@ -103,14 +152,36 @@ func (m *Int64Value) GetValue() int64 { // The JSON representation for `UInt64Value` is JSON string. type UInt64Value struct { // The uint64 value. 
- Value uint64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt64Value) Reset() { *m = UInt64Value{} } +func (m *UInt64Value) String() string { return proto.CompactTextString(m) } +func (*UInt64Value) ProtoMessage() {} +func (*UInt64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{3} +} +func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } +func (m *UInt64Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UInt64Value.Unmarshal(m, b) +} +func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic) +} +func (dst *UInt64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt64Value.Merge(dst, src) +} +func (m *UInt64Value) XXX_Size() int { + return xxx_messageInfo_UInt64Value.Size(m) +} +func (m *UInt64Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt64Value.DiscardUnknown(m) } -func (m *UInt64Value) Reset() { *m = UInt64Value{} } -func (m *UInt64Value) String() string { return proto.CompactTextString(m) } -func (*UInt64Value) ProtoMessage() {} -func (*UInt64Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } -func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } +var xxx_messageInfo_UInt64Value proto.InternalMessageInfo func (m *UInt64Value) GetValue() uint64 { if m != nil { @@ -124,14 +195,36 @@ func (m *UInt64Value) GetValue() uint64 { // The JSON representation for `Int32Value` is JSON number. type Int32Value struct { // The int32 value. - Value int32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` + Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Int32Value) Reset() { *m = Int32Value{} } -func (m *Int32Value) String() string { return proto.CompactTextString(m) } -func (*Int32Value) ProtoMessage() {} -func (*Int32Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } -func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } +func (m *Int32Value) Reset() { *m = Int32Value{} } +func (m *Int32Value) String() string { return proto.CompactTextString(m) } +func (*Int32Value) ProtoMessage() {} +func (*Int32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{4} +} +func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } +func (m *Int32Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Int32Value.Unmarshal(m, b) +} +func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic) +} +func (dst *Int32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int32Value.Merge(dst, src) +} +func (m *Int32Value) XXX_Size() int { + return xxx_messageInfo_Int32Value.Size(m) +} +func (m *Int32Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int32Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int32Value proto.InternalMessageInfo func (m *Int32Value) GetValue() int32 { if m != nil { @@ -145,14 +238,36 @@ func (m *Int32Value) GetValue() int32 { // The JSON representation for `UInt32Value` is JSON number. 
type UInt32Value struct { // The uint32 value. - Value uint32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt32Value) Reset() { *m = UInt32Value{} } +func (m *UInt32Value) String() string { return proto.CompactTextString(m) } +func (*UInt32Value) ProtoMessage() {} +func (*UInt32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{5} +} +func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } +func (m *UInt32Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UInt32Value.Unmarshal(m, b) +} +func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic) +} +func (dst *UInt32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt32Value.Merge(dst, src) +} +func (m *UInt32Value) XXX_Size() int { + return xxx_messageInfo_UInt32Value.Size(m) +} +func (m *UInt32Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt32Value.DiscardUnknown(m) } -func (m *UInt32Value) Reset() { *m = UInt32Value{} } -func (m *UInt32Value) String() string { return proto.CompactTextString(m) } -func (*UInt32Value) ProtoMessage() {} -func (*UInt32Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } -func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } +var xxx_messageInfo_UInt32Value proto.InternalMessageInfo func (m *UInt32Value) GetValue() uint32 { if m != nil { @@ -166,14 +281,36 @@ func (m *UInt32Value) GetValue() uint32 { // The JSON representation for `BoolValue` is JSON `true` and `false`. type BoolValue struct { // The bool value. - Value bool `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` + Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BoolValue) Reset() { *m = BoolValue{} } +func (m *BoolValue) String() string { return proto.CompactTextString(m) } +func (*BoolValue) ProtoMessage() {} +func (*BoolValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{6} +} +func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } +func (m *BoolValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BoolValue.Unmarshal(m, b) +} +func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic) +} +func (dst *BoolValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BoolValue.Merge(dst, src) +} +func (m *BoolValue) XXX_Size() int { + return xxx_messageInfo_BoolValue.Size(m) +} +func (m *BoolValue) XXX_DiscardUnknown() { + xxx_messageInfo_BoolValue.DiscardUnknown(m) } -func (m *BoolValue) Reset() { *m = BoolValue{} } -func (m *BoolValue) String() string { return proto.CompactTextString(m) } -func (*BoolValue) ProtoMessage() {} -func (*BoolValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } -func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } +var xxx_messageInfo_BoolValue proto.InternalMessageInfo func (m *BoolValue) GetValue() bool { if m != nil { @@ -187,14 +324,36 @@ func (m *BoolValue) GetValue() bool { // The JSON representation for `StringValue` is JSON string. 
type StringValue struct { // The string value. - Value string `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"` + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *StringValue) Reset() { *m = StringValue{} } -func (m *StringValue) String() string { return proto.CompactTextString(m) } -func (*StringValue) ProtoMessage() {} -func (*StringValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } -func (*StringValue) XXX_WellKnownType() string { return "StringValue" } +func (m *StringValue) Reset() { *m = StringValue{} } +func (m *StringValue) String() string { return proto.CompactTextString(m) } +func (*StringValue) ProtoMessage() {} +func (*StringValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{7} +} +func (*StringValue) XXX_WellKnownType() string { return "StringValue" } +func (m *StringValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StringValue.Unmarshal(m, b) +} +func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StringValue.Marshal(b, m, deterministic) +} +func (dst *StringValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_StringValue.Merge(dst, src) +} +func (m *StringValue) XXX_Size() int { + return xxx_messageInfo_StringValue.Size(m) +} +func (m *StringValue) XXX_DiscardUnknown() { + xxx_messageInfo_StringValue.DiscardUnknown(m) +} + +var xxx_messageInfo_StringValue proto.InternalMessageInfo func (m *StringValue) GetValue() string { if m != nil { @@ -208,14 +367,36 @@ func (m *StringValue) GetValue() string { // The JSON representation for `BytesValue` is JSON string. type BytesValue struct { // The bytes value. 
- Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BytesValue) Reset() { *m = BytesValue{} } +func (m *BytesValue) String() string { return proto.CompactTextString(m) } +func (*BytesValue) ProtoMessage() {} +func (*BytesValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{8} +} +func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } +func (m *BytesValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BytesValue.Unmarshal(m, b) +} +func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic) +} +func (dst *BytesValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BytesValue.Merge(dst, src) +} +func (m *BytesValue) XXX_Size() int { + return xxx_messageInfo_BytesValue.Size(m) +} +func (m *BytesValue) XXX_DiscardUnknown() { + xxx_messageInfo_BytesValue.DiscardUnknown(m) } -func (m *BytesValue) Reset() { *m = BytesValue{} } -func (m *BytesValue) String() string { return proto.CompactTextString(m) } -func (*BytesValue) ProtoMessage() {} -func (*BytesValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } -func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } +var xxx_messageInfo_BytesValue proto.InternalMessageInfo func (m *BytesValue) GetValue() []byte { if m != nil { @@ -236,9 +417,11 @@ func init() { proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue") } -func init() { proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor0) } +func init() { + proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_wrappers_16c7c35c009f3253) +} -var fileDescriptor0 = []byte{ +var fileDescriptor_wrappers_16c7c35c009f3253 = []byte{ // 259 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c, diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/y/file_dsync.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/y/file_dsync.go index bd25c9e..3f3445e 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/y/file_dsync.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/y/file_dsync.go @@ -1,4 +1,4 @@ -// +build !dragonfly,!freebsd,!windows,!darwin +// +build !dragonfly,!freebsd,!windows /* * Copyright 2017 Dgraph Labs, Inc. and Contributors @@ -18,8 +18,8 @@ package y -import "syscall" +import "golang.org/x/sys/unix" func init() { - datasyncFileFlag = syscall.O_DSYNC + datasyncFileFlag = unix.O_DSYNC } diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/y/file_nodsync.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/y/file_nodsync.go index be295f2..b68be7a 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/y/file_nodsync.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/y/file_nodsync.go @@ -1,4 +1,4 @@ -// +build dragonfly freebsd windows darwin +// +build dragonfly freebsd windows /* * Copyright 2017 Dgraph Labs, Inc. 
and Contributors diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/y/watermark.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/y/watermark.go new file mode 100644 index 0000000..6fd3c89 --- /dev/null +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/y/watermark.go @@ -0,0 +1,130 @@ +/* + * Copyright 2018 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package y + +import ( + "container/heap" + "sync/atomic" + + "golang.org/x/net/trace" +) + +type uint64Heap []uint64 + +func (u uint64Heap) Len() int { return len(u) } +func (u uint64Heap) Less(i int, j int) bool { return u[i] < u[j] } +func (u uint64Heap) Swap(i int, j int) { u[i], u[j] = u[j], u[i] } +func (u *uint64Heap) Push(x interface{}) { *u = append(*u, x.(uint64)) } +func (u *uint64Heap) Pop() interface{} { + old := *u + n := len(old) + x := old[n-1] + *u = old[0 : n-1] + return x +} + +type mark struct { + readTs uint64 + done bool // Set to true if the pending mutation is done. +} +type WaterMark struct { + markCh chan mark + minReadTs uint64 + elog trace.EventLog +} + +// Init initializes a WaterMark struct. MUST be called before using it. +func (w *WaterMark) Init() { + w.markCh = make(chan mark, 1000) + w.elog = trace.NewEventLog("Badger", "MinReadTs") + go w.process() +} + +func (w *WaterMark) Begin(readTs uint64) { + w.markCh <- mark{readTs: readTs, done: false} +} +func (w *WaterMark) Done(readTs uint64) { + w.markCh <- mark{readTs: readTs, done: true} +} + +// DoneUntil returns the maximum index until which all tasks are done. +func (w *WaterMark) MinReadTs() uint64 { + return atomic.LoadUint64(&w.minReadTs) +} + +// process is used to process the Mark channel. This is not thread-safe, +// so only run one goroutine for process. One is sufficient, because +// all ops in the goroutine use only memory and cpu. +func (w *WaterMark) process() { + var reads uint64Heap + // pending maps raft proposal index to the number of pending mutations for this proposal. + pending := make(map[uint64]int) + + heap.Init(&reads) + var loop uint64 + + processOne := func(readTs uint64, done bool) { + // If not already done, then set. Otherwise, don't undo a done entry. + prev, present := pending[readTs] + if !present { + heap.Push(&reads, readTs) + } + + delta := 1 + if done { + delta = -1 + } + pending[readTs] = prev + delta + + loop++ + if len(reads) > 0 && loop%1000 == 0 { + min := reads[0] + w.elog.Printf("ReadTs: %4d. Size: %4d MinReadTs: %-4d Looking for: %-4d. Value: %d\n", + readTs, len(reads), w.MinReadTs(), min, pending[min]) + } + + // Update mark by going through all reads in order; and checking if they have + // been done. Stop at the first readTs, which isn't done. + minReadTs := w.MinReadTs() + // Don't assert that minReadTs < readTs, to avoid any inconsistencies caused by managed + // transactions, or testing where we explicitly set the readTs for transactions like in + // TestTxnVersions. 
+ until := minReadTs + loops := 0 + + for len(reads) > 0 { + min := reads[0] + if done := pending[min]; done != 0 { + break // len(reads) will be > 0. + } + heap.Pop(&reads) + delete(pending, min) + until = min + loops++ + } + if until != minReadTs { + AssertTrue(atomic.CompareAndSwapUint64(&w.minReadTs, minReadTs, until)) + w.elog.Printf("MinReadTs: %d. Loops: %d\n", until, loops) + } + } + + for mark := range w.markCh { + if mark.readTs > 0 { + processOne(mark.readTs, mark.done) + } + } +} diff --git a/sessiondb/badger/vendor/github.com/dgraph-io/badger/y/y.go b/sessiondb/badger/vendor/github.com/dgraph-io/badger/y/y.go index 20a8ea5..5927fd3 100644 --- a/sessiondb/badger/vendor/github.com/dgraph-io/badger/y/y.go +++ b/sessiondb/badger/vendor/github.com/dgraph-io/badger/y/y.go @@ -31,6 +31,15 @@ import ( // and encountering the end of slice. var ErrEOF = errors.New("End of mapped region") +const ( + // Sync indicates that O_DSYNC should be set on the underlying file, + // ensuring that data writes do not return until the data is flushed + // to disk. + Sync = 1 << iota + // ReadOnly opens the underlying file on a read-only basis. + ReadOnly +) + var ( // This is O_DSYNC (datasync) on platforms that support it -- see file_unix.go datasyncFileFlag = 0x0 @@ -39,13 +48,17 @@ var ( CastagnoliCrcTable = crc32.MakeTable(crc32.Castagnoli) ) -// OpenExistingSyncedFile opens an existing file, errors if it doesn't exist. -func OpenExistingSyncedFile(filename string, sync bool) (*os.File, error) { - flags := os.O_RDWR - if sync { - flags |= datasyncFileFlag +// OpenExistingFile opens an existing file, errors if it doesn't exist. +func OpenExistingFile(filename string, flags uint32) (*os.File, error) { + openFlags := os.O_RDWR + if flags&ReadOnly != 0 { + openFlags = os.O_RDONLY + } + + if flags&Sync != 0 { + openFlags |= datasyncFileFlag } - return os.OpenFile(filename, flags, 0) + return os.OpenFile(filename, openFlags, 0) } // CreateSyncedFile creates a new file (using O_EXCL), errors if it already existed. diff --git a/sessiondb/boltdb/database.go b/sessiondb/boltdb/database.go new file mode 100644 index 0000000..71ccec6 --- /dev/null +++ b/sessiondb/boltdb/database.go @@ -0,0 +1,361 @@ +package boltdb + +import ( + "errors" + "log" + "os" + "path/filepath" + "runtime" + "time" + + "github.com/kataras/go-sessions" + + bolt "github.com/etcd-io/bbolt" +) + +// DefaultFileMode used as the default database's "fileMode" +// for creating the sessions directory path, opening and write +// the session boltdb(file-based) storage. +var ( + DefaultFileMode = 0755 +) + +// Database the BoltDB(file-based) session storage. +type Database struct { + table []byte + // Service is the underline BoltDB database connection, + // it's initialized at `New` or `NewFromDB`. + // Can be used to get stats. + Service *bolt.DB +} + +var errPathMissing = errors.New("path is required") + +// New creates and returns a new BoltDB(file-based) storage +// instance based on the "path". +// Path should include the filename and the directory(aka fullpath), i.e sessions/store.db. +// +// It will remove any old session files. 
+func New(path string, fileMode os.FileMode) (*Database, error) { + if path == "" { + return nil, errPathMissing + } + + if fileMode <= 0 { + fileMode = os.FileMode(DefaultFileMode) + } + + // create directories if necessary + if err := os.MkdirAll(filepath.Dir(path), fileMode); err != nil { + return nil, err + } + + service, err := bolt.Open(path, fileMode, + &bolt.Options{Timeout: 20 * time.Second}, + ) + + if err != nil { + return nil, err + } + + return NewFromDB(service, "sessions") +} + +// NewFromDB same as `New` but accepts an already-created custom boltdb connection instead. +func NewFromDB(service *bolt.DB, bucketName string) (*Database, error) { + bucket := []byte(bucketName) + + service.Update(func(tx *bolt.Tx) (err error) { + _, err = tx.CreateBucketIfNotExists(bucket) + return + }) + + db := &Database{table: bucket, Service: service} + + runtime.SetFinalizer(db, closeDB) + return db, db.cleanup() +} + +func (db *Database) getBucket(tx *bolt.Tx) *bolt.Bucket { + return tx.Bucket(db.table) +} + +func (db *Database) getBucketForSession(tx *bolt.Tx, sid string) *bolt.Bucket { + b := db.getBucket(tx).Bucket([]byte(sid)) + if b == nil { + // session does not exist, it shouldn't happen, session bucket creation happens once at `Acquire`, + // no need to accept the `bolt.bucket.CreateBucketIfNotExists`'s performance cost. + return nil + } + + return b +} + +var ( + expirationBucketName = []byte("expiration") + delim = []byte("_") +) + +// expiration lives on its own bucket for each session bucket. +func getExpirationBucketName(bsid []byte) []byte { + return append(bsid, append(delim, expirationBucketName...)...) +} + +// Cleanup removes any invalid(have expired) session entries on initialization. +func (db *Database) cleanup() error { + return db.Service.Update(func(tx *bolt.Tx) error { + b := db.getBucket(tx) + c := b.Cursor() + // loop through all buckets, find one with expiration. + for bsid, v := c.First(); bsid != nil; bsid, v = c.Next() { + if len(bsid) == 0 { // empty key, continue to the next session bucket. + continue + } + + expirationName := getExpirationBucketName(bsid) + if bExp := b.Bucket(expirationName); bExp != nil { // has expiration. + _, expValue := bExp.Cursor().First() // the expiration bucket contains only one key(we don't care, see `Acquire`) value(time.Time) pair. + if expValue == nil { + log.Printf("cleanup: expiration is there but its value is empty '%s'\n", v) // should never happen. + continue + } + + var expirationTime time.Time + if err := sessions.DefaultTranscoder.Unmarshal(expValue, &expirationTime); err != nil { + log.Printf("cleanup: unable to retrieve expiration value for '%s'\n", v) + continue + } + + if expirationTime.Before(time.Now()) { + // expired, delete the expiration bucket. + if err := b.DeleteBucket(expirationName); err != nil { + log.Printf("cleanup: unable to destroy a session '%s'\n", bsid) + return err + } + + // and the session bucket, if any. + return b.DeleteBucket(bsid) + } + } + } + + return nil + }) +} + +var expirationKey = []byte("exp") // it can be random. + +// Acquire receives a session's lifetime from the database, +// if the return value is LifeTime{} then the session manager sets the life time based on the expiration duration lives in configuration. +func (db *Database) Acquire(sid string, expires time.Duration) (lifetime sessions.LifeTime) { + bsid := []byte(sid) + err := db.Service.Update(func(tx *bolt.Tx) (err error) { + root := db.getBucket(tx) + + if expires > 0 { // should check or create the expiration bucket. 
+ name := getExpirationBucketName(bsid) + b := root.Bucket(name) + if b == nil { + // not found, create a session bucket and an expiration bucket and save the given "expires" of time.Time, + // don't return a lifetime, let it empty, session manager will do its job. + b, err = root.CreateBucket(name) + if err != nil { + return err + } + + expirationTime := time.Now().Add(expires) + timeBytes, err := sessions.DefaultTranscoder.Marshal(expirationTime) + if err != nil { + return err + } + + if err := b.Put(expirationKey, timeBytes); err == nil { + // create the session bucket now, so the rest of the calls can be easly get the bucket without any further checks. + _, err = root.CreateBucket(bsid) + } + + return err + } + + // found, get the associated expiration bucket, wrap its value and return. + _, expValue := b.Cursor().First() + if expValue == nil { + return nil // does not expire. + } + + var expirationTime time.Time + if err = sessions.DefaultTranscoder.Unmarshal(expValue, &expirationTime); err != nil { + return + } + + lifetime = sessions.LifeTime{Time: expirationTime} + return nil + } + + // does not expire, just create the session bucket if not exists so we can be ready later on. + _, err = root.CreateBucketIfNotExists(bsid) + return + }) + + if err != nil { + return sessions.LifeTime{Time: sessions.CookieExpireDelete} + } + + return +} + +// OnUpdateExpiration will re-set the database's session's entry ttl. +func (db *Database) OnUpdateExpiration(sid string, newExpires time.Duration) error { + expirationTime := time.Now().Add(newExpires) + timeBytes, err := sessions.DefaultTranscoder.Marshal(expirationTime) + if err != nil { + return err + } + + return db.Service.Update(func(tx *bolt.Tx) error { + expirationName := getExpirationBucketName([]byte(sid)) + root := db.getBucket(tx) + b := root.Bucket(expirationName) + if b == nil { + // golog.Debugf("tried to reset the expiration value for '%s' while its configured lifetime is unlimited or the session is already expired and not found now", sid) + return sessions.ErrNotFound + } + + return b.Put(expirationKey, timeBytes) + }) +} + +func makeKey(key string) []byte { + return []byte(key) +} + +// Set sets a key value of a specific session. +// Ignore the "immutable". +func (db *Database) Set(sid string, lifetime sessions.LifeTime, key string, value interface{}, immutable bool) { + valueBytes, err := sessions.DefaultTranscoder.Marshal(value) + if err != nil { + return + } + + db.Service.Update(func(tx *bolt.Tx) error { + b := db.getBucketForSession(tx, sid) + if b == nil { + return nil + } + + // Author's notes: + // expiration is handlded by the session manager for the whole session, so the `db.Destroy` will be called when and if needed. + // Therefore we don't have to implement a TTL here, but we need a `db.Cleanup`, as we did previously, method to delete any expired if server restarted + // (badger does not need a `Cleanup` because we set the TTL based on the lifetime.DurationUntilExpiration()). + return b.Put(makeKey(key), valueBytes) + }) +} + +// Get retrieves a session value based on the key. +func (db *Database) Get(sid string, key string) (value interface{}) { + db.Service.View(func(tx *bolt.Tx) error { + b := db.getBucketForSession(tx, sid) + if b == nil { + return nil + } + + valueBytes := b.Get(makeKey(key)) + if len(valueBytes) == 0 { + return nil + } + + return sessions.DefaultTranscoder.Unmarshal(valueBytes, &value) + }) + + return +} + +// Visit loops through all session keys and values. 
+func (db *Database) Visit(sid string, cb func(key string, value interface{})) { + db.Service.View(func(tx *bolt.Tx) error { + b := db.getBucketForSession(tx, sid) + if b == nil { + return nil + } + + return b.ForEach(func(k []byte, v []byte) error { + var value interface{} + if err := sessions.DefaultTranscoder.Unmarshal(v, &value); err != nil { + return err + } + + cb(string(k), value) + return nil + }) + }) +} + +// Len returns the length of the session's entries (keys). +func (db *Database) Len(sid string) (n int) { + db.Service.View(func(tx *bolt.Tx) error { + b := db.getBucketForSession(tx, sid) + if b == nil { + return nil + } + + n = b.Stats().KeyN + return nil + }) + + return +} + +// Delete removes a session key value based on its key. +func (db *Database) Delete(sid string, key string) (deleted bool) { + err := db.Service.Update(func(tx *bolt.Tx) error { + b := db.getBucketForSession(tx, sid) + if b == nil { + return sessions.ErrNotFound + } + + return b.Delete(makeKey(key)) + }) + + return err == nil +} + +// Clear removes all session key values but it keeps the session entry. +func (db *Database) Clear(sid string) { + db.Service.Update(func(tx *bolt.Tx) error { + b := db.getBucketForSession(tx, sid) + if b == nil { + return nil + } + + return b.ForEach(func(k []byte, v []byte) error { + return b.Delete(k) + }) + }) +} + +// Release destroys the session, it clears and removes the session entry, +// session manager will create a new session ID on the next request after this call. +func (db *Database) Release(sid string) { + db.Service.Update(func(tx *bolt.Tx) error { + // delete the session bucket. + b := db.getBucket(tx) + bsid := []byte(sid) + // try to delete the associated expiration bucket, if exists, ignore error. + b.DeleteBucket(getExpirationBucketName(bsid)) + + if err := b.DeleteBucket(bsid); err != nil { + return err + } + + return nil + }) +} + +// Close shutdowns the BoltDB connection. +func (db *Database) Close() error { + return closeDB(db) +} + +func closeDB(db *Database) error { + return db.Service.Close() +} diff --git a/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/LICENSE b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/LICENSE new file mode 100644 index 0000000..004e77f --- /dev/null +++ b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Ben Johnson + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
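Editor's aside, not part of the patch: the new sessiondb/boltdb storage defined above plugs into the session manager like any other session database. Below is one plausible wiring for a plain net/http app; it assumes the manager's sessions.New, UseDatabase, Start, Set and GetString APIs as documented in the package README, so treat it as an illustration rather than an excerpt from the repository.

package main

import (
	"log"
	"net/http"
	"time"

	"github.com/kataras/go-sessions"
	"github.com/kataras/go-sessions/sessiondb/boltdb"
)

func main() {
	// boltdb.New(path, fileMode) is the constructor from the hunk above;
	// 0755 matches its DefaultFileMode.
	db, err := boltdb.New("./sessions/store.db", 0755)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	sess := sessions.New(sessions.Config{
		Cookie:  "mysessionid",
		Expires: 45 * time.Minute,
	})
	// Attach the file-based storage so session values survive restarts.
	sess.UseDatabase(db)

	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		s := sess.Start(w, r)
		s.Set("name", "go-sessions")
		w.Write([]byte("stored name: " + s.GetString("name")))
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}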
diff --git a/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_386.go b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_386.go new file mode 100644 index 0000000..4d35ee7 --- /dev/null +++ b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_386.go @@ -0,0 +1,10 @@ +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_amd64.go b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_amd64.go new file mode 100644 index 0000000..60a52da --- /dev/null +++ b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_amd64.go @@ -0,0 +1,10 @@ +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_arm.go b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_arm.go new file mode 100644 index 0000000..105d27d --- /dev/null +++ b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_arm.go @@ -0,0 +1,28 @@ +package bbolt + +import "unsafe" + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned bool + +func init() { + // Simple check to see whether this arch handles unaligned load/stores + // correctly. + + // ARM9 and older devices require load/stores to be from/to aligned + // addresses. If not, the lower 2 bits are cleared and that address is + // read in a jumbled up order. + + // See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html + + raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11} + val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2)) + + brokenUnaligned = val != 0x11222211 +} diff --git a/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_arm64.go b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_arm64.go new file mode 100644 index 0000000..f5aa2a5 --- /dev/null +++ b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_arm64.go @@ -0,0 +1,12 @@ +// +build arm64 + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_linux.go b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_linux.go new file mode 100644 index 0000000..7707bca --- /dev/null +++ b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_linux.go @@ -0,0 +1,10 @@ +package bbolt + +import ( + "syscall" +) + +// fdatasync flushes written data to a file descriptor. 
+func fdatasync(db *DB) error { + return syscall.Fdatasync(int(db.file.Fd())) +} diff --git a/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_mips64x.go b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_mips64x.go new file mode 100644 index 0000000..baeb289 --- /dev/null +++ b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_mips64x.go @@ -0,0 +1,12 @@ +// +build mips64 mips64le + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x8000000000 // 512GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_mipsx.go b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_mipsx.go new file mode 100644 index 0000000..2d9b1a9 --- /dev/null +++ b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_mipsx.go @@ -0,0 +1,12 @@ +// +build mips mipsle + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x40000000 // 1GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_openbsd.go b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_openbsd.go new file mode 100644 index 0000000..d7f5035 --- /dev/null +++ b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_openbsd.go @@ -0,0 +1,27 @@ +package bbolt + +import ( + "syscall" + "unsafe" +) + +const ( + msAsync = 1 << iota // perform asynchronous writes + msSync // perform synchronous writes + msInvalidate // invalidate cached data +) + +func msync(db *DB) error { + _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate) + if errno != 0 { + return errno + } + return nil +} + +func fdatasync(db *DB) error { + if db.data != nil { + return msync(db) + } + return db.file.Sync() +} diff --git a/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_ppc.go b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_ppc.go new file mode 100644 index 0000000..6980471 --- /dev/null +++ b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_ppc.go @@ -0,0 +1,12 @@ +// +build ppc + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_ppc64.go b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_ppc64.go new file mode 100644 index 0000000..3565908 --- /dev/null +++ b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_ppc64.go @@ -0,0 +1,12 @@ +// +build ppc64 + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? 
+var brokenUnaligned = false diff --git a/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_ppc64le.go b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_ppc64le.go new file mode 100644 index 0000000..422c7c6 --- /dev/null +++ b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_ppc64le.go @@ -0,0 +1,12 @@ +// +build ppc64le + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_s390x.go b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_s390x.go new file mode 100644 index 0000000..6d3fcb8 --- /dev/null +++ b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_s390x.go @@ -0,0 +1,12 @@ +// +build s390x + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_unix.go b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_unix.go new file mode 100644 index 0000000..38e11d4 --- /dev/null +++ b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_unix.go @@ -0,0 +1,94 @@ +// +build !windows,!plan9,!solaris + +package bbolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { + var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := db.file.Fd() + flag := syscall.LOCK_NB + if exclusive { + flag |= syscall.LOCK_EX + } else { + flag |= syscall.LOCK_SH + } + for { + // Attempt to obtain an exclusive lock. + err := syscall.Flock(int(fd), flag) + if err == nil { + return nil + } else if err != syscall.EWOULDBLOCK { + return err + } + + // If we timed out then return an error. + if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + + // Wait for a bit and try again. + time.Sleep(flockRetryTimeout) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + err = madvise(b, syscall.MADV_RANDOM) + if err != nil && err != syscall.ENOSYS { + // Ignore not implemented error in kernel because it still works. + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. 
+ err := syscall.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} + +// NOTE: This function is copied from stdlib because it is not available on darwin. +func madvise(b []byte, advice int) (err error) { + _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = e1 + } + return +} diff --git a/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_unix_solaris.go b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_unix_solaris.go new file mode 100644 index 0000000..492eaf3 --- /dev/null +++ b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_unix_solaris.go @@ -0,0 +1,89 @@ +package bbolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/unix" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { + var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := db.file.Fd() + var lockType int16 + if exclusive { + lockType = syscall.F_WRLCK + } else { + lockType = syscall.F_RDLCK + } + for { + // Attempt to obtain an exclusive lock. + lock := syscall.Flock_t{Type: lockType} + err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock) + if err == nil { + return nil + } else if err != syscall.EAGAIN { + return err + } + + // If we timed out then return an error. + if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + + // Wait for a bit and try again. + time.Sleep(flockRetryTimeout) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Type = syscall.F_UNLCK + lock.Whence = 0 + return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil { + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. 
+ err := unix.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} diff --git a/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_windows.go b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_windows.go new file mode 100644 index 0000000..4e3f90f --- /dev/null +++ b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bolt_windows.go @@ -0,0 +1,145 @@ +package bbolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" +) + +// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1 +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + procLockFileEx = modkernel32.NewProc("LockFileEx") + procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") +) + +const ( + lockExt = ".lock" + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx + flagLockExclusive = 2 + flagLockFailImmediately = 1 + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx + errLockViolation syscall.Errno = 0x21 +) + +func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) + if r == 0 { + return err + } + return nil +} + +func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0) + if r == 0 { + return err + } + return nil +} + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return db.file.Sync() +} + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { + // Create a separate lock file on windows because a process + // cannot share an exclusive lock on the same file. This is + // needed during Tx.WriteTo(). + f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode) + if err != nil { + return err + } + db.lockfile = f + + var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := f.Fd() + var flag uint32 = flagLockFailImmediately + if exclusive { + flag |= flagLockExclusive + } + for { + // Attempt to obtain an exclusive lock. + err := lockFileEx(syscall.Handle(fd), flag, 0, 1, 0, &syscall.Overlapped{}) + if err == nil { + return nil + } else if err != errLockViolation { + return err + } + + // If we timed oumercit then return an error. + if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + + // Wait for a bit and try again. + time.Sleep(flockRetryTimeout) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{}) + db.lockfile.Close() + os.Remove(db.path + lockExt) + return err +} + +// mmap memory maps a DB's data file. +// Based on: https://github.com/edsrzf/mmap-go +func mmap(db *DB, sz int) error { + if !db.readOnly { + // Truncate the database to the size of the mmap. + if err := db.file.Truncate(int64(sz)); err != nil { + return fmt.Errorf("truncate: %s", err) + } + } + + // Open a file mapping handle. 
+ sizelo := uint32(sz >> 32) + sizehi := uint32(sz) & 0xffffffff + h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil) + if h == 0 { + return os.NewSyscallError("CreateFileMapping", errno) + } + + // Create the memory map. + addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz)) + if addr == 0 { + return os.NewSyscallError("MapViewOfFile", errno) + } + + // Close mapping handle. + if err := syscall.CloseHandle(syscall.Handle(h)); err != nil { + return os.NewSyscallError("CloseHandle", err) + } + + // Convert to a byte array. + db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr))) + db.datasz = sz + + return nil +} + +// munmap unmaps a pointer from a file. +// Based on: https://github.com/edsrzf/mmap-go +func munmap(db *DB) error { + if db.data == nil { + return nil + } + + addr := (uintptr)(unsafe.Pointer(&db.data[0])) + if err := syscall.UnmapViewOfFile(addr); err != nil { + return os.NewSyscallError("UnmapViewOfFile", err) + } + return nil +} diff --git a/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/boltsync_unix.go b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/boltsync_unix.go new file mode 100644 index 0000000..9587afe --- /dev/null +++ b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/boltsync_unix.go @@ -0,0 +1,8 @@ +// +build !windows,!plan9,!linux,!openbsd + +package bbolt + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return db.file.Sync() +} diff --git a/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bucket.go b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bucket.go new file mode 100644 index 0000000..84bfd4d --- /dev/null +++ b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/bucket.go @@ -0,0 +1,775 @@ +package bbolt + +import ( + "bytes" + "fmt" + "unsafe" +) + +const ( + // MaxKeySize is the maximum length of a key, in bytes. + MaxKeySize = 32768 + + // MaxValueSize is the maximum length of a value, in bytes. + MaxValueSize = (1 << 31) - 2 +) + +const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) + +const ( + minFillPercent = 0.1 + maxFillPercent = 1.0 +) + +// DefaultFillPercent is the percentage that split pages are filled. +// This value can be changed by setting Bucket.FillPercent. +const DefaultFillPercent = 0.5 + +// Bucket represents a collection of key/value pairs inside the database. +type Bucket struct { + *bucket + tx *Tx // the associated transaction + buckets map[string]*Bucket // subbucket cache + page *page // inline page reference + rootNode *node // materialized node for the root page. + nodes map[pgid]*node // node cache + + // Sets the threshold for filling nodes when they split. By default, + // the bucket will fill to 50% but it can be useful to increase this + // amount if you know that your write workloads are mostly append-only. + // + // This is non-persisted across transactions so it must be set in every Tx. + FillPercent float64 +} + +// bucket represents the on-file representation of a bucket. +// This is stored as the "value" of a bucket key. If the bucket is small enough, +// then its root page can be stored inline in the "value", after the bucket +// header. In the case of inline buckets, the "root" will be 0. +type bucket struct { + root pgid // page id of the bucket's root-level page + sequence uint64 // monotonically incrementing, used by NextSequence() +} + +// newBucket returns a new bucket associated with a transaction. 
+func newBucket(tx *Tx) Bucket { + var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} + if tx.writable { + b.buckets = make(map[string]*Bucket) + b.nodes = make(map[pgid]*node) + } + return b +} + +// Tx returns the tx of the bucket. +func (b *Bucket) Tx() *Tx { + return b.tx +} + +// Root returns the root of the bucket. +func (b *Bucket) Root() pgid { + return b.root +} + +// Writable returns whether the bucket is writable. +func (b *Bucket) Writable() bool { + return b.tx.writable +} + +// Cursor creates a cursor associated with the bucket. +// The cursor is only valid as long as the transaction is open. +// Do not use a cursor after the transaction is closed. +func (b *Bucket) Cursor() *Cursor { + // Update transaction statistics. + b.tx.stats.CursorCount++ + + // Allocate and return a cursor. + return &Cursor{ + bucket: b, + stack: make([]elemRef, 0), + } +} + +// Bucket retrieves a nested bucket by name. +// Returns nil if the bucket does not exist. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) Bucket(name []byte) *Bucket { + if b.buckets != nil { + if child := b.buckets[string(name)]; child != nil { + return child + } + } + + // Move cursor to key. + c := b.Cursor() + k, v, flags := c.seek(name) + + // Return nil if the key doesn't exist or it is not a bucket. + if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 { + return nil + } + + // Otherwise create a bucket and cache it. + var child = b.openBucket(v) + if b.buckets != nil { + b.buckets[string(name)] = child + } + + return child +} + +// Helper method that re-interprets a sub-bucket value +// from a parent into a Bucket +func (b *Bucket) openBucket(value []byte) *Bucket { + var child = newBucket(b.tx) + + // If unaligned load/stores are broken on this arch and value is + // unaligned simply clone to an aligned byte array. + unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0 + + if unaligned { + value = cloneBytes(value) + } + + // If this is a writable transaction then we need to copy the bucket entry. + // Read-only transactions can point directly at the mmap entry. + if b.tx.writable && !unaligned { + child.bucket = &bucket{} + *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) + } else { + child.bucket = (*bucket)(unsafe.Pointer(&value[0])) + } + + // Save a reference to the inline page if the bucket is inline. + if child.root == 0 { + child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + } + + return &child +} + +// CreateBucket creates a new bucket at the given key and returns the new bucket. +// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { + if b.tx.db == nil { + return nil, ErrTxClosed + } else if !b.tx.writable { + return nil, ErrTxNotWritable + } else if len(key) == 0 { + return nil, ErrBucketNameRequired + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return an error if there is an existing key. + if bytes.Equal(key, k) { + if (flags & bucketLeafFlag) != 0 { + return nil, ErrBucketExists + } + return nil, ErrIncompatibleValue + } + + // Create empty, inline bucket. + var bucket = Bucket{ + bucket: &bucket{}, + rootNode: &node{isLeaf: true}, + FillPercent: DefaultFillPercent, + } + var value = bucket.write() + + // Insert into node. 
+ key = cloneBytes(key) + c.node().put(key, key, value, 0, bucketLeafFlag) + + // Since subbuckets are not allowed on inline buckets, we need to + // dereference the inline page, if it exists. This will cause the bucket + // to be treated as a regular, non-inline bucket for the rest of the tx. + b.page = nil + + return b.Bucket(key), nil +} + +// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. +// Returns an error if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { + child, err := b.CreateBucket(key) + if err == ErrBucketExists { + return b.Bucket(key), nil + } else if err != nil { + return nil, err + } + return child, nil +} + +// DeleteBucket deletes a bucket at the given key. +// Returns an error if the bucket does not exists, or if the key represents a non-bucket value. +func (b *Bucket) DeleteBucket(key []byte) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return an error if bucket doesn't exist or is not a bucket. + if !bytes.Equal(key, k) { + return ErrBucketNotFound + } else if (flags & bucketLeafFlag) == 0 { + return ErrIncompatibleValue + } + + // Recursively delete all child buckets. + child := b.Bucket(key) + err := child.ForEach(func(k, v []byte) error { + if v == nil { + if err := child.DeleteBucket(k); err != nil { + return fmt.Errorf("delete bucket: %s", err) + } + } + return nil + }) + if err != nil { + return err + } + + // Remove cached copy. + delete(b.buckets, string(key)) + + // Release all bucket pages to freelist. + child.nodes = nil + child.rootNode = nil + child.free() + + // Delete the node if we have a matching key. + c.node().del(key) + + return nil +} + +// Get retrieves the value for a key in the bucket. +// Returns a nil value if the key does not exist or if the key is a nested bucket. +// The returned value is only valid for the life of the transaction. +func (b *Bucket) Get(key []byte) []byte { + k, v, flags := b.Cursor().seek(key) + + // Return nil if this is a bucket. + if (flags & bucketLeafFlag) != 0 { + return nil + } + + // If our target node isn't the same key as what's passed in then return nil. + if !bytes.Equal(key, k) { + return nil + } + return v +} + +// Put sets the value for a key in the bucket. +// If the key exist then its previous value will be overwritten. +// Supplied value must remain valid for the life of the transaction. +// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large. +func (b *Bucket) Put(key []byte, value []byte) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } else if len(key) == 0 { + return ErrKeyRequired + } else if len(key) > MaxKeySize { + return ErrKeyTooLarge + } else if int64(len(value)) > MaxValueSize { + return ErrValueTooLarge + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return an error if there is an existing key with a bucket value. + if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + + // Insert into node. 
+ key = cloneBytes(key) + c.node().put(key, key, value, 0, 0) + + return nil +} + +// Delete removes a key from the bucket. +// If the key does not exist then nothing is done and a nil error is returned. +// Returns an error if the bucket was created from a read-only transaction. +func (b *Bucket) Delete(key []byte) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return nil if the key doesn't exist. + if !bytes.Equal(key, k) { + return nil + } + + // Return an error if there is already existing bucket value. + if (flags & bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + + // Delete the node if we have a matching key. + c.node().del(key) + + return nil +} + +// Sequence returns the current integer for the bucket without incrementing it. +func (b *Bucket) Sequence() uint64 { return b.bucket.sequence } + +// SetSequence updates the sequence number for the bucket. +func (b *Bucket) SetSequence(v uint64) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } + + // Materialize the root node if it hasn't been already so that the + // bucket will be saved during commit. + if b.rootNode == nil { + _ = b.node(b.root, nil) + } + + // Increment and return the sequence. + b.bucket.sequence = v + return nil +} + +// NextSequence returns an autoincrementing integer for the bucket. +func (b *Bucket) NextSequence() (uint64, error) { + if b.tx.db == nil { + return 0, ErrTxClosed + } else if !b.Writable() { + return 0, ErrTxNotWritable + } + + // Materialize the root node if it hasn't been already so that the + // bucket will be saved during commit. + if b.rootNode == nil { + _ = b.node(b.root, nil) + } + + // Increment and return the sequence. + b.bucket.sequence++ + return b.bucket.sequence, nil +} + +// ForEach executes a function for each key/value pair in a bucket. +// If the provided function returns an error then the iteration is stopped and +// the error is returned to the caller. The provided function must not modify +// the bucket; this will result in undefined behavior. +func (b *Bucket) ForEach(fn func(k, v []byte) error) error { + if b.tx.db == nil { + return ErrTxClosed + } + c := b.Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + if err := fn(k, v); err != nil { + return err + } + } + return nil +} + +// Stat returns stats on a bucket. +func (b *Bucket) Stats() BucketStats { + var s, subStats BucketStats + pageSize := b.tx.db.pageSize + s.BucketN += 1 + if b.root == 0 { + s.InlineBucketN += 1 + } + b.forEachPage(func(p *page, depth int) { + if (p.flags & leafPageFlag) != 0 { + s.KeyN += int(p.count) + + // used totals the used bytes for the page + used := pageHeaderSize + + if p.count != 0 { + // If page has any elements, add all element headers. + used += leafPageElementSize * int(p.count-1) + + // Add all element key, value sizes. + // The computation takes advantage of the fact that the position + // of the last element's key/value equals to the total of the sizes + // of all previous elements' keys and values. + // It also includes the last element's header. 
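Editor's aside, not part of the patch: bucket.go above is the API surface the new boltdb session database builds on, with every read and write going through Put, Get, ForEach and friends inside a transaction. A small self-contained sketch of that flow follows; the bucket and key names are invented for illustration.

package main

import (
	"fmt"
	"log"

	bolt "github.com/etcd-io/bbolt"
)

func main() {
	db, err := bolt.Open("example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Writes happen inside an Update (read-write) transaction.
	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("sessions"))
		if err != nil {
			return err
		}
		if err := b.Put([]byte("sid-123"), []byte("alice")); err != nil {
			return err
		}
		seq, _ := b.NextSequence() // per-bucket auto-incrementing counter
		fmt.Println("sequence:", seq)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}

	// Reads use a View (read-only) transaction; returned slices are only
	// valid while the transaction is open, as the doc comments above note.
	err = db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("sessions"))
		if b == nil {
			return nil
		}
		fmt.Printf("sid-123 => %s\n", b.Get([]byte("sid-123")))
		return b.ForEach(func(k, v []byte) error {
			fmt.Printf("%s = %s\n", k, v)
			return nil
		})
	})
	if err != nil {
		log.Fatal(err)
	}
}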
+ lastElement := p.leafPageElement(p.count - 1) + used += int(lastElement.pos + lastElement.ksize + lastElement.vsize) + } + + if b.root == 0 { + // For inlined bucket just update the inline stats + s.InlineBucketInuse += used + } else { + // For non-inlined bucket update all the leaf stats + s.LeafPageN++ + s.LeafInuse += used + s.LeafOverflowN += int(p.overflow) + + // Collect stats from sub-buckets. + // Do that by iterating over all element headers + // looking for the ones with the bucketLeafFlag. + for i := uint16(0); i < p.count; i++ { + e := p.leafPageElement(i) + if (e.flags & bucketLeafFlag) != 0 { + // For any bucket element, open the element value + // and recursively call Stats on the contained bucket. + subStats.Add(b.openBucket(e.value()).Stats()) + } + } + } + } else if (p.flags & branchPageFlag) != 0 { + s.BranchPageN++ + lastElement := p.branchPageElement(p.count - 1) + + // used totals the used bytes for the page + // Add header and all element headers. + used := pageHeaderSize + (branchPageElementSize * int(p.count-1)) + + // Add size of all keys and values. + // Again, use the fact that last element's position equals to + // the total of key, value sizes of all previous elements. + used += int(lastElement.pos + lastElement.ksize) + s.BranchInuse += used + s.BranchOverflowN += int(p.overflow) + } + + // Keep track of maximum page depth. + if depth+1 > s.Depth { + s.Depth = (depth + 1) + } + }) + + // Alloc stats can be computed from page counts and pageSize. + s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize + s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize + + // Add the max depth of sub-buckets to get total nested depth. + s.Depth += subStats.Depth + // Add the stats for all sub-buckets + s.Add(subStats) + return s +} + +// forEachPage iterates over every page in a bucket, including inline pages. +func (b *Bucket) forEachPage(fn func(*page, int)) { + // If we have an inline page then just use that. + if b.page != nil { + fn(b.page, 0) + return + } + + // Otherwise traverse the page hierarchy. + b.tx.forEachPage(b.root, 0, fn) +} + +// forEachPageNode iterates over every page (or node) in a bucket. +// This also includes inline pages. +func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) { + // If we have an inline page or root node then just use that. + if b.page != nil { + fn(b.page, nil, 0) + return + } + b._forEachPageNode(b.root, 0, fn) +} + +func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) { + var p, n = b.pageNode(pgid) + + // Execute function. + fn(p, n, depth) + + // Recursively loop over children. + if p != nil { + if (p.flags & branchPageFlag) != 0 { + for i := 0; i < int(p.count); i++ { + elem := p.branchPageElement(uint16(i)) + b._forEachPageNode(elem.pgid, depth+1, fn) + } + } + } else { + if !n.isLeaf { + for _, inode := range n.inodes { + b._forEachPageNode(inode.pgid, depth+1, fn) + } + } + } +} + +// spill writes all the nodes for this bucket to dirty pages. +func (b *Bucket) spill() error { + // Spill all child buckets first. + for name, child := range b.buckets { + // If the child bucket is small enough and it has no child buckets then + // write it inline into the parent bucket's page. Otherwise spill it + // like a normal bucket and make the parent value a pointer to the page. + var value []byte + if child.inlineable() { + child.free() + value = child.write() + } else { + if err := child.spill(); err != nil { + return err + } + + // Update the child bucket header in this bucket. 
+ value = make([]byte, unsafe.Sizeof(bucket{})) + var bucket = (*bucket)(unsafe.Pointer(&value[0])) + *bucket = *child.bucket + } + + // Skip writing the bucket if there are no materialized nodes. + if child.rootNode == nil { + continue + } + + // Update parent node. + var c = b.Cursor() + k, _, flags := c.seek([]byte(name)) + if !bytes.Equal([]byte(name), k) { + panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) + } + if flags&bucketLeafFlag == 0 { + panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) + } + c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) + } + + // Ignore if there's not a materialized root node. + if b.rootNode == nil { + return nil + } + + // Spill nodes. + if err := b.rootNode.spill(); err != nil { + return err + } + b.rootNode = b.rootNode.root() + + // Update the root node for this bucket. + if b.rootNode.pgid >= b.tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) + } + b.root = b.rootNode.pgid + + return nil +} + +// inlineable returns true if a bucket is small enough to be written inline +// and if it contains no subbuckets. Otherwise returns false. +func (b *Bucket) inlineable() bool { + var n = b.rootNode + + // Bucket must only contain a single leaf node. + if n == nil || !n.isLeaf { + return false + } + + // Bucket is not inlineable if it contains subbuckets or if it goes beyond + // our threshold for inline bucket size. + var size = pageHeaderSize + for _, inode := range n.inodes { + size += leafPageElementSize + len(inode.key) + len(inode.value) + + if inode.flags&bucketLeafFlag != 0 { + return false + } else if size > b.maxInlineBucketSize() { + return false + } + } + + return true +} + +// Returns the maximum total size of a bucket to make it a candidate for inlining. +func (b *Bucket) maxInlineBucketSize() int { + return b.tx.db.pageSize / 4 +} + +// write allocates and writes a bucket to a byte slice. +func (b *Bucket) write() []byte { + // Allocate the appropriate size. + var n = b.rootNode + var value = make([]byte, bucketHeaderSize+n.size()) + + // Write a bucket header. + var bucket = (*bucket)(unsafe.Pointer(&value[0])) + *bucket = *b.bucket + + // Convert byte slice to a fake page and write the root node. + var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + n.write(p) + + return value +} + +// rebalance attempts to balance all nodes. +func (b *Bucket) rebalance() { + for _, n := range b.nodes { + n.rebalance() + } + for _, child := range b.buckets { + child.rebalance() + } +} + +// node creates a node from a page and associates it with a given parent. +func (b *Bucket) node(pgid pgid, parent *node) *node { + _assert(b.nodes != nil, "nodes map expected") + + // Retrieve node if it's already been created. + if n := b.nodes[pgid]; n != nil { + return n + } + + // Otherwise create a node and cache it. + n := &node{bucket: b, parent: parent} + if parent == nil { + b.rootNode = n + } else { + parent.children = append(parent.children, n) + } + + // Use the inline page if this is an inline bucket. + var p = b.page + if p == nil { + p = b.tx.page(pgid) + } + + // Read the page into the node and cache it. + n.read(p) + b.nodes[pgid] = n + + // Update statistics. + b.tx.stats.NodeCount++ + + return n +} + +// free recursively frees all pages in the bucket. 
+func (b *Bucket) free() { + if b.root == 0 { + return + } + + var tx = b.tx + b.forEachPageNode(func(p *page, n *node, _ int) { + if p != nil { + tx.db.freelist.free(tx.meta.txid, p) + } else { + n.free() + } + }) + b.root = 0 +} + +// dereference removes all references to the old mmap. +func (b *Bucket) dereference() { + if b.rootNode != nil { + b.rootNode.root().dereference() + } + + for _, child := range b.buckets { + child.dereference() + } +} + +// pageNode returns the in-memory node, if it exists. +// Otherwise returns the underlying page. +func (b *Bucket) pageNode(id pgid) (*page, *node) { + // Inline buckets have a fake page embedded in their value so treat them + // differently. We'll return the rootNode (if available) or the fake page. + if b.root == 0 { + if id != 0 { + panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id)) + } + if b.rootNode != nil { + return nil, b.rootNode + } + return b.page, nil + } + + // Check the node cache for non-inline buckets. + if b.nodes != nil { + if n := b.nodes[id]; n != nil { + return nil, n + } + } + + // Finally lookup the page from the transaction if no node is materialized. + return b.tx.page(id), nil +} + +// BucketStats records statistics about resources used by a bucket. +type BucketStats struct { + // Page count statistics. + BranchPageN int // number of logical branch pages + BranchOverflowN int // number of physical branch overflow pages + LeafPageN int // number of logical leaf pages + LeafOverflowN int // number of physical leaf overflow pages + + // Tree statistics. + KeyN int // number of keys/value pairs + Depth int // number of levels in B+tree + + // Page size utilization. + BranchAlloc int // bytes allocated for physical branch pages + BranchInuse int // bytes actually used for branch data + LeafAlloc int // bytes allocated for physical leaf pages + LeafInuse int // bytes actually used for leaf data + + // Bucket statistics + BucketN int // total number of buckets including the top bucket + InlineBucketN int // total number on inlined buckets + InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse) +} + +func (s *BucketStats) Add(other BucketStats) { + s.BranchPageN += other.BranchPageN + s.BranchOverflowN += other.BranchOverflowN + s.LeafPageN += other.LeafPageN + s.LeafOverflowN += other.LeafOverflowN + s.KeyN += other.KeyN + if s.Depth < other.Depth { + s.Depth = other.Depth + } + s.BranchAlloc += other.BranchAlloc + s.BranchInuse += other.BranchInuse + s.LeafAlloc += other.LeafAlloc + s.LeafInuse += other.LeafInuse + + s.BucketN += other.BucketN + s.InlineBucketN += other.InlineBucketN + s.InlineBucketInuse += other.InlineBucketInuse +} + +// cloneBytes returns a copy of a given slice. +func cloneBytes(v []byte) []byte { + var clone = make([]byte, len(v)) + copy(clone, v) + return clone +} diff --git a/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/cursor.go b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/cursor.go new file mode 100644 index 0000000..3000ace --- /dev/null +++ b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/cursor.go @@ -0,0 +1,396 @@ +package bbolt + +import ( + "bytes" + "fmt" + "sort" +) + +// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order. +// Cursors see nested buckets with value == nil. +// Cursors can be obtained from a transaction and are valid as long as the transaction is open. +// +// Keys and values returned from the cursor are only valid for the life of the transaction. 
+// +// Changing data while traversing with a cursor may cause it to be invalidated +// and return unexpected keys and/or values. You must reposition your cursor +// after mutating data. +type Cursor struct { + bucket *Bucket + stack []elemRef +} + +// Bucket returns the bucket that this cursor was created from. +func (c *Cursor) Bucket() *Bucket { + return c.bucket +} + +// First moves the cursor to the first item in the bucket and returns its key and value. +// If the bucket is empty then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) First() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + c.stack = c.stack[:0] + p, n := c.bucket.pageNode(c.bucket.root) + c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) + c.first() + + // If we land on an empty page then move to the next value. + // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + c.next() + } + + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v + +} + +// Last moves the cursor to the last item in the bucket and returns its key and value. +// If the bucket is empty then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Last() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + c.stack = c.stack[:0] + p, n := c.bucket.pageNode(c.bucket.root) + ref := elemRef{page: p, node: n} + ref.index = ref.count() - 1 + c.stack = append(c.stack, ref) + c.last() + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Next moves the cursor to the next item in the bucket and returns its key and value. +// If the cursor is at the end of the bucket then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Next() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + k, v, flags := c.next() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Prev moves the cursor to the previous item in the bucket and returns its key and value. +// If the cursor is at the beginning of the bucket then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Prev() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + + // Attempt to move back one element until we're successful. + // Move up the stack as we hit the beginning of each page in our stack. + for i := len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index > 0 { + elem.index-- + break + } + c.stack = c.stack[:i] + } + + // If we've hit the end then return nil. + if len(c.stack) == 0 { + return nil, nil + } + + // Move down the stack to find the last element of the last leaf under this branch. + c.last() + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Seek moves the cursor to a given key and returns it. +// If the key does not exist then the next key is used. If no keys +// follow, a nil key is returned. +// The returned key and value are only valid for the life of the transaction. 
+func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { + k, v, flags := c.seek(seek) + + // If we ended up after the last element of a page then move to the next one. + if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() { + k, v, flags = c.next() + } + + if k == nil { + return nil, nil + } else if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Delete removes the current key/value under the cursor from the bucket. +// Delete fails if current key/value is a bucket or if the transaction is not writable. +func (c *Cursor) Delete() error { + if c.bucket.tx.db == nil { + return ErrTxClosed + } else if !c.bucket.Writable() { + return ErrTxNotWritable + } + + key, _, flags := c.keyValue() + // Return an error if current value is a bucket. + if (flags & bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + c.node().del(key) + + return nil +} + +// seek moves the cursor to a given key and returns it. +// If the key does not exist then the next key is used. +func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { + _assert(c.bucket.tx.db != nil, "tx closed") + + // Start from root page/node and traverse to correct page. + c.stack = c.stack[:0] + c.search(seek, c.bucket.root) + + // If this is a bucket then return a nil value. + return c.keyValue() +} + +// first moves the cursor to the first leaf element under the last page in the stack. +func (c *Cursor) first() { + for { + // Exit when we hit a leaf page. + var ref = &c.stack[len(c.stack)-1] + if ref.isLeaf() { + break + } + + // Keep adding pages pointing to the first element to the stack. + var pgid pgid + if ref.node != nil { + pgid = ref.node.inodes[ref.index].pgid + } else { + pgid = ref.page.branchPageElement(uint16(ref.index)).pgid + } + p, n := c.bucket.pageNode(pgid) + c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) + } +} + +// last moves the cursor to the last leaf element under the last page in the stack. +func (c *Cursor) last() { + for { + // Exit when we hit a leaf page. + ref := &c.stack[len(c.stack)-1] + if ref.isLeaf() { + break + } + + // Keep adding pages pointing to the last element in the stack. + var pgid pgid + if ref.node != nil { + pgid = ref.node.inodes[ref.index].pgid + } else { + pgid = ref.page.branchPageElement(uint16(ref.index)).pgid + } + p, n := c.bucket.pageNode(pgid) + + var nextRef = elemRef{page: p, node: n} + nextRef.index = nextRef.count() - 1 + c.stack = append(c.stack, nextRef) + } +} + +// next moves to the next leaf element and returns the key and value. +// If the cursor is at the last leaf element then it stays there and returns nil. +func (c *Cursor) next() (key []byte, value []byte, flags uint32) { + for { + // Attempt to move over one element until we're successful. + // Move up the stack as we hit the end of each page in our stack. + var i int + for i = len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index < elem.count()-1 { + elem.index++ + break + } + } + + // If we've hit the root page then stop and return. This will leave the + // cursor on the last element of the last page. + if i == -1 { + return nil, nil, 0 + } + + // Otherwise start from where we left off in the stack and find the + // first element of the first leaf page. + c.stack = c.stack[:i+1] + c.first() + + // If this is an empty page then restart and move back up the stack. 
+ // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + continue + } + + return c.keyValue() + } +} + +// search recursively performs a binary search against a given page/node until it finds a given key. +func (c *Cursor) search(key []byte, pgid pgid) { + p, n := c.bucket.pageNode(pgid) + if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { + panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) + } + e := elemRef{page: p, node: n} + c.stack = append(c.stack, e) + + // If we're on a leaf page/node then find the specific node. + if e.isLeaf() { + c.nsearch(key) + return + } + + if n != nil { + c.searchNode(key, n) + return + } + c.searchPage(key, p) +} + +func (c *Cursor) searchNode(key []byte, n *node) { + var exact bool + index := sort.Search(len(n.inodes), func(i int) bool { + // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. + // sort.Search() finds the lowest index where f() != -1 but we need the highest index. + ret := bytes.Compare(n.inodes[i].key, key) + if ret == 0 { + exact = true + } + return ret != -1 + }) + if !exact && index > 0 { + index-- + } + c.stack[len(c.stack)-1].index = index + + // Recursively search to the next page. + c.search(key, n.inodes[index].pgid) +} + +func (c *Cursor) searchPage(key []byte, p *page) { + // Binary search for the correct range. + inodes := p.branchPageElements() + + var exact bool + index := sort.Search(int(p.count), func(i int) bool { + // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. + // sort.Search() finds the lowest index where f() != -1 but we need the highest index. + ret := bytes.Compare(inodes[i].key(), key) + if ret == 0 { + exact = true + } + return ret != -1 + }) + if !exact && index > 0 { + index-- + } + c.stack[len(c.stack)-1].index = index + + // Recursively search to the next page. + c.search(key, inodes[index].pgid) +} + +// nsearch searches the leaf node on the top of the stack for a key. +func (c *Cursor) nsearch(key []byte) { + e := &c.stack[len(c.stack)-1] + p, n := e.page, e.node + + // If we have a node then search its inodes. + if n != nil { + index := sort.Search(len(n.inodes), func(i int) bool { + return bytes.Compare(n.inodes[i].key, key) != -1 + }) + e.index = index + return + } + + // If we have a page then search its leaf elements. + inodes := p.leafPageElements() + index := sort.Search(int(p.count), func(i int) bool { + return bytes.Compare(inodes[i].key(), key) != -1 + }) + e.index = index +} + +// keyValue returns the key and value of the current leaf element. +func (c *Cursor) keyValue() ([]byte, []byte, uint32) { + ref := &c.stack[len(c.stack)-1] + + // If the cursor is pointing to the end of page/node then return nil. + if ref.count() == 0 || ref.index >= ref.count() { + return nil, nil, 0 + } + + // Retrieve value from node. + if ref.node != nil { + inode := &ref.node.inodes[ref.index] + return inode.key, inode.value, inode.flags + } + + // Or retrieve value from page. + elem := ref.page.leafPageElement(uint16(ref.index)) + return elem.key(), elem.value(), elem.flags +} + +// node returns the node that the cursor is currently positioned on. +func (c *Cursor) node() *node { + _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") + + // If the top of the stack is a leaf node then just return it. + if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() { + return ref.node + } + + // Start from root and traverse down the hierarchy. 
+ var n = c.stack[0].node + if n == nil { + n = c.bucket.node(c.stack[0].page.id, nil) + } + for _, ref := range c.stack[:len(c.stack)-1] { + _assert(!n.isLeaf, "expected branch node") + n = n.childAt(int(ref.index)) + } + _assert(n.isLeaf, "expected leaf node") + return n +} + +// elemRef represents a reference to an element on a given page/node. +type elemRef struct { + page *page + node *node + index int +} + +// isLeaf returns whether the ref is pointing at a leaf page/node. +func (r *elemRef) isLeaf() bool { + if r.node != nil { + return r.node.isLeaf + } + return (r.page.flags & leafPageFlag) != 0 +} + +// count returns the number of inodes or page elements. +func (r *elemRef) count() int { + if r.node != nil { + return len(r.node.inodes) + } + return int(r.page.count) +} diff --git a/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/db.go b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/db.go new file mode 100644 index 0000000..4e38ab8 --- /dev/null +++ b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/db.go @@ -0,0 +1,1140 @@ +package bbolt + +import ( + "errors" + "fmt" + "hash/fnv" + "log" + "os" + "runtime" + "sort" + "sync" + "time" + "unsafe" +) + +// The largest step that can be taken when remapping the mmap. +const maxMmapStep = 1 << 30 // 1GB + +// The data file format version. +const version = 2 + +// Represents a marker value to indicate that a file is a Bolt DB. +const magic uint32 = 0xED0CDAED + +const pgidNoFreelist pgid = 0xffffffffffffffff + +// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when +// syncing changes to a file. This is required as some operating systems, +// such as OpenBSD, do not have a unified buffer cache (UBC) and writes +// must be synchronized using the msync(2) syscall. +const IgnoreNoSync = runtime.GOOS == "openbsd" + +// Default values if not set in a DB instance. +const ( + DefaultMaxBatchSize int = 1000 + DefaultMaxBatchDelay = 10 * time.Millisecond + DefaultAllocSize = 16 * 1024 * 1024 +) + +// default page size for db is set to the OS page size. +var defaultPageSize = os.Getpagesize() + +// The time elapsed between consecutive file locking attempts. +const flockRetryTimeout = 50 * time.Millisecond + +// DB represents a collection of buckets persisted to a file on disk. +// All data access is performed through transactions which can be obtained through the DB. +// All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. +type DB struct { + // When enabled, the database will perform a Check() after every commit. + // A panic is issued if the database is in an inconsistent state. This + // flag has a large performance impact so it should only be used for + // debugging purposes. + StrictMode bool + + // Setting the NoSync flag will cause the database to skip fsync() + // calls after each commit. This can be useful when bulk loading data + // into a database and you can restart the bulk load in the event of + // a system failure or database corruption. Do not set this flag for + // normal use. + // + // If the package global IgnoreNoSync constant is true, this value is + // ignored. See the comment on that constant for more details. + // + // THIS IS UNSAFE. PLEASE USE WITH CAUTION. + NoSync bool + + // When true, skips syncing freelist to disk. This improves the database + // write performance under normal operation, but requires a full database + // re-sync during recovery. + NoFreelistSync bool + + // When true, skips the truncate call when growing the database. 
+ // Setting this to true is only safe on non-ext3/ext4 systems. + // Skipping truncation avoids preallocation of hard drive space and + // bypasses a truncate() and fsync() syscall on remapping. + // + // https://github.com/boltdb/bolt/issues/284 + NoGrowSync bool + + // If you want to read the entire database fast, you can set MmapFlag to + // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead. + MmapFlags int + + // MaxBatchSize is the maximum size of a batch. Default value is + // copied from DefaultMaxBatchSize in Open. + // + // If <=0, disables batching. + // + // Do not change concurrently with calls to Batch. + MaxBatchSize int + + // MaxBatchDelay is the maximum delay before a batch starts. + // Default value is copied from DefaultMaxBatchDelay in Open. + // + // If <=0, effectively disables batching. + // + // Do not change concurrently with calls to Batch. + MaxBatchDelay time.Duration + + // AllocSize is the amount of space allocated when the database + // needs to create new pages. This is done to amortize the cost + // of truncate() and fsync() when growing the data file. + AllocSize int + + path string + file *os.File + lockfile *os.File // windows only + dataref []byte // mmap'ed readonly, write throws SEGV + data *[maxMapSize]byte + datasz int + filesz int // current on disk file size + meta0 *meta + meta1 *meta + pageSize int + opened bool + rwtx *Tx + txs []*Tx + stats Stats + + freelist *freelist + freelistLoad sync.Once + + pagePool sync.Pool + + batchMu sync.Mutex + batch *batch + + rwlock sync.Mutex // Allows only one writer at a time. + metalock sync.Mutex // Protects meta page access. + mmaplock sync.RWMutex // Protects mmap access during remapping. + statlock sync.RWMutex // Protects stats access. + + ops struct { + writeAt func(b []byte, off int64) (n int, err error) + } + + // Read only mode. + // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately. + readOnly bool +} + +// Path returns the path to currently open database file. +func (db *DB) Path() string { + return db.path +} + +// GoString returns the Go string representation of the database. +func (db *DB) GoString() string { + return fmt.Sprintf("bolt.DB{path:%q}", db.path) +} + +// String returns the string representation of the database. +func (db *DB) String() string { + return fmt.Sprintf("DB<%q>", db.path) +} + +// Open creates and opens a database at the given path. +// If the file does not exist then it will be created automatically. +// Passing in nil options will cause Bolt to open the database with the default options. +func Open(path string, mode os.FileMode, options *Options) (*DB, error) { + db := &DB{ + opened: true, + } + // Set default options if no options are provided. + if options == nil { + options = DefaultOptions + } + db.NoSync = options.NoSync + db.NoGrowSync = options.NoGrowSync + db.MmapFlags = options.MmapFlags + db.NoFreelistSync = options.NoFreelistSync + + // Set default values for later DB operations. + db.MaxBatchSize = DefaultMaxBatchSize + db.MaxBatchDelay = DefaultMaxBatchDelay + db.AllocSize = DefaultAllocSize + + flag := os.O_RDWR + if options.ReadOnly { + flag = os.O_RDONLY + db.readOnly = true + } + + // Open data file and separate sync handler for metadata writes. + db.path = path + var err error + if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != nil { + _ = db.close() + return nil, err + } + + // Lock file so that other processes using Bolt in read-write mode cannot + // use the database at the same time. 
This would cause corruption since + // the two processes would write meta pages and free pages separately. + // The database file is locked exclusively (only one process can grab the lock) + // if !options.ReadOnly. + // The database file is locked using the shared lock (more than one process may + // hold a lock at the same time) otherwise (options.ReadOnly is set). + if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil { + db.lockfile = nil // make 'unused' happy. TODO: rework locks + _ = db.close() + return nil, err + } + + // Default values for test hooks + db.ops.writeAt = db.file.WriteAt + + if db.pageSize = options.PageSize; db.pageSize == 0 { + // Set the default page size to the OS page size. + db.pageSize = defaultPageSize + } + + // Initialize the database if it doesn't exist. + if info, err := db.file.Stat(); err != nil { + _ = db.close() + return nil, err + } else if info.Size() == 0 { + // Initialize new files with meta pages. + if err := db.init(); err != nil { + // clean up file descriptor on initialization fail + _ = db.close() + return nil, err + } + } else { + // Read the first meta page to determine the page size. + var buf [0x1000]byte + // If we can't read the page size, but can read a page, assume + // it's the same as the OS or one given -- since that's how the + // page size was chosen in the first place. + // + // If the first page is invalid and this OS uses a different + // page size than what the database was created with then we + // are out of luck and cannot access the database. + // + // TODO: scan for next page + if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) { + if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil { + db.pageSize = int(m.pageSize) + } + } else { + _ = db.close() + return nil, ErrInvalid + } + } + + // Initialize page pool. + db.pagePool = sync.Pool{ + New: func() interface{} { + return make([]byte, db.pageSize) + }, + } + + // Memory map the data file. + if err := db.mmap(options.InitialMmapSize); err != nil { + _ = db.close() + return nil, err + } + + if db.readOnly { + return db, nil + } + + db.loadFreelist() + + // Flush freelist when transitioning from no sync to sync so + // NoFreelistSync unaware boltdb can open the db later. + if !db.NoFreelistSync && !db.hasSyncedFreelist() { + tx, err := db.Begin(true) + if tx != nil { + err = tx.Commit() + } + if err != nil { + _ = db.close() + return nil, err + } + } + + // Mark the database as opened and return. + return db, nil +} + +// loadFreelist reads the freelist if it is synced, or reconstructs it +// by scanning the DB if it is not synced. It assumes there are no +// concurrent accesses being made to the freelist. +func (db *DB) loadFreelist() { + db.freelistLoad.Do(func() { + db.freelist = newFreelist() + if !db.hasSyncedFreelist() { + // Reconstruct free list by scanning the DB. + db.freelist.readIDs(db.freepages()) + } else { + // Read free list from freelist page. + db.freelist.read(db.page(db.meta().freelist)) + } + db.stats.FreePageN = len(db.freelist.ids) + }) +} + +func (db *DB) hasSyncedFreelist() bool { + return db.meta().freelist != pgidNoFreelist +} + +// mmap opens the underlying memory-mapped file and initializes the meta references. +// minsz is the minimum size that the new mmap can be. 
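// Illustrative usage sketch, not part of the vendored source: opening a
// database with an explicit lock timeout, per the file-locking behaviour
// described above. The file name "sessions.db" is a placeholder assumption;
// imports of "log", "time" and the bbolt package are assumed.
//
//	// Read-write (the default) takes an exclusive lock on the file; with a
//	// timeout set, Open returns ErrTimeout instead of blocking forever when
//	// another process already holds the lock.
//	db, err := bolt.Open("sessions.db", 0600, &bolt.Options{Timeout: time.Second})
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer db.Close()
//
//	// ReadOnly mode takes a shared lock instead, and a mutating transaction on
//	// such a handle fails with ErrDatabaseReadOnly:
//	//
//	//	ro, err := bolt.Open("sessions.db", 0600, &bolt.Options{ReadOnly: true})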
+func (db *DB) mmap(minsz int) error {
+	db.mmaplock.Lock()
+	defer db.mmaplock.Unlock()
+
+	info, err := db.file.Stat()
+	if err != nil {
+		return fmt.Errorf("mmap stat error: %s", err)
+	} else if int(info.Size()) < db.pageSize*2 {
+		return fmt.Errorf("file size too small")
+	}
+
+	// Ensure the size is at least the minimum size.
+	var size = int(info.Size())
+	if size < minsz {
+		size = minsz
+	}
+	size, err = db.mmapSize(size)
+	if err != nil {
+		return err
+	}
+
+	// Dereference all mmap references before unmapping.
+	if db.rwtx != nil {
+		db.rwtx.root.dereference()
+	}
+
+	// Unmap existing data before continuing.
+	if err := db.munmap(); err != nil {
+		return err
+	}
+
+	// Memory-map the data file as a byte slice.
+	if err := mmap(db, size); err != nil {
+		return err
+	}
+
+	// Save references to the meta pages.
+	db.meta0 = db.page(0).meta()
+	db.meta1 = db.page(1).meta()
+
+	// Validate the meta pages. We only return an error if both meta pages fail
+	// validation, since meta0 failing validation means that it wasn't saved
+	// properly -- but we can recover using meta1. And vice-versa.
+	err0 := db.meta0.validate()
+	err1 := db.meta1.validate()
+	if err0 != nil && err1 != nil {
+		return err0
+	}
+
+	return nil
+}
+
+// munmap unmaps the data file from memory.
+func (db *DB) munmap() error {
+	if err := munmap(db); err != nil {
+		return fmt.Errorf("unmap error: " + err.Error())
+	}
+	return nil
+}
+
+// mmapSize determines the appropriate size for the mmap given the current size
+// of the database. The minimum size is 32KB and doubles until it reaches 1GB.
+// Returns an error if the new mmap size is greater than the max allowed.
+func (db *DB) mmapSize(size int) (int, error) {
+	// Double the size from 32KB until 1GB.
+	for i := uint(15); i <= 30; i++ {
+		if size <= 1<<i {
+			return 1 << i, nil
+		}
+	}
+
+	// Verify the requested size is not above the maximum allowed.
+	if size > maxMapSize {
+		return 0, fmt.Errorf("mmap too large")
+	}
+
+	// If larger than 1GB then grow by 1GB at a time.
+	sz := int64(size)
+	if remainder := sz % int64(maxMmapStep); remainder > 0 {
+		sz += int64(maxMmapStep) - remainder
+	}
+
+	// Ensure that the mmap size is a multiple of the page size.
+	// This should always be true since we're incrementing in MBs.
+	pageSize := int64(db.pageSize)
+	if (sz % pageSize) != 0 {
+		sz = ((sz / pageSize) + 1) * pageSize
+	}
+
+	// If we've exceeded the max size then only grow up to the max size.
+	if sz > maxMapSize {
+		sz = maxMapSize
+	}
+
+	return int(sz), nil
+}
+
+// init creates a new database file and initializes its meta pages.
+func (db *DB) init() error {
+	// Create two meta pages on a buffer.
+	buf := make([]byte, db.pageSize*4)
+	for i := 0; i < 2; i++ {
+		p := db.pageInBuffer(buf[:], pgid(i))
+		p.id = pgid(i)
+		p.flags = metaPageFlag
+
+		// Initialize the meta page.
+		m := p.meta()
+		m.magic = magic
+		m.version = version
+		m.pageSize = uint32(db.pageSize)
+		m.freelist = 2
+		m.root = bucket{root: 3}
+		m.pgid = 4
+		m.txid = txid(i)
+		m.checksum = m.sum64()
+	}
+
+	// Write an empty freelist at page 3.
+	p := db.pageInBuffer(buf[:], pgid(2))
+	p.id = pgid(2)
+	p.flags = freelistPageFlag
+	p.count = 0
+
+	// Write an empty leaf page at page 4.
+	p = db.pageInBuffer(buf[:], pgid(3))
+	p.id = pgid(3)
+	p.flags = leafPageFlag
+	p.count = 0
+
+	// Write the buffer to our data file.
+	if _, err := db.ops.writeAt(buf, 0); err != nil {
+		return err
+	}
+	if err := fdatasync(db); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Close releases all database resources.
+// It will block waiting for any open transactions to finish +// before closing the database and returning. +func (db *DB) Close() error { + db.rwlock.Lock() + defer db.rwlock.Unlock() + + db.metalock.Lock() + defer db.metalock.Unlock() + + db.mmaplock.Lock() + defer db.mmaplock.Unlock() + + return db.close() +} + +func (db *DB) close() error { + if !db.opened { + return nil + } + + db.opened = false + + db.freelist = nil + + // Clear ops. + db.ops.writeAt = nil + + // Close the mmap. + if err := db.munmap(); err != nil { + return err + } + + // Close file handles. + if db.file != nil { + // No need to unlock read-only file. + if !db.readOnly { + // Unlock the file. + if err := funlock(db); err != nil { + log.Printf("bolt.Close(): funlock error: %s", err) + } + } + + // Close the file descriptor. + if err := db.file.Close(); err != nil { + return fmt.Errorf("db file close: %s", err) + } + db.file = nil + } + + db.path = "" + return nil +} + +// Begin starts a new transaction. +// Multiple read-only transactions can be used concurrently but only one +// write transaction can be used at a time. Starting multiple write transactions +// will cause the calls to block and be serialized until the current write +// transaction finishes. +// +// Transactions should not be dependent on one another. Opening a read +// transaction and a write transaction in the same goroutine can cause the +// writer to deadlock because the database periodically needs to re-mmap itself +// as it grows and it cannot do that while a read transaction is open. +// +// If a long running read transaction (for example, a snapshot transaction) is +// needed, you might want to set DB.InitialMmapSize to a large enough value +// to avoid potential blocking of write transaction. +// +// IMPORTANT: You must close read-only transactions after you are finished or +// else the database will not reclaim old pages. +func (db *DB) Begin(writable bool) (*Tx, error) { + if writable { + return db.beginRWTx() + } + return db.beginTx() +} + +func (db *DB) beginTx() (*Tx, error) { + // Lock the meta pages while we initialize the transaction. We obtain + // the meta lock before the mmap lock because that's the order that the + // write transaction will obtain them. + db.metalock.Lock() + + // Obtain a read-only lock on the mmap. When the mmap is remapped it will + // obtain a write lock so all transactions must finish before it can be + // remapped. + db.mmaplock.RLock() + + // Exit if the database is not open yet. + if !db.opened { + db.mmaplock.RUnlock() + db.metalock.Unlock() + return nil, ErrDatabaseNotOpen + } + + // Create a transaction associated with the database. + t := &Tx{} + t.init(db) + + // Keep track of transaction until it closes. + db.txs = append(db.txs, t) + n := len(db.txs) + + // Unlock the meta pages. + db.metalock.Unlock() + + // Update the transaction stats. + db.statlock.Lock() + db.stats.TxN++ + db.stats.OpenTxN = n + db.statlock.Unlock() + + return t, nil +} + +func (db *DB) beginRWTx() (*Tx, error) { + // If the database was opened with Options.ReadOnly, return an error. + if db.readOnly { + return nil, ErrDatabaseReadOnly + } + + // Obtain writer lock. This is released by the transaction when it closes. + // This enforces only one writer transaction at a time. + db.rwlock.Lock() + + // Once we have the writer lock then we can lock the meta pages so that + // we can set up the transaction. + db.metalock.Lock() + defer db.metalock.Unlock() + + // Exit if the database is not open yet. 
+ if !db.opened { + db.rwlock.Unlock() + return nil, ErrDatabaseNotOpen + } + + // Create a transaction associated with the database. + t := &Tx{writable: true} + t.init(db) + db.rwtx = t + db.freePages() + return t, nil +} + +// freePages releases any pages associated with closed read-only transactions. +func (db *DB) freePages() { + // Free all pending pages prior to earliest open transaction. + sort.Sort(txsById(db.txs)) + minid := txid(0xFFFFFFFFFFFFFFFF) + if len(db.txs) > 0 { + minid = db.txs[0].meta.txid + } + if minid > 0 { + db.freelist.release(minid - 1) + } + // Release unused txid extents. + for _, t := range db.txs { + db.freelist.releaseRange(minid, t.meta.txid-1) + minid = t.meta.txid + 1 + } + db.freelist.releaseRange(minid, txid(0xFFFFFFFFFFFFFFFF)) + // Any page both allocated and freed in an extent is safe to release. +} + +type txsById []*Tx + +func (t txsById) Len() int { return len(t) } +func (t txsById) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t txsById) Less(i, j int) bool { return t[i].meta.txid < t[j].meta.txid } + +// removeTx removes a transaction from the database. +func (db *DB) removeTx(tx *Tx) { + // Release the read lock on the mmap. + db.mmaplock.RUnlock() + + // Use the meta lock to restrict access to the DB object. + db.metalock.Lock() + + // Remove the transaction. + for i, t := range db.txs { + if t == tx { + last := len(db.txs) - 1 + db.txs[i] = db.txs[last] + db.txs[last] = nil + db.txs = db.txs[:last] + break + } + } + n := len(db.txs) + + // Unlock the meta pages. + db.metalock.Unlock() + + // Merge statistics. + db.statlock.Lock() + db.stats.OpenTxN = n + db.stats.TxStats.add(&tx.stats) + db.statlock.Unlock() +} + +// Update executes a function within the context of a read-write managed transaction. +// If no error is returned from the function then the transaction is committed. +// If an error is returned then the entire transaction is rolled back. +// Any error that is returned from the function or returned from the commit is +// returned from the Update() method. +// +// Attempting to manually commit or rollback within the function will cause a panic. +func (db *DB) Update(fn func(*Tx) error) error { + t, err := db.Begin(true) + if err != nil { + return err + } + + // Make sure the transaction rolls back in the event of a panic. + defer func() { + if t.db != nil { + t.rollback() + } + }() + + // Mark as a managed tx so that the inner function cannot manually commit. + t.managed = true + + // If an error is returned from the function then rollback and return error. + err = fn(t) + t.managed = false + if err != nil { + _ = t.Rollback() + return err + } + + return t.Commit() +} + +// View executes a function within the context of a managed read-only transaction. +// Any error that is returned from the function is returned from the View() method. +// +// Attempting to manually rollback within the function will cause a panic. +func (db *DB) View(fn func(*Tx) error) error { + t, err := db.Begin(false) + if err != nil { + return err + } + + // Make sure the transaction rolls back in the event of a panic. + defer func() { + if t.db != nil { + t.rollback() + } + }() + + // Mark as a managed tx so that the inner function cannot manually rollback. + t.managed = true + + // If an error is returned from the function then pass it through. + err = fn(t) + t.managed = false + if err != nil { + _ = t.Rollback() + return err + } + + return t.Rollback() +} + +// Batch calls fn as part of a batch. It behaves similar to Update, +// except: +// +// 1. 
concurrent Batch calls can be combined into a single Bolt +// transaction. +// +// 2. the function passed to Batch may be called multiple times, +// regardless of whether it returns error or not. +// +// This means that Batch function side effects must be idempotent and +// take permanent effect only after a successful return is seen in +// caller. +// +// The maximum batch size and delay can be adjusted with DB.MaxBatchSize +// and DB.MaxBatchDelay, respectively. +// +// Batch is only useful when there are multiple goroutines calling it. +func (db *DB) Batch(fn func(*Tx) error) error { + errCh := make(chan error, 1) + + db.batchMu.Lock() + if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) { + // There is no existing batch, or the existing batch is full; start a new one. + db.batch = &batch{ + db: db, + } + db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) + } + db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) + if len(db.batch.calls) >= db.MaxBatchSize { + // wake up batch, it's ready to run + go db.batch.trigger() + } + db.batchMu.Unlock() + + err := <-errCh + if err == trySolo { + err = db.Update(fn) + } + return err +} + +type call struct { + fn func(*Tx) error + err chan<- error +} + +type batch struct { + db *DB + timer *time.Timer + start sync.Once + calls []call +} + +// trigger runs the batch if it hasn't already been run. +func (b *batch) trigger() { + b.start.Do(b.run) +} + +// run performs the transactions in the batch and communicates results +// back to DB.Batch. +func (b *batch) run() { + b.db.batchMu.Lock() + b.timer.Stop() + // Make sure no new work is added to this batch, but don't break + // other batches. + if b.db.batch == b { + b.db.batch = nil + } + b.db.batchMu.Unlock() + +retry: + for len(b.calls) > 0 { + var failIdx = -1 + err := b.db.Update(func(tx *Tx) error { + for i, c := range b.calls { + if err := safelyCall(c.fn, tx); err != nil { + failIdx = i + return err + } + } + return nil + }) + + if failIdx >= 0 { + // take the failing transaction out of the batch. it's + // safe to shorten b.calls here because db.batch no longer + // points to us, and we hold the mutex anyway. + c := b.calls[failIdx] + b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] + // tell the submitter re-run it solo, continue with the rest of the batch + c.err <- trySolo + continue retry + } + + // pass success, or bolt internal errors, to all callers + for _, c := range b.calls { + c.err <- err + } + break retry + } +} + +// trySolo is a special sentinel error value used for signaling that a +// transaction function should be re-run. It should never be seen by +// callers. +var trySolo = errors.New("batch function returned an error and should be re-run solo") + +type panicked struct { + reason interface{} +} + +func (p panicked) Error() string { + if err, ok := p.reason.(error); ok { + return err.Error() + } + return fmt.Sprintf("panic: %v", p.reason) +} + +func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { + defer func() { + if p := recover(); p != nil { + err = panicked{p} + } + }() + return fn(tx) +} + +// Sync executes fdatasync() against the database file handle. +// +// This is not necessary under normal operation, however, if you use NoSync +// then it allows you to force the database file to sync against the disk. +func (db *DB) Sync() error { return fdatasync(db) } + +// Stats retrieves ongoing performance stats for the database. +// This is only updated when a transaction closes. 
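// Illustrative usage sketch, not part of the vendored source: Update runs a
// single serialized read-write transaction, while Batch may coalesce several
// concurrent calls into one transaction and may re-run a caller's function, so
// that function must be idempotent (as the Batch documentation above notes).
// Bucket and key names are placeholder assumptions.
//
//	// One-off write from a single goroutine.
//	if err := db.Update(func(tx *bolt.Tx) error {
//		b, err := tx.CreateBucketIfNotExists([]byte("sessions"))
//		if err != nil {
//			return err
//		}
//		return b.Put([]byte("sid-1"), []byte("payload"))
//	}); err != nil {
//		log.Fatal(err)
//	}
//
//	// Many concurrent writers: Batch amortizes the commit/fsync cost. The
//	// closure may run more than once, so it must avoid external side effects.
//	if err := db.Batch(func(tx *bolt.Tx) error {
//		b, err := tx.CreateBucketIfNotExists([]byte("sessions"))
//		if err != nil {
//			return err
//		}
//		return b.Put([]byte("sid-2"), []byte("payload"))
//	}); err != nil {
//		log.Fatal(err)
//	}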
+func (db *DB) Stats() Stats { + db.statlock.RLock() + defer db.statlock.RUnlock() + return db.stats +} + +// This is for internal access to the raw data bytes from the C cursor, use +// carefully, or not at all. +func (db *DB) Info() *Info { + return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} +} + +// page retrieves a page reference from the mmap based on the current page size. +func (db *DB) page(id pgid) *page { + pos := id * pgid(db.pageSize) + return (*page)(unsafe.Pointer(&db.data[pos])) +} + +// pageInBuffer retrieves a page reference from a given byte array based on the current page size. +func (db *DB) pageInBuffer(b []byte, id pgid) *page { + return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)])) +} + +// meta retrieves the current meta page reference. +func (db *DB) meta() *meta { + // We have to return the meta with the highest txid which doesn't fail + // validation. Otherwise, we can cause errors when in fact the database is + // in a consistent state. metaA is the one with the higher txid. + metaA := db.meta0 + metaB := db.meta1 + if db.meta1.txid > db.meta0.txid { + metaA = db.meta1 + metaB = db.meta0 + } + + // Use higher meta page if valid. Otherwise fallback to previous, if valid. + if err := metaA.validate(); err == nil { + return metaA + } else if err := metaB.validate(); err == nil { + return metaB + } + + // This should never be reached, because both meta1 and meta0 were validated + // on mmap() and we do fsync() on every write. + panic("bolt.DB.meta(): invalid meta pages") +} + +// allocate returns a contiguous block of memory starting at a given page. +func (db *DB) allocate(txid txid, count int) (*page, error) { + // Allocate a temporary buffer for the page. + var buf []byte + if count == 1 { + buf = db.pagePool.Get().([]byte) + } else { + buf = make([]byte, count*db.pageSize) + } + p := (*page)(unsafe.Pointer(&buf[0])) + p.overflow = uint32(count - 1) + + // Use pages from the freelist if they are available. + if p.id = db.freelist.allocate(txid, count); p.id != 0 { + return p, nil + } + + // Resize mmap() if we're at the end. + p.id = db.rwtx.meta.pgid + var minsz = int((p.id+pgid(count))+1) * db.pageSize + if minsz >= db.datasz { + if err := db.mmap(minsz); err != nil { + return nil, fmt.Errorf("mmap allocate error: %s", err) + } + } + + // Move the page id high water mark. + db.rwtx.meta.pgid += pgid(count) + + return p, nil +} + +// grow grows the size of the database to the given sz. +func (db *DB) grow(sz int) error { + // Ignore if the new size is less than available file size. + if sz <= db.filesz { + return nil + } + + // If the data is smaller than the alloc size then only allocate what's needed. + // Once it goes over the allocation size then allocate in chunks. + if db.datasz < db.AllocSize { + sz = db.datasz + } else { + sz += db.AllocSize + } + + // Truncate and fsync to ensure file size metadata is flushed. 
+ // https://github.com/boltdb/bolt/issues/284 + if !db.NoGrowSync && !db.readOnly { + if runtime.GOOS != "windows" { + if err := db.file.Truncate(int64(sz)); err != nil { + return fmt.Errorf("file resize error: %s", err) + } + } + if err := db.file.Sync(); err != nil { + return fmt.Errorf("file sync error: %s", err) + } + } + + db.filesz = sz + return nil +} + +func (db *DB) IsReadOnly() bool { + return db.readOnly +} + +func (db *DB) freepages() []pgid { + tx, err := db.beginTx() + defer func() { + err = tx.Rollback() + if err != nil { + panic("freepages: failed to rollback tx") + } + }() + if err != nil { + panic("freepages: failed to open read only tx") + } + + reachable := make(map[pgid]*page) + nofreed := make(map[pgid]bool) + ech := make(chan error) + go func() { + for e := range ech { + panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e)) + } + }() + tx.checkBucket(&tx.root, reachable, nofreed, ech) + close(ech) + + var fids []pgid + for i := pgid(2); i < db.meta().pgid; i++ { + if _, ok := reachable[i]; !ok { + fids = append(fids, i) + } + } + return fids +} + +// Options represents the options that can be set when opening a database. +type Options struct { + // Timeout is the amount of time to wait to obtain a file lock. + // When set to zero it will wait indefinitely. This option is only + // available on Darwin and Linux. + Timeout time.Duration + + // Sets the DB.NoGrowSync flag before memory mapping the file. + NoGrowSync bool + + // Do not sync freelist to disk. This improves the database write performance + // under normal operation, but requires a full database re-sync during recovery. + NoFreelistSync bool + + // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to + // grab a shared lock (UNIX). + ReadOnly bool + + // Sets the DB.MmapFlags flag before memory mapping the file. + MmapFlags int + + // InitialMmapSize is the initial mmap size of the database + // in bytes. Read transactions won't block write transaction + // if the InitialMmapSize is large enough to hold database mmap + // size. (See DB.Begin for more information) + // + // If <=0, the initial map size is 0. + // If initialMmapSize is smaller than the previous database size, + // it takes no effect. + InitialMmapSize int + + // PageSize overrides the default OS page size. + PageSize int + + // NoSync sets the initial value of DB.NoSync. Normally this can just be + // set directly on the DB itself when returned from Open(), but this option + // is useful in APIs which expose Options but not the underlying DB. + NoSync bool +} + +// DefaultOptions represent the options used if nil options are passed into Open(). +// No timeout is used which will cause Bolt to wait indefinitely for a lock. +var DefaultOptions = &Options{ + Timeout: 0, + NoGrowSync: false, +} + +// Stats represents statistics about the database. +type Stats struct { + // Freelist stats + FreePageN int // total number of free pages on the freelist + PendingPageN int // total number of pending pages on the freelist + FreeAlloc int // total bytes allocated in free pages + FreelistInuse int // total bytes used by the freelist + + // Transaction stats + TxN int // total number of started read transactions + OpenTxN int // number of currently open read transactions + + TxStats TxStats // global, ongoing stats. +} + +// Sub calculates and returns the difference between two sets of database stats. 
+// This is useful when obtaining stats at two different points and time and +// you need the performance counters that occurred within that time span. +func (s *Stats) Sub(other *Stats) Stats { + if other == nil { + return *s + } + var diff Stats + diff.FreePageN = s.FreePageN + diff.PendingPageN = s.PendingPageN + diff.FreeAlloc = s.FreeAlloc + diff.FreelistInuse = s.FreelistInuse + diff.TxN = s.TxN - other.TxN + diff.TxStats = s.TxStats.Sub(&other.TxStats) + return diff +} + +type Info struct { + Data uintptr + PageSize int +} + +type meta struct { + magic uint32 + version uint32 + pageSize uint32 + flags uint32 + root bucket + freelist pgid + pgid pgid + txid txid + checksum uint64 +} + +// validate checks the marker bytes and version of the meta page to ensure it matches this binary. +func (m *meta) validate() error { + if m.magic != magic { + return ErrInvalid + } else if m.version != version { + return ErrVersionMismatch + } else if m.checksum != 0 && m.checksum != m.sum64() { + return ErrChecksum + } + return nil +} + +// copy copies one meta object to another. +func (m *meta) copy(dest *meta) { + *dest = *m +} + +// write writes the meta onto a page. +func (m *meta) write(p *page) { + if m.root.root >= m.pgid { + panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) + } else if m.freelist >= m.pgid && m.freelist != pgidNoFreelist { + // TODO: reject pgidNoFreeList if !NoFreelistSync + panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) + } + + // Page id is either going to be 0 or 1 which we can determine by the transaction ID. + p.id = pgid(m.txid % 2) + p.flags |= metaPageFlag + + // Calculate the checksum. + m.checksum = m.sum64() + + m.copy(p.meta()) +} + +// generates the checksum for the meta. +func (m *meta) sum64() uint64 { + var h = fnv.New64a() + _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) + return h.Sum64() +} + +// _assert will panic with a given formatted message if the given condition is false. +func _assert(condition bool, msg string, v ...interface{}) { + if !condition { + panic(fmt.Sprintf("assertion failed: "+msg, v...)) + } +} diff --git a/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/errors.go b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/errors.go new file mode 100644 index 0000000..48758ca --- /dev/null +++ b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/errors.go @@ -0,0 +1,71 @@ +package bbolt + +import "errors" + +// These errors can be returned when opening or calling methods on a DB. +var ( + // ErrDatabaseNotOpen is returned when a DB instance is accessed before it + // is opened or after it is closed. + ErrDatabaseNotOpen = errors.New("database not open") + + // ErrDatabaseOpen is returned when opening a database that is + // already open. + ErrDatabaseOpen = errors.New("database already open") + + // ErrInvalid is returned when both meta pages on a database are invalid. + // This typically occurs when a file is not a bolt database. + ErrInvalid = errors.New("invalid database") + + // ErrVersionMismatch is returned when the data file was created with a + // different version of Bolt. + ErrVersionMismatch = errors.New("version mismatch") + + // ErrChecksum is returned when either meta page checksum does not match. + ErrChecksum = errors.New("checksum error") + + // ErrTimeout is returned when a database cannot obtain an exclusive lock + // on the data file after the timeout passed to Open(). 
+ ErrTimeout = errors.New("timeout") +) + +// These errors can occur when beginning or committing a Tx. +var ( + // ErrTxNotWritable is returned when performing a write operation on a + // read-only transaction. + ErrTxNotWritable = errors.New("tx not writable") + + // ErrTxClosed is returned when committing or rolling back a transaction + // that has already been committed or rolled back. + ErrTxClosed = errors.New("tx closed") + + // ErrDatabaseReadOnly is returned when a mutating transaction is started on a + // read-only database. + ErrDatabaseReadOnly = errors.New("database is in read-only mode") +) + +// These errors can occur when putting or deleting a value or a bucket. +var ( + // ErrBucketNotFound is returned when trying to access a bucket that has + // not been created yet. + ErrBucketNotFound = errors.New("bucket not found") + + // ErrBucketExists is returned when creating a bucket that already exists. + ErrBucketExists = errors.New("bucket already exists") + + // ErrBucketNameRequired is returned when creating a bucket with a blank name. + ErrBucketNameRequired = errors.New("bucket name required") + + // ErrKeyRequired is returned when inserting a zero-length key. + ErrKeyRequired = errors.New("key required") + + // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. + ErrKeyTooLarge = errors.New("key too large") + + // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. + ErrValueTooLarge = errors.New("value too large") + + // ErrIncompatibleValue is returned when trying create or delete a bucket + // on an existing non-bucket key or when trying to create or delete a + // non-bucket key on an existing bucket key. + ErrIncompatibleValue = errors.New("incompatible value") +) diff --git a/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/freelist.go b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/freelist.go new file mode 100644 index 0000000..e4bcb2d --- /dev/null +++ b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/freelist.go @@ -0,0 +1,333 @@ +package bbolt + +import ( + "fmt" + "sort" + "unsafe" +) + +// txPending holds a list of pgids and corresponding allocation txns +// that are pending to be freed. +type txPending struct { + ids []pgid + alloctx []txid // txids allocating the ids + lastReleaseBegin txid // beginning txid of last matching releaseRange +} + +// freelist represents a list of all pages that are available for allocation. +// It also tracks pages that have been freed but are still in use by open transactions. +type freelist struct { + ids []pgid // all free and available free page ids. + allocs map[pgid]txid // mapping of txid that allocated a pgid. + pending map[txid]*txPending // mapping of soon-to-be free page ids by tx. + cache map[pgid]bool // fast lookup of all free and pending page ids. +} + +// newFreelist returns an empty, initialized freelist. +func newFreelist() *freelist { + return &freelist{ + allocs: make(map[pgid]txid), + pending: make(map[txid]*txPending), + cache: make(map[pgid]bool), + } +} + +// size returns the size of the page after serialization. +func (f *freelist) size() int { + n := f.count() + if n >= 0xFFFF { + // The first element will be used to store the count. See freelist.write. 
+ n++ + } + return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n) +} + +// count returns count of pages on the freelist +func (f *freelist) count() int { + return f.free_count() + f.pending_count() +} + +// free_count returns count of free pages +func (f *freelist) free_count() int { + return len(f.ids) +} + +// pending_count returns count of pending pages +func (f *freelist) pending_count() int { + var count int + for _, txp := range f.pending { + count += len(txp.ids) + } + return count +} + +// copyall copies into dst a list of all free ids and all pending ids in one sorted list. +// f.count returns the minimum length required for dst. +func (f *freelist) copyall(dst []pgid) { + m := make(pgids, 0, f.pending_count()) + for _, txp := range f.pending { + m = append(m, txp.ids...) + } + sort.Sort(m) + mergepgids(dst, f.ids, m) +} + +// allocate returns the starting page id of a contiguous list of pages of a given size. +// If a contiguous block cannot be found then 0 is returned. +func (f *freelist) allocate(txid txid, n int) pgid { + if len(f.ids) == 0 { + return 0 + } + + var initial, previd pgid + for i, id := range f.ids { + if id <= 1 { + panic(fmt.Sprintf("invalid page allocation: %d", id)) + } + + // Reset initial page if this is not contiguous. + if previd == 0 || id-previd != 1 { + initial = id + } + + // If we found a contiguous block then remove it and return it. + if (id-initial)+1 == pgid(n) { + // If we're allocating off the beginning then take the fast path + // and just adjust the existing slice. This will use extra memory + // temporarily but the append() in free() will realloc the slice + // as is necessary. + if (i + 1) == n { + f.ids = f.ids[i+1:] + } else { + copy(f.ids[i-n+1:], f.ids[i+1:]) + f.ids = f.ids[:len(f.ids)-n] + } + + // Remove from the free cache. + for i := pgid(0); i < pgid(n); i++ { + delete(f.cache, initial+i) + } + f.allocs[initial] = txid + return initial + } + + previd = id + } + return 0 +} + +// free releases a page and its overflow for a given transaction id. +// If the page is already free then a panic will occur. +func (f *freelist) free(txid txid, p *page) { + if p.id <= 1 { + panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id)) + } + + // Free page and all its overflow pages. + txp := f.pending[txid] + if txp == nil { + txp = &txPending{} + f.pending[txid] = txp + } + allocTxid, ok := f.allocs[p.id] + if ok { + delete(f.allocs, p.id) + } else if (p.flags & freelistPageFlag) != 0 { + // Freelist is always allocated by prior tx. + allocTxid = txid - 1 + } + + for id := p.id; id <= p.id+pgid(p.overflow); id++ { + // Verify that page is not already free. + if f.cache[id] { + panic(fmt.Sprintf("page %d already freed", id)) + } + // Add to the freelist and cache. + txp.ids = append(txp.ids, id) + txp.alloctx = append(txp.alloctx, allocTxid) + f.cache[id] = true + } +} + +// release moves all page ids for a transaction id (or older) to the freelist. +func (f *freelist) release(txid txid) { + m := make(pgids, 0) + for tid, txp := range f.pending { + if tid <= txid { + // Move transaction's pending pages to the available freelist. + // Don't remove from the cache since the page is still free. + m = append(m, txp.ids...) + delete(f.pending, tid) + } + } + sort.Sort(m) + f.ids = pgids(f.ids).merge(m) +} + +// releaseRange moves pending pages allocated within an extent [begin,end] to the free list. 
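// Standalone sketch, not part of the vendored source: the contiguous-run scan
// that allocate (documented above) performs over the sorted free page ids,
// reduced to plain uint64 ids and stripped of the freelist bookkeeping.
//
//	// findRun returns the first id of a run of n consecutive ids in the
//	// sorted slice ids, or 0 if no such run exists.
//	func findRun(ids []uint64, n int) uint64 {
//		var initial, previd uint64
//		for _, id := range ids {
//			// Reset the candidate run when the sequence breaks.
//			if previd == 0 || id-previd != 1 {
//				initial = id
//			}
//			// A run of length n ending at id starts at initial.
//			if id-initial+1 == uint64(n) {
//				return initial
//			}
//			previd = id
//		}
//		return 0
//	}
//
//	// findRun([]uint64{3, 4, 5, 9, 10}, 3) == 3
//	// findRun([]uint64{3, 4, 5, 9, 10}, 4) == 0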
+func (f *freelist) releaseRange(begin, end txid) { + if begin > end { + return + } + var m pgids + for tid, txp := range f.pending { + if tid < begin || tid > end { + continue + } + // Don't recompute freed pages if ranges haven't updated. + if txp.lastReleaseBegin == begin { + continue + } + for i := 0; i < len(txp.ids); i++ { + if atx := txp.alloctx[i]; atx < begin || atx > end { + continue + } + m = append(m, txp.ids[i]) + txp.ids[i] = txp.ids[len(txp.ids)-1] + txp.ids = txp.ids[:len(txp.ids)-1] + txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1] + txp.alloctx = txp.alloctx[:len(txp.alloctx)-1] + i-- + } + txp.lastReleaseBegin = begin + if len(txp.ids) == 0 { + delete(f.pending, tid) + } + } + sort.Sort(m) + f.ids = pgids(f.ids).merge(m) +} + +// rollback removes the pages from a given pending tx. +func (f *freelist) rollback(txid txid) { + // Remove page ids from cache. + txp := f.pending[txid] + if txp == nil { + return + } + var m pgids + for i, pgid := range txp.ids { + delete(f.cache, pgid) + tx := txp.alloctx[i] + if tx == 0 { + continue + } + if tx != txid { + // Pending free aborted; restore page back to alloc list. + f.allocs[pgid] = tx + } else { + // Freed page was allocated by this txn; OK to throw away. + m = append(m, pgid) + } + } + // Remove pages from pending list and mark as free if allocated by txid. + delete(f.pending, txid) + sort.Sort(m) + f.ids = pgids(f.ids).merge(m) +} + +// freed returns whether a given page is in the free list. +func (f *freelist) freed(pgid pgid) bool { + return f.cache[pgid] +} + +// read initializes the freelist from a freelist page. +func (f *freelist) read(p *page) { + if (p.flags & freelistPageFlag) == 0 { + panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.id, p.typ())) + } + // If the page.count is at the max uint16 value (64k) then it's considered + // an overflow and the size of the freelist is stored as the first element. + idx, count := 0, int(p.count) + if count == 0xFFFF { + idx = 1 + count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0]) + } + + // Copy the list of page ids from the freelist. + if count == 0 { + f.ids = nil + } else { + ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx : idx+count] + f.ids = make([]pgid, len(ids)) + copy(f.ids, ids) + + // Make sure they're sorted. + sort.Sort(pgids(f.ids)) + } + + // Rebuild the page cache. + f.reindex() +} + +// read initializes the freelist from a given list of ids. +func (f *freelist) readIDs(ids []pgid) { + f.ids = ids + f.reindex() +} + +// write writes the page ids onto a freelist page. All free and pending ids are +// saved to disk since in the event of a program crash, all pending ids will +// become free. +func (f *freelist) write(p *page) error { + // Combine the old free pgids and pgids waiting on an open transaction. + + // Update the header flag. + p.flags |= freelistPageFlag + + // The page.count can only hold up to 64k elements so if we overflow that + // number then we handle it by putting the size in the first element. + lenids := f.count() + if lenids == 0 { + p.count = uint16(lenids) + } else if lenids < 0xFFFF { + p.count = uint16(lenids) + f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:]) + } else { + p.count = 0xFFFF + ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids) + f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:]) + } + + return nil +} + +// reload reads the freelist from a page and filters out pending items. 
+func (f *freelist) reload(p *page) { + f.read(p) + + // Build a cache of only pending pages. + pcache := make(map[pgid]bool) + for _, txp := range f.pending { + for _, pendingID := range txp.ids { + pcache[pendingID] = true + } + } + + // Check each page in the freelist and build a new available freelist + // with any pages not in the pending lists. + var a []pgid + for _, id := range f.ids { + if !pcache[id] { + a = append(a, id) + } + } + f.ids = a + + // Once the available list is rebuilt then rebuild the free cache so that + // it includes the available and pending free pages. + f.reindex() +} + +// reindex rebuilds the free cache based on available and pending free lists. +func (f *freelist) reindex() { + f.cache = make(map[pgid]bool, len(f.ids)) + for _, id := range f.ids { + f.cache[id] = true + } + for _, txp := range f.pending { + for _, pendingID := range txp.ids { + f.cache[pendingID] = true + } + } +} diff --git a/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/node.go b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/node.go new file mode 100644 index 0000000..6c3fa55 --- /dev/null +++ b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/node.go @@ -0,0 +1,604 @@ +package bbolt + +import ( + "bytes" + "fmt" + "sort" + "unsafe" +) + +// node represents an in-memory, deserialized page. +type node struct { + bucket *Bucket + isLeaf bool + unbalanced bool + spilled bool + key []byte + pgid pgid + parent *node + children nodes + inodes inodes +} + +// root returns the top-level node this node is attached to. +func (n *node) root() *node { + if n.parent == nil { + return n + } + return n.parent.root() +} + +// minKeys returns the minimum number of inodes this node should have. +func (n *node) minKeys() int { + if n.isLeaf { + return 1 + } + return 2 +} + +// size returns the size of the node after serialization. +func (n *node) size() int { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + len(item.key) + len(item.value) + } + return sz +} + +// sizeLessThan returns true if the node is less than a given size. +// This is an optimization to avoid calculating a large node when we only need +// to know if it fits inside a certain page size. +func (n *node) sizeLessThan(v int) bool { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + len(item.key) + len(item.value) + if sz >= v { + return false + } + } + return true +} + +// pageElementSize returns the size of each page element based on the type of node. +func (n *node) pageElementSize() int { + if n.isLeaf { + return leafPageElementSize + } + return branchPageElementSize +} + +// childAt returns the child node at a given index. +func (n *node) childAt(index int) *node { + if n.isLeaf { + panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) + } + return n.bucket.node(n.inodes[index].pgid, n) +} + +// childIndex returns the index of a given child node. +func (n *node) childIndex(child *node) int { + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) + return index +} + +// numChildren returns the number of children. +func (n *node) numChildren() int { + return len(n.inodes) +} + +// nextSibling returns the next node with the same parent. 
+func (n *node) nextSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index >= n.parent.numChildren()-1 { + return nil + } + return n.parent.childAt(index + 1) +} + +// prevSibling returns the previous node with the same parent. +func (n *node) prevSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index == 0 { + return nil + } + return n.parent.childAt(index - 1) +} + +// put inserts a key/value. +func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) { + if pgid >= n.bucket.tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)) + } else if len(oldKey) <= 0 { + panic("put: zero-length old key") + } else if len(newKey) <= 0 { + panic("put: zero-length new key") + } + + // Find insertion index. + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) + + // Add capacity and shift nodes if we don't have an exact match and need to insert. + exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) + if !exact { + n.inodes = append(n.inodes, inode{}) + copy(n.inodes[index+1:], n.inodes[index:]) + } + + inode := &n.inodes[index] + inode.flags = flags + inode.key = newKey + inode.value = value + inode.pgid = pgid + _assert(len(inode.key) > 0, "put: zero-length inode key") +} + +// del removes a key from the node. +func (n *node) del(key []byte) { + // Find index of key. + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) + + // Exit if the key isn't found. + if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { + return + } + + // Delete inode from the node. + n.inodes = append(n.inodes[:index], n.inodes[index+1:]...) + + // Mark the node as needing rebalancing. + n.unbalanced = true +} + +// read initializes the node from a page. +func (n *node) read(p *page) { + n.pgid = p.id + n.isLeaf = ((p.flags & leafPageFlag) != 0) + n.inodes = make(inodes, int(p.count)) + + for i := 0; i < int(p.count); i++ { + inode := &n.inodes[i] + if n.isLeaf { + elem := p.leafPageElement(uint16(i)) + inode.flags = elem.flags + inode.key = elem.key() + inode.value = elem.value() + } else { + elem := p.branchPageElement(uint16(i)) + inode.pgid = elem.pgid + inode.key = elem.key() + } + _assert(len(inode.key) > 0, "read: zero-length inode key") + } + + // Save first key so we can find the node in the parent when we spill. + if len(n.inodes) > 0 { + n.key = n.inodes[0].key + _assert(len(n.key) > 0, "read: zero-length node key") + } else { + n.key = nil + } +} + +// write writes the items onto one or more pages. +func (n *node) write(p *page) { + // Initialize page. + if n.isLeaf { + p.flags |= leafPageFlag + } else { + p.flags |= branchPageFlag + } + + if len(n.inodes) >= 0xFFFF { + panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) + } + p.count = uint16(len(n.inodes)) + + // Stop here if there are no items to write. + if p.count == 0 { + return + } + + // Loop over each item and write it to the page. + b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):] + for i, item := range n.inodes { + _assert(len(item.key) > 0, "write: zero-length inode key") + + // Write the page element. 
+ if n.isLeaf { + elem := p.leafPageElement(uint16(i)) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.flags = item.flags + elem.ksize = uint32(len(item.key)) + elem.vsize = uint32(len(item.value)) + } else { + elem := p.branchPageElement(uint16(i)) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.ksize = uint32(len(item.key)) + elem.pgid = item.pgid + _assert(elem.pgid != p.id, "write: circular dependency occurred") + } + + // If the length of key+value is larger than the max allocation size + // then we need to reallocate the byte array pointer. + // + // See: https://github.com/boltdb/bolt/pull/335 + klen, vlen := len(item.key), len(item.value) + if len(b) < klen+vlen { + b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:] + } + + // Write data for the element to the end of the page. + copy(b[0:], item.key) + b = b[klen:] + copy(b[0:], item.value) + b = b[vlen:] + } + + // DEBUG ONLY: n.dump() +} + +// split breaks up a node into multiple smaller nodes, if appropriate. +// This should only be called from the spill() function. +func (n *node) split(pageSize int) []*node { + var nodes []*node + + node := n + for { + // Split node into two. + a, b := node.splitTwo(pageSize) + nodes = append(nodes, a) + + // If we can't split then exit the loop. + if b == nil { + break + } + + // Set node to b so it gets split on the next iteration. + node = b + } + + return nodes +} + +// splitTwo breaks up a node into two smaller nodes, if appropriate. +// This should only be called from the split() function. +func (n *node) splitTwo(pageSize int) (*node, *node) { + // Ignore the split if the page doesn't have at least enough nodes for + // two pages or if the nodes can fit in a single page. + if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) { + return n, nil + } + + // Determine the threshold before starting a new node. + var fillPercent = n.bucket.FillPercent + if fillPercent < minFillPercent { + fillPercent = minFillPercent + } else if fillPercent > maxFillPercent { + fillPercent = maxFillPercent + } + threshold := int(float64(pageSize) * fillPercent) + + // Determine split position and sizes of the two pages. + splitIndex, _ := n.splitIndex(threshold) + + // Split node into two separate nodes. + // If there's no parent then we'll need to create one. + if n.parent == nil { + n.parent = &node{bucket: n.bucket, children: []*node{n}} + } + + // Create a new node and add it to the parent. + next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent} + n.parent.children = append(n.parent.children, next) + + // Split inodes across two nodes. + next.inodes = n.inodes[splitIndex:] + n.inodes = n.inodes[:splitIndex] + + // Update the statistics. + n.bucket.tx.stats.Split++ + + return n, next +} + +// splitIndex finds the position where a page will fill a given threshold. +// It returns the index as well as the size of the first page. +// This is only be called from split(). +func (n *node) splitIndex(threshold int) (index, sz int) { + sz = pageHeaderSize + + // Loop until we only have the minimum number of keys required for the second page. + for i := 0; i < len(n.inodes)-minKeysPerPage; i++ { + index = i + inode := n.inodes[i] + elsize := n.pageElementSize() + len(inode.key) + len(inode.value) + + // If we have at least the minimum number of keys and adding another + // node would put us over the threshold then exit and return. 
+ if i >= minKeysPerPage && sz+elsize > threshold { + break + } + + // Add the element size to the total size. + sz += elsize + } + + return +} + +// spill writes the nodes to dirty pages and splits nodes as it goes. +// Returns an error if dirty pages cannot be allocated. +func (n *node) spill() error { + var tx = n.bucket.tx + if n.spilled { + return nil + } + + // Spill child nodes first. Child nodes can materialize sibling nodes in + // the case of split-merge so we cannot use a range loop. We have to check + // the children size on every loop iteration. + sort.Sort(n.children) + for i := 0; i < len(n.children); i++ { + if err := n.children[i].spill(); err != nil { + return err + } + } + + // We no longer need the child list because it's only used for spill tracking. + n.children = nil + + // Split nodes into appropriate sizes. The first node will always be n. + var nodes = n.split(tx.db.pageSize) + for _, node := range nodes { + // Add node's page to the freelist if it's not new. + if node.pgid > 0 { + tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) + node.pgid = 0 + } + + // Allocate contiguous space for the node. + p, err := tx.allocate((node.size() + tx.db.pageSize - 1) / tx.db.pageSize) + if err != nil { + return err + } + + // Write the node. + if p.id >= tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) + } + node.pgid = p.id + node.write(p) + node.spilled = true + + // Insert into parent inodes. + if node.parent != nil { + var key = node.key + if key == nil { + key = node.inodes[0].key + } + + node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) + node.key = node.inodes[0].key + _assert(len(node.key) > 0, "spill: zero-length node key") + } + + // Update the statistics. + tx.stats.Spill++ + } + + // If the root node split and created a new root then we need to spill that + // as well. We'll clear out the children to make sure it doesn't try to respill. + if n.parent != nil && n.parent.pgid == 0 { + n.children = nil + return n.parent.spill() + } + + return nil +} + +// rebalance attempts to combine the node with sibling nodes if the node fill +// size is below a threshold or if there are not enough keys. +func (n *node) rebalance() { + if !n.unbalanced { + return + } + n.unbalanced = false + + // Update statistics. + n.bucket.tx.stats.Rebalance++ + + // Ignore if node is above threshold (25%) and has enough keys. + var threshold = n.bucket.tx.db.pageSize / 4 + if n.size() > threshold && len(n.inodes) > n.minKeys() { + return + } + + // Root node has special handling. + if n.parent == nil { + // If root node is a branch and only has one node then collapse it. + if !n.isLeaf && len(n.inodes) == 1 { + // Move root's child up. + child := n.bucket.node(n.inodes[0].pgid, n) + n.isLeaf = child.isLeaf + n.inodes = child.inodes[:] + n.children = child.children + + // Reparent all child nodes being moved. + for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent = n + } + } + + // Remove old child. + child.parent = nil + delete(n.bucket.nodes, child.pgid) + child.free() + } + + return + } + + // If node has no keys then just remove it. + if n.numChildren() == 0 { + n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + n.parent.rebalance() + return + } + + _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") + + // Destination node is right sibling if idx == 0, otherwise left sibling. 
+ var target *node + var useNextSibling = (n.parent.childIndex(n) == 0) + if useNextSibling { + target = n.nextSibling() + } else { + target = n.prevSibling() + } + + // If both this node and the target node are too small then merge them. + if useNextSibling { + // Reparent all child nodes being moved. + for _, inode := range target.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = n + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes from target and remove target. + n.inodes = append(n.inodes, target.inodes...) + n.parent.del(target.key) + n.parent.removeChild(target) + delete(n.bucket.nodes, target.pgid) + target.free() + } else { + // Reparent all child nodes being moved. + for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = target + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes to target and remove node. + target.inodes = append(target.inodes, n.inodes...) + n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + } + + // Either this node or the target node was deleted from the parent so rebalance it. + n.parent.rebalance() +} + +// removes a node from the list of in-memory children. +// This does not affect the inodes. +func (n *node) removeChild(target *node) { + for i, child := range n.children { + if child == target { + n.children = append(n.children[:i], n.children[i+1:]...) + return + } + } +} + +// dereference causes the node to copy all its inode key/value references to heap memory. +// This is required when the mmap is reallocated so inodes are not pointing to stale data. +func (n *node) dereference() { + if n.key != nil { + key := make([]byte, len(n.key)) + copy(key, n.key) + n.key = key + _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") + } + + for i := range n.inodes { + inode := &n.inodes[i] + + key := make([]byte, len(inode.key)) + copy(key, inode.key) + inode.key = key + _assert(len(inode.key) > 0, "dereference: zero-length inode key") + + value := make([]byte, len(inode.value)) + copy(value, inode.value) + inode.value = value + } + + // Recursively dereference children. + for _, child := range n.children { + child.dereference() + } + + // Update statistics. + n.bucket.tx.stats.NodeDeref++ +} + +// free adds the node's underlying page to the freelist. +func (n *node) free() { + if n.pgid != 0 { + n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) + n.pgid = 0 + } +} + +// dump writes the contents of the node to STDERR for debugging purposes. +/* +func (n *node) dump() { + // Write node header. + var typ = "branch" + if n.isLeaf { + typ = "leaf" + } + warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes)) + + // Write out abbreviated version of each item. 
+ for _, item := range n.inodes { + if n.isLeaf { + if item.flags&bucketLeafFlag != 0 { + bucket := (*bucket)(unsafe.Pointer(&item.value[0])) + warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root) + } else { + warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4)) + } + } else { + warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid) + } + } + warn("") +} +*/ + +type nodes []*node + +func (s nodes) Len() int { return len(s) } +func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 } + +// inode represents an internal node inside of a node. +// It can be used to point to elements in a page or point +// to an element which hasn't been added to a page yet. +type inode struct { + flags uint32 + pgid pgid + key []byte + value []byte +} + +type inodes []inode diff --git a/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/page.go b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/page.go new file mode 100644 index 0000000..bca9615 --- /dev/null +++ b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/page.go @@ -0,0 +1,197 @@ +package bbolt + +import ( + "fmt" + "os" + "sort" + "unsafe" +) + +const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr)) + +const minKeysPerPage = 2 + +const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{})) +const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{})) + +const ( + branchPageFlag = 0x01 + leafPageFlag = 0x02 + metaPageFlag = 0x04 + freelistPageFlag = 0x10 +) + +const ( + bucketLeafFlag = 0x01 +) + +type pgid uint64 + +type page struct { + id pgid + flags uint16 + count uint16 + overflow uint32 + ptr uintptr +} + +// typ returns a human readable page type string used for debugging. +func (p *page) typ() string { + if (p.flags & branchPageFlag) != 0 { + return "branch" + } else if (p.flags & leafPageFlag) != 0 { + return "leaf" + } else if (p.flags & metaPageFlag) != 0 { + return "meta" + } else if (p.flags & freelistPageFlag) != 0 { + return "freelist" + } + return fmt.Sprintf("unknown<%02x>", p.flags) +} + +// meta returns a pointer to the metadata section of the page. +func (p *page) meta() *meta { + return (*meta)(unsafe.Pointer(&p.ptr)) +} + +// leafPageElement retrieves the leaf node by index +func (p *page) leafPageElement(index uint16) *leafPageElement { + n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index] + return n +} + +// leafPageElements retrieves a list of leaf nodes. +func (p *page) leafPageElements() []leafPageElement { + if p.count == 0 { + return nil + } + return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:] +} + +// branchPageElement retrieves the branch node by index +func (p *page) branchPageElement(index uint16) *branchPageElement { + return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index] +} + +// branchPageElements retrieves a list of branch nodes. +func (p *page) branchPageElements() []branchPageElement { + if p.count == 0 { + return nil + } + return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:] +} + +// dump writes n bytes of the page to STDERR as hex output. 
+func (p *page) hexdump(n int) { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n] + fmt.Fprintf(os.Stderr, "%x\n", buf) +} + +type pages []*page + +func (s pages) Len() int { return len(s) } +func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } + +// branchPageElement represents a node on a branch page. +type branchPageElement struct { + pos uint32 + ksize uint32 + pgid pgid +} + +// key returns a byte slice of the node key. +func (n *branchPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize] +} + +// leafPageElement represents a node on a leaf page. +type leafPageElement struct { + flags uint32 + pos uint32 + ksize uint32 + vsize uint32 +} + +// key returns a byte slice of the node key. +func (n *leafPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize] +} + +// value returns a byte slice of the node value. +func (n *leafPageElement) value() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize] +} + +// PageInfo represents human readable information about a page. +type PageInfo struct { + ID int + Type string + Count int + OverflowCount int +} + +type pgids []pgid + +func (s pgids) Len() int { return len(s) } +func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s pgids) Less(i, j int) bool { return s[i] < s[j] } + +// merge returns the sorted union of a and b. +func (a pgids) merge(b pgids) pgids { + // Return the opposite slice if one is nil. + if len(a) == 0 { + return b + } + if len(b) == 0 { + return a + } + merged := make(pgids, len(a)+len(b)) + mergepgids(merged, a, b) + return merged +} + +// mergepgids copies the sorted union of a and b into dst. +// If dst is too small, it panics. +func mergepgids(dst, a, b pgids) { + if len(dst) < len(a)+len(b) { + panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b))) + } + // Copy in the opposite slice if one is nil. + if len(a) == 0 { + copy(dst, b) + return + } + if len(b) == 0 { + copy(dst, a) + return + } + + // Merged will hold all elements from both lists. + merged := dst[:0] + + // Assign lead to the slice with a lower starting value, follow to the higher value. + lead, follow := a, b + if b[0] < a[0] { + lead, follow = b, a + } + + // Continue while there are elements in the lead. + for len(lead) > 0 { + // Merge largest prefix of lead that is ahead of follow[0]. + n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) + merged = append(merged, lead[:n]...) + if n >= len(lead) { + break + } + + // Swap lead and follow. + lead, follow = follow, lead[n:] + } + + // Append what's left in follow. + _ = append(merged, follow...) +} diff --git a/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/tx.go b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/tx.go new file mode 100644 index 0000000..f508641 --- /dev/null +++ b/sessiondb/boltdb/vendor/github.com/etcd-io/bbolt/tx.go @@ -0,0 +1,707 @@ +package bbolt + +import ( + "fmt" + "io" + "os" + "sort" + "strings" + "time" + "unsafe" +) + +// txid represents the internal transaction identifier. +type txid uint64 + +// Tx represents a read-only or read/write transaction on the database. +// Read-only transactions can be used for retrieving values for keys and creating cursors. 
+// Read/write transactions can create and remove buckets and create and remove keys. +// +// IMPORTANT: You must commit or rollback transactions when you are done with +// them. Pages can not be reclaimed by the writer until no more transactions +// are using them. A long running read transaction can cause the database to +// quickly grow. +type Tx struct { + writable bool + managed bool + db *DB + meta *meta + root Bucket + pages map[pgid]*page + stats TxStats + commitHandlers []func() + + // WriteFlag specifies the flag for write-related methods like WriteTo(). + // Tx opens the database file with the specified flag to copy the data. + // + // By default, the flag is unset, which works well for mostly in-memory + // workloads. For databases that are much larger than available RAM, + // set the flag to syscall.O_DIRECT to avoid trashing the page cache. + WriteFlag int +} + +// init initializes the transaction. +func (tx *Tx) init(db *DB) { + tx.db = db + tx.pages = nil + + // Copy the meta page since it can be changed by the writer. + tx.meta = &meta{} + db.meta().copy(tx.meta) + + // Copy over the root bucket. + tx.root = newBucket(tx) + tx.root.bucket = &bucket{} + *tx.root.bucket = tx.meta.root + + // Increment the transaction id and add a page cache for writable transactions. + if tx.writable { + tx.pages = make(map[pgid]*page) + tx.meta.txid += txid(1) + } +} + +// ID returns the transaction id. +func (tx *Tx) ID() int { + return int(tx.meta.txid) +} + +// DB returns a reference to the database that created the transaction. +func (tx *Tx) DB() *DB { + return tx.db +} + +// Size returns current database size in bytes as seen by this transaction. +func (tx *Tx) Size() int64 { + return int64(tx.meta.pgid) * int64(tx.db.pageSize) +} + +// Writable returns whether the transaction can perform write operations. +func (tx *Tx) Writable() bool { + return tx.writable +} + +// Cursor creates a cursor associated with the root bucket. +// All items in the cursor will return a nil value because all root bucket keys point to buckets. +// The cursor is only valid as long as the transaction is open. +// Do not use a cursor after the transaction is closed. +func (tx *Tx) Cursor() *Cursor { + return tx.root.Cursor() +} + +// Stats retrieves a copy of the current transaction statistics. +func (tx *Tx) Stats() TxStats { + return tx.stats +} + +// Bucket retrieves a bucket by name. +// Returns nil if the bucket does not exist. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) Bucket(name []byte) *Bucket { + return tx.root.Bucket(name) +} + +// CreateBucket creates a new bucket. +// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) { + return tx.root.CreateBucket(name) +} + +// CreateBucketIfNotExists creates a new bucket if it doesn't already exist. +// Returns an error if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) { + return tx.root.CreateBucketIfNotExists(name) +} + +// DeleteBucket deletes a bucket. +// Returns an error if the bucket cannot be found or if the key represents a non-bucket value. 
+func (tx *Tx) DeleteBucket(name []byte) error { + return tx.root.DeleteBucket(name) +} + +// ForEach executes a function for each bucket in the root. +// If the provided function returns an error then the iteration is stopped and +// the error is returned to the caller. +func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error { + return tx.root.ForEach(func(k, v []byte) error { + return fn(k, tx.root.Bucket(k)) + }) +} + +// OnCommit adds a handler function to be executed after the transaction successfully commits. +func (tx *Tx) OnCommit(fn func()) { + tx.commitHandlers = append(tx.commitHandlers, fn) +} + +// Commit writes all changes to disk and updates the meta page. +// Returns an error if a disk write error occurs, or if Commit is +// called on a read-only transaction. +func (tx *Tx) Commit() error { + _assert(!tx.managed, "managed tx commit not allowed") + if tx.db == nil { + return ErrTxClosed + } else if !tx.writable { + return ErrTxNotWritable + } + + // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. + + // Rebalance nodes which have had deletions. + var startTime = time.Now() + tx.root.rebalance() + if tx.stats.Rebalance > 0 { + tx.stats.RebalanceTime += time.Since(startTime) + } + + // spill data onto dirty pages. + startTime = time.Now() + if err := tx.root.spill(); err != nil { + tx.rollback() + return err + } + tx.stats.SpillTime += time.Since(startTime) + + // Free the old root bucket. + tx.meta.root.root = tx.root.root + + // Free the old freelist because commit writes out a fresh freelist. + if tx.meta.freelist != pgidNoFreelist { + tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) + } + + if !tx.db.NoFreelistSync { + err := tx.commitFreelist() + if err != nil { + return err + } + } else { + tx.meta.freelist = pgidNoFreelist + } + + // Write dirty pages to disk. + startTime = time.Now() + if err := tx.write(); err != nil { + tx.rollback() + return err + } + + // If strict mode is enabled then perform a consistency check. + // Only the first consistency error is reported in the panic. + if tx.db.StrictMode { + ch := tx.Check() + var errs []string + for { + err, ok := <-ch + if !ok { + break + } + errs = append(errs, err.Error()) + } + if len(errs) > 0 { + panic("check fail: " + strings.Join(errs, "\n")) + } + } + + // Write meta to disk. + if err := tx.writeMeta(); err != nil { + tx.rollback() + return err + } + tx.stats.WriteTime += time.Since(startTime) + + // Finalize the transaction. + tx.close() + + // Execute commit handlers now that the locks have been removed. + for _, fn := range tx.commitHandlers { + fn() + } + + return nil +} + +func (tx *Tx) commitFreelist() error { + // Allocate new pages for the new free list. This will overestimate + // the size of the freelist but not underestimate the size (which would be bad). + opgid := tx.meta.pgid + p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) + if err != nil { + tx.rollback() + return err + } + if err := tx.db.freelist.write(p); err != nil { + tx.rollback() + return err + } + tx.meta.freelist = p.id + // If the high water mark has moved up then attempt to grow the database. + if tx.meta.pgid > opgid { + if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { + tx.rollback() + return err + } + } + + return nil +} + +// Rollback closes the transaction and ignores all previous updates. Read-only +// transactions must be rolled back and not committed. 
+func (tx *Tx) Rollback() error { + _assert(!tx.managed, "managed tx rollback not allowed") + if tx.db == nil { + return ErrTxClosed + } + tx.rollback() + return nil +} + +func (tx *Tx) rollback() { + if tx.db == nil { + return + } + if tx.writable { + tx.db.freelist.rollback(tx.meta.txid) + tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) + } + tx.close() +} + +func (tx *Tx) close() { + if tx.db == nil { + return + } + if tx.writable { + // Grab freelist stats. + var freelistFreeN = tx.db.freelist.free_count() + var freelistPendingN = tx.db.freelist.pending_count() + var freelistAlloc = tx.db.freelist.size() + + // Remove transaction ref & writer lock. + tx.db.rwtx = nil + tx.db.rwlock.Unlock() + + // Merge statistics. + tx.db.statlock.Lock() + tx.db.stats.FreePageN = freelistFreeN + tx.db.stats.PendingPageN = freelistPendingN + tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize + tx.db.stats.FreelistInuse = freelistAlloc + tx.db.stats.TxStats.add(&tx.stats) + tx.db.statlock.Unlock() + } else { + tx.db.removeTx(tx) + } + + // Clear all references. + tx.db = nil + tx.meta = nil + tx.root = Bucket{tx: tx} + tx.pages = nil +} + +// Copy writes the entire database to a writer. +// This function exists for backwards compatibility. +// +// Deprecated; Use WriteTo() instead. +func (tx *Tx) Copy(w io.Writer) error { + _, err := tx.WriteTo(w) + return err +} + +// WriteTo writes the entire database to a writer. +// If err == nil then exactly tx.Size() bytes will be written into the writer. +func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { + // Attempt to open reader with WriteFlag + f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0) + if err != nil { + return 0, err + } + defer func() { + if cerr := f.Close(); err == nil { + err = cerr + } + }() + + // Generate a meta page. We use the same page data for both meta pages. + buf := make([]byte, tx.db.pageSize) + page := (*page)(unsafe.Pointer(&buf[0])) + page.flags = metaPageFlag + *page.meta() = *tx.meta + + // Write meta 0. + page.id = 0 + page.meta().checksum = page.meta().sum64() + nn, err := w.Write(buf) + n += int64(nn) + if err != nil { + return n, fmt.Errorf("meta 0 copy: %s", err) + } + + // Write meta 1 with a lower transaction id. + page.id = 1 + page.meta().txid -= 1 + page.meta().checksum = page.meta().sum64() + nn, err = w.Write(buf) + n += int64(nn) + if err != nil { + return n, fmt.Errorf("meta 1 copy: %s", err) + } + + // Move past the meta pages in the file. + if _, err := f.Seek(int64(tx.db.pageSize*2), io.SeekStart); err != nil { + return n, fmt.Errorf("seek: %s", err) + } + + // Copy data pages. + wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)) + n += wn + if err != nil { + return n, err + } + + return n, nil +} + +// CopyFile copies the entire database to file at the given path. +// A reader transaction is maintained during the copy so it is safe to continue +// using the database while a copy is in progress. +func (tx *Tx) CopyFile(path string, mode os.FileMode) error { + f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode) + if err != nil { + return err + } + + err = tx.Copy(f) + if err != nil { + _ = f.Close() + return err + } + return f.Close() +} + +// Check performs several consistency checks on the database for this transaction. +// An error is returned if any inconsistency is found. +// +// It can be safely run concurrently on a writable transaction. 
However, this +// incurs a high cost for large databases and databases with a lot of subbuckets +// because of caching. This overhead can be removed if running on a read-only +// transaction, however, it is not safe to execute other writer transactions at +// the same time. +func (tx *Tx) Check() <-chan error { + ch := make(chan error) + go tx.check(ch) + return ch +} + +func (tx *Tx) check(ch chan error) { + // Force loading free list if opened in ReadOnly mode. + tx.db.loadFreelist() + + // Check if any pages are double freed. + freed := make(map[pgid]bool) + all := make([]pgid, tx.db.freelist.count()) + tx.db.freelist.copyall(all) + for _, id := range all { + if freed[id] { + ch <- fmt.Errorf("page %d: already freed", id) + } + freed[id] = true + } + + // Track every reachable page. + reachable := make(map[pgid]*page) + reachable[0] = tx.page(0) // meta0 + reachable[1] = tx.page(1) // meta1 + if tx.meta.freelist != pgidNoFreelist { + for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { + reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) + } + } + + // Recursively check buckets. + tx.checkBucket(&tx.root, reachable, freed, ch) + + // Ensure all pages below high water mark are either reachable or freed. + for i := pgid(0); i < tx.meta.pgid; i++ { + _, isReachable := reachable[i] + if !isReachable && !freed[i] { + ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) + } + } + + // Close the channel to signal completion. + close(ch) +} + +func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) { + // Ignore inline buckets. + if b.root == 0 { + return + } + + // Check every page used by this bucket. + b.tx.forEachPage(b.root, 0, func(p *page, _ int) { + if p.id > tx.meta.pgid { + ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid)) + } + + // Ensure each page is only referenced once. + for i := pgid(0); i <= pgid(p.overflow); i++ { + var id = p.id + i + if _, ok := reachable[id]; ok { + ch <- fmt.Errorf("page %d: multiple references", int(id)) + } + reachable[id] = p + } + + // We should only encounter un-freed leaf and branch pages. + if freed[p.id] { + ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) + } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { + ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ()) + } + }) + + // Check each bucket within this bucket. + _ = b.ForEach(func(k, v []byte) error { + if child := b.Bucket(k); child != nil { + tx.checkBucket(child, reachable, freed, ch) + } + return nil + }) +} + +// allocate returns a contiguous block of memory starting at a given page. +func (tx *Tx) allocate(count int) (*page, error) { + p, err := tx.db.allocate(tx.meta.txid, count) + if err != nil { + return nil, err + } + + // Save to our page cache. + tx.pages[p.id] = p + + // Update statistics. + tx.stats.PageCount += count + tx.stats.PageAlloc += count * tx.db.pageSize + + return p, nil +} + +// write writes any dirty pages to disk. +func (tx *Tx) write() error { + // Sort pages by id. + pages := make(pages, 0, len(tx.pages)) + for _, p := range tx.pages { + pages = append(pages, p) + } + // Clear out page cache early. + tx.pages = make(map[pgid]*page) + sort.Sort(pages) + + // Write pages to disk in order. + for _, p := range pages { + size := (int(p.overflow) + 1) * tx.db.pageSize + offset := int64(p.id) * int64(tx.db.pageSize) + + // Write out page in "max allocation" sized chunks. 
+ ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p)) + for { + // Limit our write to our max allocation size. + sz := size + if sz > maxAllocSize-1 { + sz = maxAllocSize - 1 + } + + // Write chunk to disk. + buf := ptr[:sz] + if _, err := tx.db.ops.writeAt(buf, offset); err != nil { + return err + } + + // Update statistics. + tx.stats.Write++ + + // Exit inner for loop if we've written all the chunks. + size -= sz + if size == 0 { + break + } + + // Otherwise move offset forward and move pointer to next chunk. + offset += int64(sz) + ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz])) + } + } + + // Ignore file sync if flag is set on DB. + if !tx.db.NoSync || IgnoreNoSync { + if err := fdatasync(tx.db); err != nil { + return err + } + } + + // Put small pages back to page pool. + for _, p := range pages { + // Ignore page sizes over 1 page. + // These are allocated using make() instead of the page pool. + if int(p.overflow) != 0 { + continue + } + + buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize] + + // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1 + for i := range buf { + buf[i] = 0 + } + tx.db.pagePool.Put(buf) + } + + return nil +} + +// writeMeta writes the meta to the disk. +func (tx *Tx) writeMeta() error { + // Create a temporary buffer for the meta page. + buf := make([]byte, tx.db.pageSize) + p := tx.db.pageInBuffer(buf, 0) + tx.meta.write(p) + + // Write the meta page to file. + if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil { + return err + } + if !tx.db.NoSync || IgnoreNoSync { + if err := fdatasync(tx.db); err != nil { + return err + } + } + + // Update statistics. + tx.stats.Write++ + + return nil +} + +// page returns a reference to the page with a given id. +// If page has been written to then a temporary buffered page is returned. +func (tx *Tx) page(id pgid) *page { + // Check the dirty pages first. + if tx.pages != nil { + if p, ok := tx.pages[id]; ok { + return p + } + } + + // Otherwise return directly from the mmap. + return tx.db.page(id) +} + +// forEachPage iterates over every page within a given page and executes a function. +func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) { + p := tx.page(pgid) + + // Execute function. + fn(p, depth) + + // Recursively loop over children. + if (p.flags & branchPageFlag) != 0 { + for i := 0; i < int(p.count); i++ { + elem := p.branchPageElement(uint16(i)) + tx.forEachPage(elem.pgid, depth+1, fn) + } + } +} + +// Page returns page information for a given page number. +// This is only safe for concurrent use when used by a writable transaction. +func (tx *Tx) Page(id int) (*PageInfo, error) { + if tx.db == nil { + return nil, ErrTxClosed + } else if pgid(id) >= tx.meta.pgid { + return nil, nil + } + + // Build the page info. + p := tx.db.page(pgid(id)) + info := &PageInfo{ + ID: id, + Count: int(p.count), + OverflowCount: int(p.overflow), + } + + // Determine the type (or if it's free). + if tx.db.freelist.freed(pgid(id)) { + info.Type = "free" + } else { + info.Type = p.typ() + } + + return info, nil +} + +// TxStats represents statistics about the actions performed by the transaction. +type TxStats struct { + // Page statistics. + PageCount int // number of page allocations + PageAlloc int // total bytes allocated + + // Cursor statistics. 
+ CursorCount int // number of cursors created + + // Node statistics + NodeCount int // number of node allocations + NodeDeref int // number of node dereferences + + // Rebalance statistics. + Rebalance int // number of node rebalances + RebalanceTime time.Duration // total time spent rebalancing + + // Split/Spill statistics. + Split int // number of nodes split + Spill int // number of nodes spilled + SpillTime time.Duration // total time spent spilling + + // Write statistics. + Write int // number of writes performed + WriteTime time.Duration // total time spent writing to disk +} + +func (s *TxStats) add(other *TxStats) { + s.PageCount += other.PageCount + s.PageAlloc += other.PageAlloc + s.CursorCount += other.CursorCount + s.NodeCount += other.NodeCount + s.NodeDeref += other.NodeDeref + s.Rebalance += other.Rebalance + s.RebalanceTime += other.RebalanceTime + s.Split += other.Split + s.Spill += other.Spill + s.SpillTime += other.SpillTime + s.Write += other.Write + s.WriteTime += other.WriteTime +} + +// Sub calculates and returns the difference between two sets of transaction stats. +// This is useful when obtaining stats at two different points and time and +// you need the performance counters that occurred within that time span. +func (s *TxStats) Sub(other *TxStats) TxStats { + var diff TxStats + diff.PageCount = s.PageCount - other.PageCount + diff.PageAlloc = s.PageAlloc - other.PageAlloc + diff.CursorCount = s.CursorCount - other.CursorCount + diff.NodeCount = s.NodeCount - other.NodeCount + diff.NodeDeref = s.NodeDeref - other.NodeDeref + diff.Rebalance = s.Rebalance - other.Rebalance + diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime + diff.Split = s.Split - other.Split + diff.Spill = s.Spill - other.Spill + diff.SpillTime = s.SpillTime - other.SpillTime + diff.Write = s.Write - other.Write + diff.WriteTime = s.WriteTime - other.WriteTime + return diff +} diff --git a/sessiondb/redis/database.go b/sessiondb/redis/database.go index 7999647..e5d6720 100644 --- a/sessiondb/redis/database.go +++ b/sessiondb/redis/database.go @@ -1,11 +1,10 @@ package redis import ( + "log" "runtime" "time" - "github.com/kataras/golog" - "github.com/kataras/go-sessions" "github.com/kataras/go-sessions/sessiondb/redis/service" ) @@ -23,7 +22,7 @@ func New(cfg ...service.Config) *Database { db.redis.Connect() _, err := db.redis.PingPong() if err != nil { - golog.Debugf("error connecting to redis: %v", err) + log.Fatal(err) return nil } runtime.SetFinalizer(db, closeDB) @@ -42,7 +41,7 @@ func (db *Database) Acquire(sid string, expires time.Duration) sessions.LifeTime if !found { // not found, create an entry with ttl and return an empty lifetime, session manager will do its job. if err := db.redis.Set(sid, sid, int64(expires.Seconds())); err != nil { - golog.Debug(err) + return sessions.LifeTime{Time: sessions.CookieExpireDelete} } return sessions.LifeTime{} // session manager will handle the rest. @@ -56,6 +55,12 @@ func (db *Database) Acquire(sid string, expires time.Duration) sessions.LifeTime return sessions.LifeTime{Time: time.Now().Add(time.Duration(seconds) * time.Second)} } +// OnUpdateExpiration will re-set the database's session's entry ttl. 
+// https://redis.io/commands/expire#refreshing-expires +func (db *Database) OnUpdateExpiration(sid string, newExpires time.Duration) error { + return db.redis.UpdateTTLMany(sid, int64(newExpires.Seconds())) +} + const delim = "_" func makeKey(sid, key string) string { @@ -67,13 +72,10 @@ func makeKey(sid, key string) string { func (db *Database) Set(sid string, lifetime sessions.LifeTime, key string, value interface{}, immutable bool) { valueBytes, err := sessions.DefaultTranscoder.Marshal(value) if err != nil { - golog.Error(err) return } - if err = db.redis.Set(makeKey(sid, key), valueBytes, int64(lifetime.DurationUntilExpiration().Seconds())); err != nil { - golog.Debug(err) - } + db.redis.Set(makeKey(sid, key), valueBytes, int64(lifetime.DurationUntilExpiration().Seconds())) } // Get retrieves a session value based on the key. @@ -89,15 +91,12 @@ func (db *Database) get(key string, outPtr interface{}) { return } - if err = sessions.DefaultTranscoder.Unmarshal(data.([]byte), outPtr); err != nil { - golog.Debugf("unable to unmarshal value of key: '%s': %v", key, err) - } + sessions.DefaultTranscoder.Unmarshal(data.([]byte), outPtr) } func (db *Database) keys(sid string) []string { keys, err := db.redis.GetKeys(sid + delim) if err != nil { - golog.Debugf("unable to get all redis keys of session '%s': %v", sid, err) return nil } @@ -121,20 +120,14 @@ func (db *Database) Len(sid string) (n int) { // Delete removes a session key value based on its key. func (db *Database) Delete(sid string, key string) (deleted bool) { - err := db.redis.Delete(makeKey(sid, key)) - if err != nil { - golog.Error(err) - } - return err == nil + return db.redis.Delete(makeKey(sid, key)) == nil } // Clear removes all session key values but it keeps the session entry. func (db *Database) Clear(sid string) { keys := db.keys(sid) for _, key := range keys { - if err := db.redis.Delete(key); err != nil { - golog.Debugf("unable to delete session '%s' value of key: '%s': %v", sid, key, err) - } + db.redis.Delete(key) } } diff --git a/sessiondb/redis/service/service.go b/sessiondb/redis/service/service.go index b33bc61..57bc309 100644 --- a/sessiondb/redis/service/service.go +++ b/sessiondb/redis/service/service.go @@ -9,10 +9,10 @@ import ( ) var ( - // ErrRedisClosed an error with message 'already closed' - ErrRedisClosed = errors.New("already closed") - // ErrKeyNotFound a static error when key not found. - ErrKeyNotFound = errors.New("not found") + // ErrRedisClosed an error with message 'Redis is already closed' + ErrRedisClosed = errors.New("redis is already closed") + // ErrKeyNotFound an error with message 'key not found' + ErrKeyNotFound = errors.New("key not found") ) // Service the Redis service, contains the config and the redis pool @@ -84,7 +84,7 @@ func (r *Service) Get(key string) (interface{}, error) { // TTL returns the seconds to expire, if the key has expiration and error if action failed. // Read more at: https://redis.io/commands/ttl -func (r *Service) TTL(key string) (seconds int64, hasExpiration bool, ok bool) { +func (r *Service) TTL(key string) (seconds int64, hasExpiration bool, found bool) { c := r.pool.Get() defer c.Close() redisVal, err := c.Do("TTL", r.Config.Prefix+key) @@ -93,12 +93,72 @@ func (r *Service) TTL(key string) (seconds int64, hasExpiration bool, ok bool) { } seconds = redisVal.(int64) // if -1 means the key has unlimited life time. - hasExpiration = seconds == -1 + hasExpiration = seconds > -1 // if -2 means key does not exist. 
- ok = (c.Err() != nil || seconds == -2) + found = !(c.Err() != nil || seconds == -2) return } +func (r *Service) updateTTLConn(c redis.Conn, key string, newSecondsLifeTime int64) error { + reply, err := c.Do("EXPIRE", r.Config.Prefix+key, newSecondsLifeTime) + + if err != nil { + return err + } + + // https://redis.io/commands/expire#return-value + // + // 1 if the timeout was set. + // 0 if key does not exist. + if hadTTLOrExists, ok := reply.(int); ok { + if hadTTLOrExists == 1 { + return nil + } else if hadTTLOrExists == 0 { + return fmt.Errorf("unable to update expiration, the key '%s' was stored without ttl", key) + } // do not check for -1. + } + + return nil +} + +// UpdateTTL will update the ttl of a key. +// Using the "EXPIRE" command. +// Read more at: https://redis.io/commands/expire#refreshing-expires +func (r *Service) UpdateTTL(key string, newSecondsLifeTime int64) error { + c := r.pool.Get() + defer c.Close() + err := c.Err() + if err != nil { + return err + } + + return r.updateTTLConn(c, key, newSecondsLifeTime) +} + +// UpdateTTLMany like `UpdateTTL` but for all keys starting with that "prefix", +// it is a bit faster operation if you need to update all sessions keys (although it can be even faster if we used hash but this will limit other features), +// look the `sessions/Database#OnUpdateExpiration` for example. +func (r *Service) UpdateTTLMany(prefix string, newSecondsLifeTime int64) error { + c := r.pool.Get() + defer c.Close() + if err := c.Err(); err != nil { + return err + } + + keys, err := r.getKeysConn(c, prefix) + if err != nil { + return err + } + + for _, key := range keys { + if err = r.updateTTLConn(c, key, newSecondsLifeTime); err != nil { // fail on first error. + return err + } + } + + return err +} + // GetAll returns all redis entries using the "SCAN" command (2.8+). func (r *Service) GetAll() (interface{}, error) { c := r.pool.Get() @@ -120,15 +180,7 @@ func (r *Service) GetAll() (interface{}, error) { return redisVal, nil } -// GetKeys returns all redis keys using the "SCAN" with MATCH command. -// Read more at: https://redis.io/commands/scan#the-match-option. -func (r *Service) GetKeys(prefix string) ([]string, error) { - c := r.pool.Get() - defer c.Close() - if err := c.Err(); err != nil { - return nil, err - } - +func (r *Service) getKeysConn(c redis.Conn, prefix string) ([]string, error) { if err := c.Send("SCAN", 0, "MATCH", r.Config.Prefix+prefix+"*", "COUNT", 9999999999); err != nil { return nil, err } @@ -150,18 +202,29 @@ func (r *Service) GetKeys(prefix string) ([]string, error) { if keysSliceAsBytes, ok := keysInterface[1].([]interface{}); ok { keys := make([]string, len(keysSliceAsBytes), len(keysSliceAsBytes)) for i, k := range keysSliceAsBytes { - keys[i] = fmt.Sprintf("%s", k) + keys[i] = fmt.Sprintf("%s", k)[len(r.Config.Prefix):] } return keys, nil } - } } return nil, nil } +// GetKeys returns all redis keys using the "SCAN" with MATCH command. +// Read more at: https://redis.io/commands/scan#the-match-option. 
+func (r *Service) GetKeys(prefix string) ([]string, error) { + c := r.pool.Get() + defer c.Close() + if err := c.Err(); err != nil { + return nil, err + } + + return r.getKeysConn(c, prefix) +} + // GetBytes returns value, err by its key // you can use utils.Deserialize((.GetBytes("yourkey"),&theobject{}) //returns nil and a filled error if something wrong happens diff --git a/sessions.go b/sessions.go index 5336c5a..5ac15f3 100644 --- a/sessions.go +++ b/sessions.go @@ -35,7 +35,7 @@ import ( const ( // Version current semantic version string of the go-sessions package. - Version = "3.0.0" + Version = "3.1.0" ) // A Sessions manager should be responsible to Start a sesion, based @@ -253,14 +253,21 @@ func UpdateExpiration(w http.ResponseWriter, r *http.Request, expires time.Durat // UpdateExpiration change expire date of a session to a new date // by using timeout value passed by `expires` receiver. -func (s *Sessions) UpdateExpiration(w http.ResponseWriter, r *http.Request, expires time.Duration) { +// It will return `ErrNotFound` when trying to update expiration on a non-existence or not valid session entry. +// It will return `ErrNotImplemented` if a database is used and it does not support this feature, yet. +func (s *Sessions) UpdateExpiration(w http.ResponseWriter, r *http.Request, expires time.Duration) error { cookieValue := s.decodeCookieValue(GetCookie(r, s.config.Cookie)) + if cookieValue == "" { + return ErrNotFound + } - if cookieValue != "" { - if s.provider.UpdateExpiration(cookieValue, expires) { - s.updateCookie(w, r, cookieValue, expires) - } + // we should also allow it to expire when the browser closed + err := s.provider.UpdateExpiration(cookieValue, expires) + if err == nil || expires == -1 { + s.updateCookie(w, r, cookieValue, expires) } + + return err } // UpdateExpirationFasthttp change expire date of a session to a new date @@ -271,14 +278,19 @@ func UpdateExpirationFasthttp(ctx *fasthttp.RequestCtx, expires time.Duration) { // UpdateExpirationFasthttp change expire date of a session to a new date // by using timeout value passed by `expires` receiver. 
-func (s *Sessions) UpdateExpirationFasthttp(ctx *fasthttp.RequestCtx, expires time.Duration) { +func (s *Sessions) UpdateExpirationFasthttp(ctx *fasthttp.RequestCtx, expires time.Duration) error { cookieValue := s.decodeCookieValue(GetCookieFasthttp(ctx, s.config.Cookie)) + if cookieValue == "" { + return ErrNotFound + } - if cookieValue != "" { - if s.provider.UpdateExpiration(cookieValue, expires) { - s.updateCookieFasthttp(ctx, cookieValue, expires) - } + // we should also allow it to expire when the browser closed + err := s.provider.UpdateExpiration(cookieValue, expires) + if err == nil || expires == -1 { + s.updateCookieFasthttp(ctx, cookieValue, expires) } + + return err } func (s *Sessions) destroy(cookieValue string) { @@ -288,6 +300,7 @@ func (s *Sessions) destroy(cookieValue string) { if cookieValue == "" { // nothing to destroy return } + s.provider.Destroy(cookieValue) } diff --git a/sessions_test.go b/sessions_test.go index 0140e3e..746db32 100644 --- a/sessions_test.go +++ b/sessions_test.go @@ -13,7 +13,6 @@ import ( "github.com/gorilla/securecookie" "github.com/iris-contrib/httpexpect" - "github.com/kataras/go-serializer" ) var errReadBody = errors.New("While trying to read from the request body") @@ -48,8 +47,7 @@ func getTester(mux *http.ServeMux, t *testing.T) *httpexpect.Expect { } func writeValues(res http.ResponseWriter, values map[string]interface{}) error { - - result, err := serializer.Serialize("application/json", values) + result, err := json.Marshal(values) if err != nil { return err } diff --git a/vendor/github.com/satori/go.uuid/LICENSE b/vendor/github.com/iris-contrib/go.uuid/LICENSE similarity index 100% rename from vendor/github.com/satori/go.uuid/LICENSE rename to vendor/github.com/iris-contrib/go.uuid/LICENSE diff --git a/vendor/github.com/satori/go.uuid/codec.go b/vendor/github.com/iris-contrib/go.uuid/codec.go similarity index 100% rename from vendor/github.com/satori/go.uuid/codec.go rename to vendor/github.com/iris-contrib/go.uuid/codec.go diff --git a/vendor/github.com/satori/go.uuid/generator.go b/vendor/github.com/iris-contrib/go.uuid/generator.go similarity index 100% rename from vendor/github.com/satori/go.uuid/generator.go rename to vendor/github.com/iris-contrib/go.uuid/generator.go diff --git a/vendor/github.com/satori/go.uuid/sql.go b/vendor/github.com/iris-contrib/go.uuid/sql.go similarity index 100% rename from vendor/github.com/satori/go.uuid/sql.go rename to vendor/github.com/iris-contrib/go.uuid/sql.go diff --git a/vendor/github.com/satori/go.uuid/uuid.go b/vendor/github.com/iris-contrib/go.uuid/uuid.go similarity index 100% rename from vendor/github.com/satori/go.uuid/uuid.go rename to vendor/github.com/iris-contrib/go.uuid/uuid.go
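The `updateTTLConn`/`UpdateTTL` helpers added to sessiondb/redis/service/service.go above are built on the EXPIRE command, whose reply is 1 when the timeout was set and 0 when the key does not exist. Below is a minimal standalone sketch of that reply handling with gomodule/redigo; the helper name `refreshTTL` is illustrative only, and `redis.Int` is used because redigo normally delivers integer replies as int64, so asserting a concrete Go type directly can be brittle.

package example

import (
	"fmt"

	"github.com/gomodule/redigo/redis"
)

// refreshTTL resets the TTL of a single key and interprets the EXPIRE reply:
// 1 means the timeout was set, 0 means the key does not exist.
// See: https://redis.io/commands/expire#return-value
func refreshTTL(c redis.Conn, key string, seconds int64) error {
	reply, err := redis.Int(c.Do("EXPIRE", key, seconds))
	if err != nil {
		return err
	}
	if reply == 0 {
		return fmt.Errorf("unable to update expiration, the key '%s' does not exist", key)
	}
	return nil // reply == 1, the timeout was set.
}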
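Similarly, the TTL reply consumed by `Acquire` in sessiondb/redis/database.go follows the usual Redis convention: a positive number of remaining seconds, -1 for a key that exists without an expiration, and -2 for a missing key. A small sketch of that interpretation, again with redigo and a hypothetical helper name:

package example

import (
	"time"

	"github.com/gomodule/redigo/redis"
)

// sessionExpiresAt mirrors how Acquire turns a TTL reply into an absolute
// expiration time: seconds > 0 yields time.Now().Add(seconds), -1 means the
// key lives without an expiration, -2 means the key does not exist.
func sessionExpiresAt(c redis.Conn, key string) (expiresAt time.Time, hasExpiration, found bool, err error) {
	seconds, err := redis.Int64(c.Do("TTL", key))
	if err != nil {
		return time.Time{}, false, false, err
	}
	switch {
	case seconds == -2: // key does not exist.
		return time.Time{}, false, false, nil
	case seconds == -1: // key exists but has no associated expiration.
		return time.Time{}, false, true, nil
	default:
		return time.Now().Add(time.Duration(seconds) * time.Second), true, true, nil
	}
}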
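On the public API, `Sessions.UpdateExpiration` (and its fasthttp counterpart) now reports failures instead of failing silently, returning `ErrNotFound` for a missing or invalid session cookie and `ErrNotImplemented` when the registered database cannot change TTLs. A caller-side sketch for the net/http flavor, assuming an already configured `*sessions.Sessions` manager; the handler name and status codes are illustrative only:

package example

import (
	"net/http"
	"time"

	"github.com/kataras/go-sessions"
)

// refreshSession extends the lifetime of the current session by two hours.
func refreshSession(sess *sessions.Sessions) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		switch err := sess.UpdateExpiration(w, r, 2*time.Hour); err {
		case nil:
			w.WriteHeader(http.StatusNoContent) // expiration and cookie updated.
		case sessions.ErrNotFound:
			// No valid session cookie came with the request.
			http.Error(w, "session not found", http.StatusUnauthorized)
		case sessions.ErrNotImplemented:
			// The registered database cannot change the TTL of existing entries.
			http.Error(w, "expiration update not supported", http.StatusNotImplemented)
		default:
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
	}
}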