> 24),
- byte(sl >> 16),
- byte(sl >> 8),
- byte(sl),
- }...)
-
- // Marshal each element in the set.
- for k := range s {
- data = append(data, []byte{
- byte(k >> 24),
- byte(k >> 16),
- byte(k >> 8),
- byte(k),
- }...)
- }
-
- return data, nil
-}
-
-type uint64Slice []uint32
-
-func (p uint64Slice) Len() int { return len(p) }
-func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
-func (p uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
diff --git a/vendor/github.com/alicebob/miniredis/v2/hyperloglog/utils.go b/vendor/github.com/alicebob/miniredis/v2/hyperloglog/utils.go
deleted file mode 100644
index 896bf7e7..00000000
--- a/vendor/github.com/alicebob/miniredis/v2/hyperloglog/utils.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package hyperloglog
-
-import (
- "github.com/alicebob/miniredis/v2/metro"
- "math"
- "math/bits"
-)
-
-var hash = hashFunc
-
-func beta14(ez float64) float64 {
- zl := math.Log(ez + 1)
- return -0.370393911*ez +
- 0.070471823*zl +
- 0.17393686*math.Pow(zl, 2) +
- 0.16339839*math.Pow(zl, 3) +
- -0.09237745*math.Pow(zl, 4) +
- 0.03738027*math.Pow(zl, 5) +
- -0.005384159*math.Pow(zl, 6) +
- 0.00042419*math.Pow(zl, 7)
-}
-
-func beta16(ez float64) float64 {
- zl := math.Log(ez + 1)
- return -0.37331876643753059*ez +
- -1.41704077448122989*zl +
- 0.40729184796612533*math.Pow(zl, 2) +
- 1.56152033906584164*math.Pow(zl, 3) +
- -0.99242233534286128*math.Pow(zl, 4) +
- 0.26064681399483092*math.Pow(zl, 5) +
- -0.03053811369682807*math.Pow(zl, 6) +
- 0.00155770210179105*math.Pow(zl, 7)
-}
-
-func alpha(m float64) float64 {
- switch m {
- case 16:
- return 0.673
- case 32:
- return 0.697
- case 64:
- return 0.709
- }
- return 0.7213 / (1 + 1.079/m)
-}
-
-func getPosVal(x uint64, p uint8) (uint64, uint8) {
- i := bextr(x, 64-p, p) // {x63,...,x64-p}
- w := x<> start) & ((1 << length) - 1)
-}
-
-func bextr32(v uint32, start, length uint8) uint32 {
- return (v >> start) & ((1 << length) - 1)
-}
-
-func hashFunc(e []byte) uint64 {
- return metro.Hash64(e, 1337)
-}
diff --git a/vendor/github.com/alicebob/miniredis/v2/keys.go b/vendor/github.com/alicebob/miniredis/v2/keys.go
deleted file mode 100644
index 058e0a79..00000000
--- a/vendor/github.com/alicebob/miniredis/v2/keys.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package miniredis
-
-// Translate the 'KEYS' or 'PSUBSCRIBE' argument ('foo*', 'f??', &c.) into a regexp.
-
-import (
- "bytes"
- "regexp"
-)
-
-// patternRE compiles a glob to a regexp. Returns nil if the given
-// pattern will never match anything.
-// The general strategy is to sandwich all non-meta characters between \Q...\E.
-func patternRE(k string) *regexp.Regexp {
- re := bytes.Buffer{}
- re.WriteString(`(?s)^\Q`)
- for i := 0; i < len(k); i++ {
- p := k[i]
- switch p {
- case '*':
- re.WriteString(`\E.*\Q`)
- case '?':
- re.WriteString(`\E.\Q`)
- case '[':
- charClass := bytes.Buffer{}
- i++
- for ; i < len(k); i++ {
- if k[i] == ']' {
- break
- }
- if k[i] == '\\' {
- if i == len(k)-1 {
- // Ends with a '\'. U-huh.
- return nil
- }
- charClass.WriteByte(k[i])
- i++
- charClass.WriteByte(k[i])
- continue
- }
- charClass.WriteByte(k[i])
- }
- if charClass.Len() == 0 {
- // '[]' is valid in Redis, but matches nothing.
- return nil
- }
- re.WriteString(`\E[`)
- re.Write(charClass.Bytes())
- re.WriteString(`]\Q`)
-
- case '\\':
- if i == len(k)-1 {
- // Ends with a '\'. U-huh.
- return nil
- }
- // Forget the \, keep the next char.
- i++
- re.WriteByte(k[i])
- continue
- default:
- re.WriteByte(p)
- }
- }
- re.WriteString(`\E$`)
- return regexp.MustCompile(re.String())
-}
-
-// matchKeys filters only matching keys.
-// The returned boolean is whether the match pattern was valid
-func matchKeys(keys []string, match string) ([]string, bool) {
- re := patternRE(match)
- if re == nil {
- // Special case: the given pattern won't match anything or is invalid.
- return nil, false
- }
- var res []string
- for _, k := range keys {
- if !re.MatchString(k) {
- continue
- }
- res = append(res, k)
- }
- return res, true
-}
diff --git a/vendor/github.com/alicebob/miniredis/v2/lua.go b/vendor/github.com/alicebob/miniredis/v2/lua.go
deleted file mode 100644
index 42222dce..00000000
--- a/vendor/github.com/alicebob/miniredis/v2/lua.go
+++ /dev/null
@@ -1,242 +0,0 @@
-package miniredis
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "strings"
-
- lua "github.com/yuin/gopher-lua"
-
- "github.com/alicebob/miniredis/v2/server"
-)
-
-var luaRedisConstants = map[string]lua.LValue{
- "LOG_DEBUG": lua.LNumber(0),
- "LOG_VERBOSE": lua.LNumber(1),
- "LOG_NOTICE": lua.LNumber(2),
- "LOG_WARNING": lua.LNumber(3),
-}
-
-func mkLua(srv *server.Server, c *server.Peer) (map[string]lua.LGFunction, map[string]lua.LValue) {
- mkCall := func(failFast bool) func(l *lua.LState) int {
- // one server.Ctx for a single Lua run
- pCtx := &connCtx{}
- if getCtx(c).authenticated {
- pCtx.authenticated = true
- }
- pCtx.nested = true
- pCtx.selectedDB = getCtx(c).selectedDB
-
- return func(l *lua.LState) int {
- top := l.GetTop()
- if top == 0 {
- l.Error(lua.LString("Please specify at least one argument for redis.call()"), 1)
- return 0
- }
- var args []string
- for i := 1; i <= top; i++ {
- switch a := l.Get(i).(type) {
- case lua.LNumber:
- args = append(args, a.String())
- case lua.LString:
- args = append(args, string(a))
- default:
- l.Error(lua.LString("Lua redis() command arguments must be strings or integers"), 1)
- return 0
- }
- }
- if len(args) == 0 {
- l.Error(lua.LString(msgNotFromScripts), 1)
- return 0
- }
-
- buf := &bytes.Buffer{}
- wr := bufio.NewWriter(buf)
- peer := server.NewPeer(wr)
- peer.Ctx = pCtx
- srv.Dispatch(peer, args)
- wr.Flush()
-
- res, err := server.ParseReply(bufio.NewReader(buf))
- if err != nil {
- if failFast {
- // call() mode
- if strings.Contains(err.Error(), "ERR unknown command") {
- l.Error(lua.LString("Unknown Redis command called from Lua script"), 1)
- } else {
- l.Error(lua.LString(err.Error()), 1)
- }
- return 0
- }
- // pcall() mode
- l.Push(lua.LNil)
- return 1
- }
-
- if res == nil {
- l.Push(lua.LFalse)
- } else {
- switch r := res.(type) {
- case int64:
- l.Push(lua.LNumber(r))
- case int:
- l.Push(lua.LNumber(r))
- case []uint8:
- l.Push(lua.LString(string(r)))
- case []interface{}:
- l.Push(redisToLua(l, r))
- case server.Simple:
- l.Push(luaStatusReply(string(r)))
- case string:
- l.Push(lua.LString(r))
- case error:
- l.Error(lua.LString(r.Error()), 1)
- return 0
- default:
- panic(fmt.Sprintf("type not handled (%T)", r))
- }
- }
- return 1
- }
- }
-
- return map[string]lua.LGFunction{
- "call": mkCall(true),
- "pcall": mkCall(false),
- "error_reply": func(l *lua.LState) int {
- v := l.Get(1)
- msg, ok := v.(lua.LString)
- if !ok {
- l.Error(lua.LString("wrong number or type of arguments"), 1)
- return 0
- }
- res := &lua.LTable{}
- res.RawSetString("err", lua.LString(msg))
- l.Push(res)
- return 1
- },
- "log": func(l *lua.LState) int {
- level := l.CheckInt(1)
- msg := l.CheckString(2)
- _, _ = level, msg
- // do nothing by default. To see logs uncomment:
- // fmt.Printf("%v: %v", level, msg)
- return 0
- },
- "status_reply": func(l *lua.LState) int {
- v := l.Get(1)
- msg, ok := v.(lua.LString)
- if !ok {
- l.Error(lua.LString("wrong number or type of arguments"), 1)
- return 0
- }
- res := luaStatusReply(string(msg))
- l.Push(res)
- return 1
- },
- "sha1hex": func(l *lua.LState) int {
- top := l.GetTop()
- if top != 1 {
- l.Error(lua.LString("wrong number of arguments"), 1)
- return 0
- }
- msg := lua.LVAsString(l.Get(1))
- l.Push(lua.LString(sha1Hex(msg)))
- return 1
- },
- "replicate_commands": func(l *lua.LState) int {
- // ignored
- return 1
- },
- }, luaRedisConstants
-}
-
-func luaToRedis(l *lua.LState, c *server.Peer, value lua.LValue) {
- if value == nil {
- c.WriteNull()
- return
- }
-
- switch t := value.(type) {
- case *lua.LNilType:
- c.WriteNull()
- case lua.LBool:
- if lua.LVAsBool(value) {
- c.WriteInt(1)
- } else {
- c.WriteNull()
- }
- case lua.LNumber:
- c.WriteInt(int(lua.LVAsNumber(value)))
- case lua.LString:
- s := lua.LVAsString(value)
- c.WriteBulk(s)
- case *lua.LTable:
- // special case for tables with an 'err' or 'ok' field
- // note: according to the docs this only counts when 'err' or 'ok' is
- // the only field.
- if s := t.RawGetString("err"); s.Type() != lua.LTNil {
- c.WriteError(s.String())
- return
- }
- if s := t.RawGetString("ok"); s.Type() != lua.LTNil {
- c.WriteInline(s.String())
- return
- }
-
- result := []lua.LValue{}
- for j := 1; true; j++ {
- val := l.GetTable(value, lua.LNumber(j))
- if val == nil {
- result = append(result, val)
- continue
- }
-
- if val.Type() == lua.LTNil {
- break
- }
-
- result = append(result, val)
- }
-
- c.WriteLen(len(result))
- for _, r := range result {
- luaToRedis(l, c, r)
- }
- default:
- panic("....")
- }
-}
-
-func redisToLua(l *lua.LState, res []interface{}) *lua.LTable {
- rettb := l.NewTable()
- for _, e := range res {
- var v lua.LValue
- if e == nil {
- v = lua.LFalse
- } else {
- switch et := e.(type) {
- case int64:
- v = lua.LNumber(et)
- case []uint8:
- v = lua.LString(string(et))
- case []interface{}:
- v = redisToLua(l, et)
- case string:
- v = lua.LString(et)
- default:
- // TODO: oops?
- v = lua.LString(e.(string))
- }
- }
- l.RawSet(rettb, lua.LNumber(rettb.Len()+1), v)
- }
- return rettb
-}
-
-func luaStatusReply(msg string) *lua.LTable {
- tab := &lua.LTable{}
- tab.RawSetString("ok", lua.LString(msg))
- return tab
-}
diff --git a/vendor/github.com/alicebob/miniredis/v2/metro/LICENSE b/vendor/github.com/alicebob/miniredis/v2/metro/LICENSE
deleted file mode 100644
index 6243b617..00000000
--- a/vendor/github.com/alicebob/miniredis/v2/metro/LICENSE
+++ /dev/null
@@ -1,24 +0,0 @@
-This package is a mechanical translation of the reference C++ code for
-MetroHash, available at https://github.com/jandrewrogers/MetroHash
-
-The MIT License (MIT)
-
-Copyright (c) 2016 Damian Gryski
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/alicebob/miniredis/v2/metro/README.md b/vendor/github.com/alicebob/miniredis/v2/metro/README.md
deleted file mode 100644
index 07e4ee9f..00000000
--- a/vendor/github.com/alicebob/miniredis/v2/metro/README.md
+++ /dev/null
@@ -1 +0,0 @@
-This is a partial copy of github.com/dgryski/go-metro.
\ No newline at end of file
diff --git a/vendor/github.com/alicebob/miniredis/v2/metro/metro64.go b/vendor/github.com/alicebob/miniredis/v2/metro/metro64.go
deleted file mode 100644
index 5b3db9a9..00000000
--- a/vendor/github.com/alicebob/miniredis/v2/metro/metro64.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package metro
-
-import "encoding/binary"
-
-func Hash64(buffer []byte, seed uint64) uint64 {
-
- const (
- k0 = 0xD6D018F5
- k1 = 0xA2AA033B
- k2 = 0x62992FC1
- k3 = 0x30BC5B29
- )
-
- ptr := buffer
-
- hash := (seed + k2) * k0
-
- if len(ptr) >= 32 {
- v := [4]uint64{hash, hash, hash, hash}
-
- for len(ptr) >= 32 {
- v[0] += binary.LittleEndian.Uint64(ptr[:8]) * k0
- v[0] = rotate_right(v[0], 29) + v[2]
- v[1] += binary.LittleEndian.Uint64(ptr[8:16]) * k1
- v[1] = rotate_right(v[1], 29) + v[3]
- v[2] += binary.LittleEndian.Uint64(ptr[16:24]) * k2
- v[2] = rotate_right(v[2], 29) + v[0]
- v[3] += binary.LittleEndian.Uint64(ptr[24:32]) * k3
- v[3] = rotate_right(v[3], 29) + v[1]
- ptr = ptr[32:]
- }
-
- v[2] ^= rotate_right(((v[0]+v[3])*k0)+v[1], 37) * k1
- v[3] ^= rotate_right(((v[1]+v[2])*k1)+v[0], 37) * k0
- v[0] ^= rotate_right(((v[0]+v[2])*k0)+v[3], 37) * k1
- v[1] ^= rotate_right(((v[1]+v[3])*k1)+v[2], 37) * k0
- hash += v[0] ^ v[1]
- }
-
- if len(ptr) >= 16 {
- v0 := hash + (binary.LittleEndian.Uint64(ptr[:8]) * k2)
- v0 = rotate_right(v0, 29) * k3
- v1 := hash + (binary.LittleEndian.Uint64(ptr[8:16]) * k2)
- v1 = rotate_right(v1, 29) * k3
- v0 ^= rotate_right(v0*k0, 21) + v1
- v1 ^= rotate_right(v1*k3, 21) + v0
- hash += v1
- ptr = ptr[16:]
- }
-
- if len(ptr) >= 8 {
- hash += binary.LittleEndian.Uint64(ptr[:8]) * k3
- ptr = ptr[8:]
- hash ^= rotate_right(hash, 55) * k1
- }
-
- if len(ptr) >= 4 {
- hash += uint64(binary.LittleEndian.Uint32(ptr[:4])) * k3
- hash ^= rotate_right(hash, 26) * k1
- ptr = ptr[4:]
- }
-
- if len(ptr) >= 2 {
- hash += uint64(binary.LittleEndian.Uint16(ptr[:2])) * k3
- ptr = ptr[2:]
- hash ^= rotate_right(hash, 48) * k1
- }
-
- if len(ptr) >= 1 {
- hash += uint64(ptr[0]) * k3
- hash ^= rotate_right(hash, 37) * k1
- }
-
- hash ^= rotate_right(hash, 28)
- hash *= k0
- hash ^= rotate_right(hash, 29)
-
- return hash
-}
-
-func Hash64Str(buffer string, seed uint64) uint64 {
- return Hash64([]byte(buffer), seed)
-}
-
-func rotate_right(v uint64, k uint) uint64 {
- return (v >> k) | (v << (64 - k))
-}
diff --git a/vendor/github.com/alicebob/miniredis/v2/miniredis.go b/vendor/github.com/alicebob/miniredis/v2/miniredis.go
deleted file mode 100644
index c86e2fe8..00000000
--- a/vendor/github.com/alicebob/miniredis/v2/miniredis.go
+++ /dev/null
@@ -1,720 +0,0 @@
-// Package miniredis is a pure Go Redis test server, for use in Go unittests.
-// There are no dependencies on system binaries, and every server you start
-// will be empty.
-//
-// import "github.com/alicebob/miniredis/v2"
-//
-// Start a server with `s := miniredis.RunT(t)`, it'll be shutdown via a t.Cleanup().
-// Or do everything manual: `s, err := miniredis.Run(); defer s.Close()`
-//
-// Point your Redis client to `s.Addr()` or `s.Host(), s.Port()`.
-//
-// Set keys directly via s.Set(...) and similar commands, or use a Redis client.
-//
-// For direct use you can select a Redis database with either `s.Select(12);
-// s.Get("foo")` or `s.DB(12).Get("foo")`.
-package miniredis
-
-import (
- "context"
- "crypto/tls"
- "fmt"
- "math/rand"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "github.com/alicebob/miniredis/v2/server"
-)
-
-var DumpMaxLineLen = 60
-
-type hashKey map[string]string
-type listKey []string
-type setKey map[string]struct{}
-
-// RedisDB holds a single (numbered) Redis database.
-type RedisDB struct {
- master *Miniredis // pointer to the lock in Miniredis
- id int // db id
- keys map[string]string // Master map of keys with their type
- stringKeys map[string]string // GET/SET &c. keys
- hashKeys map[string]hashKey // MGET/MSET &c. keys
- listKeys map[string]listKey // LPUSH &c. keys
- setKeys map[string]setKey // SADD &c. keys
- hllKeys map[string]*hll // PFADD &c. keys
- sortedsetKeys map[string]sortedSet // ZADD &c. keys
- streamKeys map[string]*streamKey // XADD &c. keys
- ttl map[string]time.Duration // effective TTL values
- keyVersion map[string]uint // used to watch values
-}
-
-// Miniredis is a Redis server implementation.
-type Miniredis struct {
- sync.Mutex
- srv *server.Server
- port int
- passwords map[string]string // username password
- dbs map[int]*RedisDB
- selectedDB int // DB id used in the direct Get(), Set() &c.
- scripts map[string]string // sha1 -> lua src
- signal *sync.Cond
- now time.Time // time.Now() if not set.
- subscribers map[*Subscriber]struct{}
- rand *rand.Rand
- Ctx context.Context
- CtxCancel context.CancelFunc
-}
-
-type txCmd func(*server.Peer, *connCtx)
-
-// database id + key combo
-type dbKey struct {
- db int
- key string
-}
-
-// connCtx has all state for a single connection.
-type connCtx struct {
- selectedDB int // selected DB
- authenticated bool // auth enabled and a valid AUTH seen
- transaction []txCmd // transaction callbacks. Or nil.
- dirtyTransaction bool // any error during QUEUEing
- watch map[dbKey]uint // WATCHed keys
- subscriber *Subscriber // client is in PUBSUB mode if not nil
- nested bool // this is called via Lua
-}
-
-// NewMiniRedis makes a new, non-started, Miniredis object.
-func NewMiniRedis() *Miniredis {
- m := Miniredis{
- dbs: map[int]*RedisDB{},
- scripts: map[string]string{},
- subscribers: map[*Subscriber]struct{}{},
- }
- m.Ctx, m.CtxCancel = context.WithCancel(context.Background())
- m.signal = sync.NewCond(&m)
- return &m
-}
-
-func newRedisDB(id int, m *Miniredis) RedisDB {
- return RedisDB{
- id: id,
- master: m,
- keys: map[string]string{},
- stringKeys: map[string]string{},
- hashKeys: map[string]hashKey{},
- listKeys: map[string]listKey{},
- setKeys: map[string]setKey{},
- hllKeys: map[string]*hll{},
- sortedsetKeys: map[string]sortedSet{},
- streamKeys: map[string]*streamKey{},
- ttl: map[string]time.Duration{},
- keyVersion: map[string]uint{},
- }
-}
-
-// Run creates and Start()s a Miniredis.
-func Run() (*Miniredis, error) {
- m := NewMiniRedis()
- return m, m.Start()
-}
-
-// Run creates and Start()s a Miniredis, TLS version.
-func RunTLS(cfg *tls.Config) (*Miniredis, error) {
- m := NewMiniRedis()
- return m, m.StartTLS(cfg)
-}
-
-// Tester is a minimal version of a testing.T
-type Tester interface {
- Fatalf(string, ...interface{})
- Cleanup(func())
-}
-
-// RunT start a new miniredis, pass it a testing.T. It also registers the cleanup after your test is done.
-func RunT(t Tester) *Miniredis {
- m := NewMiniRedis()
- if err := m.Start(); err != nil {
- t.Fatalf("could not start miniredis: %s", err)
- // not reached
- }
- t.Cleanup(m.Close)
- return m
-}
-
-// Start starts a server. It listens on a random port on localhost. See also
-// Addr().
-func (m *Miniredis) Start() error {
- s, err := server.NewServer(fmt.Sprintf("127.0.0.1:%d", m.port))
- if err != nil {
- return err
- }
- return m.start(s)
-}
-
-// Start starts a server, TLS version.
-func (m *Miniredis) StartTLS(cfg *tls.Config) error {
- s, err := server.NewServerTLS(fmt.Sprintf("127.0.0.1:%d", m.port), cfg)
- if err != nil {
- return err
- }
- return m.start(s)
-}
-
-// StartAddr runs miniredis with a given addr. Examples: "127.0.0.1:6379",
-// ":6379", or "127.0.0.1:0"
-func (m *Miniredis) StartAddr(addr string) error {
- s, err := server.NewServer(addr)
- if err != nil {
- return err
- }
- return m.start(s)
-}
-
-func (m *Miniredis) start(s *server.Server) error {
- m.Lock()
- defer m.Unlock()
- m.srv = s
- m.port = s.Addr().Port
-
- commandsConnection(m)
- commandsGeneric(m)
- commandsServer(m)
- commandsString(m)
- commandsHash(m)
- commandsList(m)
- commandsPubsub(m)
- commandsSet(m)
- commandsSortedSet(m)
- commandsStream(m)
- commandsTransaction(m)
- commandsScripting(m)
- commandsGeo(m)
- commandsCluster(m)
- commandsHll(m)
-
- return nil
-}
-
-// Restart restarts a Close()d server on the same port. Values will be
-// preserved.
-func (m *Miniredis) Restart() error {
- return m.Start()
-}
-
-// Close shuts down a Miniredis.
-func (m *Miniredis) Close() {
- m.Lock()
-
- if m.srv == nil {
- m.Unlock()
- return
- }
- srv := m.srv
- m.srv = nil
- m.CtxCancel()
- m.Unlock()
-
- // the OnDisconnect callbacks can lock m, so run Close() outside the lock.
- srv.Close()
-
-}
-
-// RequireAuth makes every connection need to AUTH first. This is the old 'AUTH [password] command.
-// Remove it by setting an empty string.
-func (m *Miniredis) RequireAuth(pw string) {
- m.RequireUserAuth("default", pw)
-}
-
-// Add a username/password, for use with 'AUTH [username] [password]'.
-// There are currently no access controls for commands implemented.
-// Disable access for the user with an empty password.
-func (m *Miniredis) RequireUserAuth(username, pw string) {
- m.Lock()
- defer m.Unlock()
- if m.passwords == nil {
- m.passwords = map[string]string{}
- }
- if pw == "" {
- delete(m.passwords, username)
- return
- }
- m.passwords[username] = pw
-}
-
-// DB returns a DB by ID.
-func (m *Miniredis) DB(i int) *RedisDB {
- m.Lock()
- defer m.Unlock()
- return m.db(i)
-}
-
-// get DB. No locks!
-func (m *Miniredis) db(i int) *RedisDB {
- if db, ok := m.dbs[i]; ok {
- return db
- }
- db := newRedisDB(i, m) // main miniredis has our mutex.
- m.dbs[i] = &db
- return &db
-}
-
-// SwapDB swaps DBs by IDs.
-func (m *Miniredis) SwapDB(i, j int) bool {
- m.Lock()
- defer m.Unlock()
- return m.swapDB(i, j)
-}
-
-// swap DB. No locks!
-func (m *Miniredis) swapDB(i, j int) bool {
- db1 := m.db(i)
- db2 := m.db(j)
-
- db1.id = j
- db2.id = i
-
- m.dbs[i] = db2
- m.dbs[j] = db1
-
- return true
-}
-
-// Addr returns '127.0.0.1:12345'. Can be given to a Dial(). See also Host()
-// and Port(), which return the same things.
-func (m *Miniredis) Addr() string {
- m.Lock()
- defer m.Unlock()
- return m.srv.Addr().String()
-}
-
-// Host returns the host part of Addr().
-func (m *Miniredis) Host() string {
- m.Lock()
- defer m.Unlock()
- return m.srv.Addr().IP.String()
-}
-
-// Port returns the (random) port part of Addr().
-func (m *Miniredis) Port() string {
- m.Lock()
- defer m.Unlock()
- return strconv.Itoa(m.srv.Addr().Port)
-}
-
-// CommandCount returns the number of processed commands.
-func (m *Miniredis) CommandCount() int {
- m.Lock()
- defer m.Unlock()
- return int(m.srv.TotalCommands())
-}
-
-// CurrentConnectionCount returns the number of currently connected clients.
-func (m *Miniredis) CurrentConnectionCount() int {
- m.Lock()
- defer m.Unlock()
- return m.srv.ClientsLen()
-}
-
-// TotalConnectionCount returns the number of client connections since server start.
-func (m *Miniredis) TotalConnectionCount() int {
- m.Lock()
- defer m.Unlock()
- return int(m.srv.TotalConnections())
-}
-
-// FastForward decreases all TTLs by the given duration. All TTLs <= 0 will be
-// expired.
-func (m *Miniredis) FastForward(duration time.Duration) {
- m.Lock()
- defer m.Unlock()
- for _, db := range m.dbs {
- db.fastForward(duration)
- }
-}
-
-// Server returns the underlying server to allow custom commands to be implemented
-func (m *Miniredis) Server() *server.Server {
- return m.srv
-}
-
-// Dump returns a text version of the selected DB, usable for debugging.
-//
-// Dump limits the maximum length of each key:value to "DumpMaxLineLen" characters.
-// To increase that, call something like:
-//
-// miniredis.DumpMaxLineLen = 1024
-// mr, _ = miniredis.Run()
-// mr.Dump()
-func (m *Miniredis) Dump() string {
- m.Lock()
- defer m.Unlock()
-
- var (
- maxLen = DumpMaxLineLen
- indent = " "
- db = m.db(m.selectedDB)
- r = ""
- v = func(s string) string {
- suffix := ""
- if len(s) > maxLen {
- suffix = fmt.Sprintf("...(%d)", len(s))
- s = s[:maxLen-len(suffix)]
- }
- return fmt.Sprintf("%q%s", s, suffix)
- }
- )
-
- for _, k := range db.allKeys() {
- r += fmt.Sprintf("- %s\n", k)
- t := db.t(k)
- switch t {
- case "string":
- r += fmt.Sprintf("%s%s\n", indent, v(db.stringKeys[k]))
- case "hash":
- for _, hk := range db.hashFields(k) {
- r += fmt.Sprintf("%s%s: %s\n", indent, hk, v(db.hashGet(k, hk)))
- }
- case "list":
- for _, lk := range db.listKeys[k] {
- r += fmt.Sprintf("%s%s\n", indent, v(lk))
- }
- case "set":
- for _, mk := range db.setMembers(k) {
- r += fmt.Sprintf("%s%s\n", indent, v(mk))
- }
- case "zset":
- for _, el := range db.ssetElements(k) {
- r += fmt.Sprintf("%s%f: %s\n", indent, el.score, v(el.member))
- }
- case "stream":
- for _, entry := range db.streamKeys[k].entries {
- r += fmt.Sprintf("%s%s\n", indent, entry.ID)
- ev := entry.Values
- for i := 0; i < len(ev)/2; i++ {
- r += fmt.Sprintf("%s%s%s: %s\n", indent, indent, v(ev[2*i]), v(ev[2*i+1]))
- }
- }
- case "hll":
- for _, entry := range db.hllKeys {
- r += fmt.Sprintf("%s%s\n", indent, v(string(entry.Bytes())))
- }
- default:
- r += fmt.Sprintf("%s(a %s, fixme!)\n", indent, t)
- }
- }
- return r
-}
-
-// SetTime sets the time against which EXPIREAT values are compared, and the
-// time used in stream entry IDs. Will use time.Now() if this is not set.
-func (m *Miniredis) SetTime(t time.Time) {
- m.Lock()
- defer m.Unlock()
- m.now = t
-}
-
-// make every command return this message. For example:
-//
-// LOADING Redis is loading the dataset in memory
-// MASTERDOWN Link with MASTER is down and replica-serve-stale-data is set to 'no'.
-//
-// Clear it with an empty string. Don't add newlines.
-func (m *Miniredis) SetError(msg string) {
- cb := server.Hook(nil)
- if msg != "" {
- cb = func(c *server.Peer, cmd string, args ...string) bool {
- c.WriteError(msg)
- return true
- }
- }
- m.srv.SetPreHook(cb)
-}
-
-// isValidCMD returns true if command is valid and can be executed.
-func (m *Miniredis) isValidCMD(c *server.Peer, cmd string) bool {
- if !m.handleAuth(c) {
- return false
- }
- if m.checkPubsub(c, cmd) {
- return false
- }
-
- return true
-}
-
-// handleAuth returns false if connection has no access. It sends the reply.
-func (m *Miniredis) handleAuth(c *server.Peer) bool {
- if getCtx(c).nested {
- return true
- }
-
- m.Lock()
- defer m.Unlock()
- if len(m.passwords) == 0 {
- return true
- }
- if !getCtx(c).authenticated {
- c.WriteError("NOAUTH Authentication required.")
- return false
- }
- return true
-}
-
-// handlePubsub sends an error to the user if the connection is in PUBSUB mode.
-// It'll return true if it did.
-func (m *Miniredis) checkPubsub(c *server.Peer, cmd string) bool {
- if getCtx(c).nested {
- return false
- }
-
- m.Lock()
- defer m.Unlock()
-
- ctx := getCtx(c)
- if ctx.subscriber == nil {
- return false
- }
-
- prefix := "ERR "
- if strings.ToLower(cmd) == "exec" {
- prefix = "EXECABORT Transaction discarded because of: "
- }
- c.WriteError(fmt.Sprintf(
- "%sCan't execute '%s': only (P)SUBSCRIBE / (P)UNSUBSCRIBE / PING / QUIT are allowed in this context",
- prefix,
- strings.ToLower(cmd),
- ))
- return true
-}
-
-func getCtx(c *server.Peer) *connCtx {
- if c.Ctx == nil {
- c.Ctx = &connCtx{}
- }
- return c.Ctx.(*connCtx)
-}
-
-func startTx(ctx *connCtx) {
- ctx.transaction = []txCmd{}
- ctx.dirtyTransaction = false
-}
-
-func stopTx(ctx *connCtx) {
- ctx.transaction = nil
- unwatch(ctx)
-}
-
-func inTx(ctx *connCtx) bool {
- return ctx.transaction != nil
-}
-
-func addTxCmd(ctx *connCtx, cb txCmd) {
- ctx.transaction = append(ctx.transaction, cb)
-}
-
-func watch(db *RedisDB, ctx *connCtx, key string) {
- if ctx.watch == nil {
- ctx.watch = map[dbKey]uint{}
- }
- ctx.watch[dbKey{db: db.id, key: key}] = db.keyVersion[key] // Can be 0.
-}
-
-func unwatch(ctx *connCtx) {
- ctx.watch = nil
-}
-
-// setDirty can be called even when not in an tx. Is an no-op then.
-func setDirty(c *server.Peer) {
- if c.Ctx == nil {
- // No transaction. Not relevant.
- return
- }
- getCtx(c).dirtyTransaction = true
-}
-
-func (m *Miniredis) addSubscriber(s *Subscriber) {
- m.subscribers[s] = struct{}{}
-}
-
-// closes and remove the subscriber.
-func (m *Miniredis) removeSubscriber(s *Subscriber) {
- _, ok := m.subscribers[s]
- delete(m.subscribers, s)
- if ok {
- s.Close()
- }
-}
-
-func (m *Miniredis) publish(c, msg string) int {
- n := 0
- for s := range m.subscribers {
- n += s.Publish(c, msg)
- }
- return n
-}
-
-// enter 'subscribed state', or return the existing one.
-func (m *Miniredis) subscribedState(c *server.Peer) *Subscriber {
- ctx := getCtx(c)
- sub := ctx.subscriber
- if sub != nil {
- return sub
- }
-
- sub = newSubscriber()
- m.addSubscriber(sub)
-
- c.OnDisconnect(func() {
- m.Lock()
- m.removeSubscriber(sub)
- m.Unlock()
- })
-
- ctx.subscriber = sub
-
- go monitorPublish(c, sub.publish)
- go monitorPpublish(c, sub.ppublish)
-
- return sub
-}
-
-// whenever the p?sub count drops to 0 subscribed state should be stopped, and
-// all redis commands are allowed again.
-func endSubscriber(m *Miniredis, c *server.Peer) {
- ctx := getCtx(c)
- if sub := ctx.subscriber; sub != nil {
- m.removeSubscriber(sub) // will Close() the sub
- }
- ctx.subscriber = nil
-}
-
-// Start a new pubsub subscriber. It can (un) subscribe to channels and
-// patterns, and has a channel to get published messages. Close it with
-// Close().
-// Does not close itself when there are no subscriptions left.
-func (m *Miniredis) NewSubscriber() *Subscriber {
- sub := newSubscriber()
-
- m.Lock()
- m.addSubscriber(sub)
- m.Unlock()
-
- return sub
-}
-
-func (m *Miniredis) allSubscribers() []*Subscriber {
- var subs []*Subscriber
- for s := range m.subscribers {
- subs = append(subs, s)
- }
- return subs
-}
-
-func (m *Miniredis) Seed(seed int) {
- m.Lock()
- defer m.Unlock()
-
- // m.rand is not safe for concurrent use.
- m.rand = rand.New(rand.NewSource(int64(seed)))
-}
-
-func (m *Miniredis) randIntn(n int) int {
- if m.rand == nil {
- return rand.Intn(n)
- }
- return m.rand.Intn(n)
-}
-
-// shuffle shuffles a list of strings. Kinda.
-func (m *Miniredis) shuffle(l []string) {
- for range l {
- i := m.randIntn(len(l))
- j := m.randIntn(len(l))
- l[i], l[j] = l[j], l[i]
- }
-}
-
-func (m *Miniredis) effectiveNow() time.Time {
- if !m.now.IsZero() {
- return m.now
- }
- return time.Now().UTC()
-}
-
-// convert a unixtimestamp to a duration, to use an absolute time as TTL.
-// d can be either time.Second or time.Millisecond.
-func (m *Miniredis) at(i int, d time.Duration) time.Duration {
- var ts time.Time
- switch d {
- case time.Millisecond:
- ts = time.Unix(int64(i/1000), 1000000*int64(i%1000))
- case time.Second:
- ts = time.Unix(int64(i), 0)
- default:
- panic("invalid time unit (d). Fixme!")
- }
- now := m.effectiveNow()
- return ts.Sub(now)
-}
-
-// copy does not mind if dst already exists.
-func (m *Miniredis) copy(
- srcDB *RedisDB, src string,
- destDB *RedisDB, dst string,
-) error {
- if !srcDB.exists(src) {
- return ErrKeyNotFound
- }
-
- switch srcDB.t(src) {
- case "string":
- destDB.stringKeys[dst] = srcDB.stringKeys[src]
- case "hash":
- destDB.hashKeys[dst] = copyHashKey(srcDB.hashKeys[src])
- case "list":
- destDB.listKeys[dst] = srcDB.listKeys[src]
- case "set":
- destDB.setKeys[dst] = copySetKey(srcDB.setKeys[src])
- case "zset":
- destDB.sortedsetKeys[dst] = copySortedSet(srcDB.sortedsetKeys[src])
- case "stream":
- destDB.streamKeys[dst] = srcDB.streamKeys[src].copy()
- case "hll":
- destDB.hllKeys[dst] = srcDB.hllKeys[src].copy()
- default:
- panic("missing case")
- }
- destDB.keys[dst] = srcDB.keys[src]
- destDB.keyVersion[dst]++
- if v, ok := srcDB.ttl[src]; ok {
- destDB.ttl[dst] = v
- }
- return nil
-}
-
-func copyHashKey(orig hashKey) hashKey {
- cpy := hashKey{}
- for k, v := range orig {
- cpy[k] = v
- }
- return cpy
-}
-
-func copySetKey(orig setKey) setKey {
- cpy := setKey{}
- for k, v := range orig {
- cpy[k] = v
- }
- return cpy
-}
-
-func copySortedSet(orig sortedSet) sortedSet {
- cpy := sortedSet{}
- for k, v := range orig {
- cpy[k] = v
- }
- return cpy
-}
diff --git a/vendor/github.com/alicebob/miniredis/v2/opts.go b/vendor/github.com/alicebob/miniredis/v2/opts.go
deleted file mode 100644
index f53f228b..00000000
--- a/vendor/github.com/alicebob/miniredis/v2/opts.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package miniredis
-
-import (
- "strconv"
-
- "github.com/alicebob/miniredis/v2/server"
-)
-
-// optInt parses an int option in a command.
-// Writes "invalid integer" error to c if it's not a valid integer. Returns
-// whether or not things were okay.
-func optInt(c *server.Peer, src string, dest *int) bool {
- return optIntErr(c, src, dest, msgInvalidInt)
-}
-
-func optIntErr(c *server.Peer, src string, dest *int, errMsg string) bool {
- n, err := strconv.Atoi(src)
- if err != nil {
- setDirty(c)
- c.WriteError(errMsg)
- return false
- }
- *dest = n
- return true
-}
diff --git a/vendor/github.com/alicebob/miniredis/v2/pubsub.go b/vendor/github.com/alicebob/miniredis/v2/pubsub.go
deleted file mode 100644
index bb31f80a..00000000
--- a/vendor/github.com/alicebob/miniredis/v2/pubsub.go
+++ /dev/null
@@ -1,240 +0,0 @@
-package miniredis
-
-import (
- "regexp"
- "sort"
- "sync"
-
- "github.com/alicebob/miniredis/v2/server"
-)
-
-// PubsubMessage is what gets broadcasted over pubsub channels.
-type PubsubMessage struct {
- Channel string
- Message string
-}
-
-type PubsubPmessage struct {
- Pattern string
- Channel string
- Message string
-}
-
-// Subscriber has the (p)subscriptions.
-type Subscriber struct {
- publish chan PubsubMessage
- ppublish chan PubsubPmessage
- channels map[string]struct{}
- patterns map[string]*regexp.Regexp
- mu sync.Mutex
-}
-
-// Make a new subscriber. The channel is not buffered, so you will need to keep
-// reading using Messages(). Use Close() when done, or unsubscribe.
-func newSubscriber() *Subscriber {
- return &Subscriber{
- publish: make(chan PubsubMessage),
- ppublish: make(chan PubsubPmessage),
- channels: map[string]struct{}{},
- patterns: map[string]*regexp.Regexp{},
- }
-}
-
-// Close the listening channel
-func (s *Subscriber) Close() {
- close(s.publish)
- close(s.ppublish)
-}
-
-// Count the total number of channels and patterns
-func (s *Subscriber) Count() int {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.count()
-}
-
-func (s *Subscriber) count() int {
- return len(s.channels) + len(s.patterns)
-}
-
-// Subscribe to a channel. Returns the total number of (p)subscriptions after
-// subscribing.
-func (s *Subscriber) Subscribe(c string) int {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- s.channels[c] = struct{}{}
- return s.count()
-}
-
-// Unsubscribe a channel. Returns the total number of (p)subscriptions after
-// unsubscribing.
-func (s *Subscriber) Unsubscribe(c string) int {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- delete(s.channels, c)
- return s.count()
-}
-
-// Subscribe to a pattern. Returns the total number of (p)subscriptions after
-// subscribing.
-func (s *Subscriber) Psubscribe(pat string) int {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- s.patterns[pat] = patternRE(pat)
- return s.count()
-}
-
-// Unsubscribe a pattern. Returns the total number of (p)subscriptions after
-// unsubscribing.
-func (s *Subscriber) Punsubscribe(pat string) int {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- delete(s.patterns, pat)
- return s.count()
-}
-
-// List all subscribed channels, in alphabetical order
-func (s *Subscriber) Channels() []string {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- var cs []string
- for c := range s.channels {
- cs = append(cs, c)
- }
- sort.Strings(cs)
- return cs
-}
-
-// List all subscribed patterns, in alphabetical order
-func (s *Subscriber) Patterns() []string {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- var ps []string
- for p := range s.patterns {
- ps = append(ps, p)
- }
- sort.Strings(ps)
- return ps
-}
-
-// Publish a message. Will return return how often we sent the message (can be
-// a match for a subscription and for a psubscription.
-func (s *Subscriber) Publish(c, msg string) int {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- found := 0
-
-subs:
- for sub := range s.channels {
- if sub == c {
- s.publish <- PubsubMessage{c, msg}
- found++
- break subs
- }
- }
-
-pats:
- for orig, pat := range s.patterns {
- if pat != nil && pat.MatchString(c) {
- s.ppublish <- PubsubPmessage{orig, c, msg}
- found++
- break pats
- }
- }
-
- return found
-}
-
-// The channel to read messages for this subscriber. Only for messages matching
-// a SUBSCRIBE.
-func (s *Subscriber) Messages() <-chan PubsubMessage {
- return s.publish
-}
-
-// The channel to read messages for this subscriber. Only for messages matching
-// a PSUBSCRIBE.
-func (s *Subscriber) Pmessages() <-chan PubsubPmessage {
- return s.ppublish
-}
-
-// List all pubsub channels. If `pat` isn't empty channels names must match the
-// pattern. Channels are returned alphabetically.
-func activeChannels(subs []*Subscriber, pat string) []string {
- channels := map[string]struct{}{}
- for _, s := range subs {
- for c := range s.channels {
- channels[c] = struct{}{}
- }
- }
-
- var cpat *regexp.Regexp
- if pat != "" {
- cpat = patternRE(pat)
- }
-
- var cs []string
- for k := range channels {
- if cpat != nil && !cpat.MatchString(k) {
- continue
- }
- cs = append(cs, k)
- }
- sort.Strings(cs)
- return cs
-}
-
-// Count all subscribed (not psubscribed) clients for the given channel
-// pattern. Channels are returned alphabetically.
-func countSubs(subs []*Subscriber, channel string) int {
- n := 0
- for _, p := range subs {
- for c := range p.channels {
- if c == channel {
- n++
- break
- }
- }
- }
- return n
-}
-
-// Count the total of all client psubscriptions.
-func countPsubs(subs []*Subscriber) int {
- n := 0
- for _, p := range subs {
- n += len(p.patterns)
- }
- return n
-}
-
-func monitorPublish(conn *server.Peer, msgs <-chan PubsubMessage) {
- for msg := range msgs {
- conn.Block(func(c *server.Writer) {
- c.WritePushLen(3)
- c.WriteBulk("message")
- c.WriteBulk(msg.Channel)
- c.WriteBulk(msg.Message)
- c.Flush()
- })
- }
-}
-
-func monitorPpublish(conn *server.Peer, msgs <-chan PubsubPmessage) {
- for msg := range msgs {
- conn.Block(func(c *server.Writer) {
- c.WritePushLen(4)
- c.WriteBulk("pmessage")
- c.WriteBulk(msg.Pattern)
- c.WriteBulk(msg.Channel)
- c.WriteBulk(msg.Message)
- c.Flush()
- })
- }
-}
diff --git a/vendor/github.com/alicebob/miniredis/v2/redis.go b/vendor/github.com/alicebob/miniredis/v2/redis.go
deleted file mode 100644
index a0604803..00000000
--- a/vendor/github.com/alicebob/miniredis/v2/redis.go
+++ /dev/null
@@ -1,235 +0,0 @@
-package miniredis
-
-import (
- "context"
- "fmt"
- "math"
- "math/big"
- "strings"
- "sync"
- "time"
-
- "github.com/alicebob/miniredis/v2/server"
-)
-
-const (
- msgWrongType = "WRONGTYPE Operation against a key holding the wrong kind of value"
- msgNotValidHllValue = "WRONGTYPE Key is not a valid HyperLogLog string value."
- msgInvalidInt = "ERR value is not an integer or out of range"
- msgInvalidFloat = "ERR value is not a valid float"
- msgInvalidMinMax = "ERR min or max is not a float"
- msgInvalidRangeItem = "ERR min or max not valid string range item"
- msgInvalidTimeout = "ERR timeout is not a float or out of range"
- msgSyntaxError = "ERR syntax error"
- msgKeyNotFound = "ERR no such key"
- msgOutOfRange = "ERR index out of range"
- msgInvalidCursor = "ERR invalid cursor"
- msgXXandNX = "ERR XX and NX options at the same time are not compatible"
- msgNegTimeout = "ERR timeout is negative"
- msgInvalidSETime = "ERR invalid expire time in set"
- msgInvalidSETEXTime = "ERR invalid expire time in setex"
- msgInvalidPSETEXTime = "ERR invalid expire time in psetex"
- msgInvalidKeysNumber = "ERR Number of keys can't be greater than number of args"
- msgNegativeKeysNumber = "ERR Number of keys can't be negative"
- msgFScriptUsage = "ERR Unknown subcommand or wrong number of arguments for '%s'. Try SCRIPT HELP."
- msgFPubsubUsage = "ERR Unknown subcommand or wrong number of arguments for '%s'. Try PUBSUB HELP."
- msgScriptFlush = "ERR SCRIPT FLUSH only support SYNC|ASYNC option"
- msgSingleElementPair = "ERR INCR option supports a single increment-element pair"
- msgGTLTandNX = "ERR GT, LT, and/or NX options at the same time are not compatible"
- msgInvalidStreamID = "ERR Invalid stream ID specified as stream command argument"
- msgStreamIDTooSmall = "ERR The ID specified in XADD is equal or smaller than the target stream top item"
- msgStreamIDZero = "ERR The ID specified in XADD must be greater than 0-0"
- msgNoScriptFound = "NOSCRIPT No matching script. Please use EVAL."
- msgUnsupportedUnit = "ERR unsupported unit provided. please use m, km, ft, mi"
- msgNotFromScripts = "This Redis command is not allowed from scripts"
- msgXreadUnbalanced = "ERR Unbalanced XREAD list of streams: for each stream key an ID or '$' must be specified."
- msgXgroupKeyNotFound = "ERR The XGROUP subcommand requires the key to exist. Note that for CREATE you may want to use the MKSTREAM option to create an empty stream automatically."
- msgXtrimInvalidStrategy = "ERR unsupported XTRIM strategy. Please use MAXLEN, MINID"
- msgXtrimInvalidMaxLen = "ERR value is not an integer or out of range"
- msgXtrimInvalidLimit = "ERR syntax error, LIMIT cannot be used without the special ~ option"
- msgDBIndexOutOfRange = "ERR DB index is out of range"
- msgLimitCombination = "ERR syntax error, LIMIT is only supported in combination with either BYSCORE or BYLEX"
- msgRankIsZero = "ERR RANK can't be zero: use 1 to start from the first match, 2 from the second ... or use negative to start from the end of the list"
- msgCountIsNegative = "ERR COUNT can't be negative"
- msgMaxLengthIsNegative = "ERR MAXLEN can't be negative"
-)
-
-func errWrongNumber(cmd string) string {
- return fmt.Sprintf("ERR wrong number of arguments for '%s' command", strings.ToLower(cmd))
-}
-
-func errLuaParseError(err error) string {
- return fmt.Sprintf("ERR Error compiling script (new function): %s", err.Error())
-}
-
-func errReadgroup(key, group string) error {
- return fmt.Errorf("NOGROUP No such key '%s' or consumer group '%s'", key, group)
-}
-
-func errXreadgroup(key, group string) error {
- return fmt.Errorf("NOGROUP No such key '%s' or consumer group '%s' in XREADGROUP with GROUP option", key, group)
-}
-
-// withTx wraps the non-argument-checking part of command handling code in
-// transaction logic.
-func withTx(
- m *Miniredis,
- c *server.Peer,
- cb txCmd,
-) {
- ctx := getCtx(c)
-
- if ctx.nested {
- // this is a call via Lua's .call(). It's already locked.
- cb(c, ctx)
- m.signal.Broadcast()
- return
- }
-
- if inTx(ctx) {
- addTxCmd(ctx, cb)
- c.WriteInline("QUEUED")
- return
- }
- m.Lock()
- cb(c, ctx)
- // done, wake up anyone who waits on anything.
- m.signal.Broadcast()
- m.Unlock()
-}
-
-// blockCmd is executed returns whether it is done
-type blockCmd func(*server.Peer, *connCtx) bool
-
-// blocking keeps trying a command until the callback returns true. Calls
-// onTimeout after the timeout (or when we call this in a transaction).
-func blocking(
- m *Miniredis,
- c *server.Peer,
- timeout time.Duration,
- cb blockCmd,
- onTimeout func(*server.Peer),
-) {
- var (
- ctx = getCtx(c)
- )
- if inTx(ctx) {
- addTxCmd(ctx, func(c *server.Peer, ctx *connCtx) {
- if !cb(c, ctx) {
- onTimeout(c)
- }
- })
- c.WriteInline("QUEUED")
- return
- }
-
- localCtx, cancel := context.WithCancel(m.Ctx)
- defer cancel()
- timedOut := false
- if timeout != 0 {
- go setCondTimer(localCtx, m.signal, &timedOut, timeout)
- }
- go func() {
- <-localCtx.Done()
- m.signal.Broadcast() // main loop might miss this signal
- }()
-
- m.Lock()
- defer m.Unlock()
- for {
- done := cb(c, ctx)
- if done {
- return
- }
-
- if m.Ctx.Err() != nil {
- return
- }
- if timedOut {
- onTimeout(c)
- return
- }
-
- m.signal.Wait()
- }
-}
-
-func setCondTimer(ctx context.Context, sig *sync.Cond, timedOut *bool, timeout time.Duration) {
- dl := time.NewTimer(timeout)
- defer dl.Stop()
- select {
- case <-dl.C:
- sig.L.Lock() // for timedOut
- *timedOut = true
- sig.Broadcast() // main loop might miss this signal
- sig.L.Unlock()
- case <-ctx.Done():
- }
-}
-
-// formatBig formats a float the way redis does
-func formatBig(v *big.Float) string {
- // Format with %f and strip trailing 0s.
- if v.IsInf() {
- return "inf"
- }
- // if math.IsInf(v, -1) {
- // return "-inf"
- // }
- return stripZeros(fmt.Sprintf("%.17f", v))
-}
-
-func stripZeros(sv string) string {
- for strings.Contains(sv, ".") {
- if sv[len(sv)-1] != '0' {
- break
- }
- // Remove trailing 0s.
- sv = sv[:len(sv)-1]
- // Ends with a '.'.
- if sv[len(sv)-1] == '.' {
- sv = sv[:len(sv)-1]
- break
- }
- }
- return sv
-}
-
-// redisRange gives Go offsets for something l long with start/end in
-// Redis semantics. Both start and end can be negative.
-// Used for string range and list range things.
-// The results can be used as: v[start:end]
-// Note that GETRANGE (on a string key) never returns an empty string when end
-// is a large negative number.
-func redisRange(l, start, end int, stringSymantics bool) (int, int) {
- if start < 0 {
- start = l + start
- if start < 0 {
- start = 0
- }
- }
- if start > l {
- start = l
- }
-
- if end < 0 {
- end = l + end
- if end < 0 {
- end = -1
- if stringSymantics {
- end = 0
- }
- }
- }
- if end < math.MaxInt32 {
- end++ // end argument is inclusive in Redis.
- }
- if end > l {
- end = l
- }
-
- if end < start {
- return 0, 0
- }
- return start, end
-}
diff --git a/vendor/github.com/alicebob/miniredis/v2/server/Makefile b/vendor/github.com/alicebob/miniredis/v2/server/Makefile
deleted file mode 100644
index c82e336f..00000000
--- a/vendor/github.com/alicebob/miniredis/v2/server/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-.PHONY: all build test
-
-all: build test
-
-build:
- go build
-
-test:
- go test
diff --git a/vendor/github.com/alicebob/miniredis/v2/server/proto.go b/vendor/github.com/alicebob/miniredis/v2/server/proto.go
deleted file mode 100644
index f62e1d73..00000000
--- a/vendor/github.com/alicebob/miniredis/v2/server/proto.go
+++ /dev/null
@@ -1,157 +0,0 @@
-package server
-
-import (
- "bufio"
- "errors"
- "strconv"
-)
-
-type Simple string
-
-// ErrProtocol is the general error for unexpected input
-var ErrProtocol = errors.New("invalid request")
-
-// client always sends arrays with bulk strings
-func readArray(rd *bufio.Reader) ([]string, error) {
- line, err := rd.ReadString('\n')
- if err != nil {
- return nil, err
- }
- if len(line) < 3 {
- return nil, ErrProtocol
- }
-
- switch line[0] {
- default:
- return nil, ErrProtocol
- case '*':
- l, err := strconv.Atoi(line[1 : len(line)-2])
- if err != nil {
- return nil, err
- }
- // l can be -1
- var fields []string
- for ; l > 0; l-- {
- s, err := readString(rd)
- if err != nil {
- return nil, err
- }
- fields = append(fields, s)
- }
- return fields, nil
- }
-}
-
-func readString(rd *bufio.Reader) (string, error) {
- line, err := rd.ReadString('\n')
- if err != nil {
- return "", err
- }
- if len(line) < 3 {
- return "", ErrProtocol
- }
-
- switch line[0] {
- default:
- return "", ErrProtocol
- case '+', '-', ':':
- // +: simple string
- // -: errors
- // :: integer
- // Simple line based replies.
- return string(line[1 : len(line)-2]), nil
- case '$':
- // bulk strings are: `$5\r\nhello\r\n`
- length, err := strconv.Atoi(line[1 : len(line)-2])
- if err != nil {
- return "", err
- }
- if length < 0 {
- // -1 is a nil response
- return "", nil
- }
- var (
- buf = make([]byte, length+2)
- pos = 0
- )
- for pos < length+2 {
- n, err := rd.Read(buf[pos:])
- if err != nil {
- return "", err
- }
- pos += n
- }
- return string(buf[:length]), nil
- }
-}
-
-// parse a reply
-func ParseReply(rd *bufio.Reader) (interface{}, error) {
- line, err := rd.ReadString('\n')
- if err != nil {
- return nil, err
- }
- if len(line) < 3 {
- return nil, ErrProtocol
- }
-
- switch line[0] {
- default:
- return nil, ErrProtocol
- case '+':
- // +: simple string
- return Simple(line[1 : len(line)-2]), nil
- case '-':
- // -: errors
- return nil, errors.New(string(line[1 : len(line)-2]))
- case ':':
- // :: integer
- v := line[1 : len(line)-2]
- if v == "" {
- return 0, nil
- }
- n, err := strconv.Atoi(v)
- if err != nil {
- return nil, ErrProtocol
- }
- return n, nil
- case '$':
- // bulk strings are: `$5\r\nhello\r\n`
- length, err := strconv.Atoi(line[1 : len(line)-2])
- if err != nil {
- return "", err
- }
- if length < 0 {
- // -1 is a nil response
- return nil, nil
- }
- var (
- buf = make([]byte, length+2)
- pos = 0
- )
- for pos < length+2 {
- n, err := rd.Read(buf[pos:])
- if err != nil {
- return "", err
- }
- pos += n
- }
- return string(buf[:length]), nil
- case '*':
- // array
- l, err := strconv.Atoi(line[1 : len(line)-2])
- if err != nil {
- return nil, ErrProtocol
- }
- // l can be -1
- var fields []interface{}
- for ; l > 0; l-- {
- s, err := ParseReply(rd)
- if err != nil {
- return nil, err
- }
- fields = append(fields, s)
- }
- return fields, nil
- }
-}
diff --git a/vendor/github.com/alicebob/miniredis/v2/server/server.go b/vendor/github.com/alicebob/miniredis/v2/server/server.go
deleted file mode 100644
index 60e391f2..00000000
--- a/vendor/github.com/alicebob/miniredis/v2/server/server.go
+++ /dev/null
@@ -1,487 +0,0 @@
-package server
-
-import (
- "bufio"
- "crypto/tls"
- "fmt"
- "math"
- "net"
- "strings"
- "sync"
- "unicode"
-)
-
-func errUnknownCommand(cmd string, args []string) string {
- s := fmt.Sprintf("ERR unknown command `%s`, with args beginning with: ", cmd)
- if len(args) > 20 {
- args = args[:20]
- }
- for _, a := range args {
- s += fmt.Sprintf("`%s`, ", a)
- }
- return s
-}
-
-// Cmd is what Register expects
-type Cmd func(c *Peer, cmd string, args []string)
-
-type DisconnectHandler func(c *Peer)
-
-// Hook is can be added to run before every cmd. Return true if the command is done.
-type Hook func(*Peer, string, ...string) bool
-
-// Server is a simple redis server
-type Server struct {
- l net.Listener
- cmds map[string]Cmd
- preHook Hook
- peers map[net.Conn]struct{}
- mu sync.Mutex
- wg sync.WaitGroup
- infoConns int
- infoCmds int
-}
-
-// NewServer makes a server listening on addr. Close with .Close().
-func NewServer(addr string) (*Server, error) {
- l, err := net.Listen("tcp", addr)
- if err != nil {
- return nil, err
- }
- return newServer(l), nil
-}
-
-func NewServerTLS(addr string, cfg *tls.Config) (*Server, error) {
- l, err := tls.Listen("tcp", addr, cfg)
- if err != nil {
- return nil, err
- }
- return newServer(l), nil
-}
-
-func newServer(l net.Listener) *Server {
- s := Server{
- cmds: map[string]Cmd{},
- peers: map[net.Conn]struct{}{},
- l: l,
- }
-
- s.wg.Add(1)
- go func() {
- defer s.wg.Done()
- s.serve(l)
-
- s.mu.Lock()
- for c := range s.peers {
- c.Close()
- }
- s.mu.Unlock()
- }()
- return &s
-}
-
-// (un)set a hook which is ran before every call. It returns true if the command is done.
-func (s *Server) SetPreHook(h Hook) {
- s.mu.Lock()
- s.preHook = h
- s.mu.Unlock()
-}
-
-func (s *Server) serve(l net.Listener) {
- for {
- conn, err := l.Accept()
- if err != nil {
- return
- }
- s.ServeConn(conn)
- }
-}
-
-// ServeConn handles a net.Conn. Nice with net.Pipe()
-func (s *Server) ServeConn(conn net.Conn) {
- s.wg.Add(1)
- s.mu.Lock()
- s.peers[conn] = struct{}{}
- s.infoConns++
- s.mu.Unlock()
-
- go func() {
- defer s.wg.Done()
- defer conn.Close()
-
- s.servePeer(conn)
-
- s.mu.Lock()
- delete(s.peers, conn)
- s.mu.Unlock()
- }()
-}
-
-// Addr has the net.Addr struct
-func (s *Server) Addr() *net.TCPAddr {
- s.mu.Lock()
- defer s.mu.Unlock()
- if s.l == nil {
- return nil
- }
- return s.l.Addr().(*net.TCPAddr)
-}
-
-// Close a server started with NewServer. It will wait until all clients are
-// closed.
-func (s *Server) Close() {
- s.mu.Lock()
- if s.l != nil {
- s.l.Close()
- }
- s.l = nil
- s.mu.Unlock()
-
- s.wg.Wait()
-}
-
-// Register a command. It can't have been registered before. Safe to call on a
-// running server.
-func (s *Server) Register(cmd string, f Cmd) error {
- s.mu.Lock()
- defer s.mu.Unlock()
- cmd = strings.ToUpper(cmd)
- if _, ok := s.cmds[cmd]; ok {
- return fmt.Errorf("command already registered: %s", cmd)
- }
- s.cmds[cmd] = f
- return nil
-}
-
-func (s *Server) servePeer(c net.Conn) {
- r := bufio.NewReader(c)
- peer := &Peer{
- w: bufio.NewWriter(c),
- }
- defer func() {
- for _, f := range peer.onDisconnect {
- f()
- }
- }()
-
- for {
- args, err := readArray(r)
- if err != nil {
- return
- }
- s.Dispatch(peer, args)
- peer.Flush()
-
- s.mu.Lock()
- closed := peer.closed
- s.mu.Unlock()
- if closed {
- c.Close()
- }
- }
-}
-
-func (s *Server) Dispatch(c *Peer, args []string) {
- cmd, args := args[0], args[1:]
- cmdUp := strings.ToUpper(cmd)
- s.mu.Lock()
- h := s.preHook
- s.mu.Unlock()
- if h != nil {
- if h(c, cmdUp, args...) {
- return
- }
- }
-
- s.mu.Lock()
- cb, ok := s.cmds[cmdUp]
- s.mu.Unlock()
- if !ok {
- c.WriteError(errUnknownCommand(cmd, args))
- return
- }
-
- s.mu.Lock()
- s.infoCmds++
- s.mu.Unlock()
- cb(c, cmdUp, args)
-}
-
-// TotalCommands is total (known) commands since this the server started
-func (s *Server) TotalCommands() int {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.infoCmds
-}
-
-// ClientsLen gives the number of connected clients right now
-func (s *Server) ClientsLen() int {
- s.mu.Lock()
- defer s.mu.Unlock()
- return len(s.peers)
-}
-
-// TotalConnections give the number of clients connected since the server
-// started, including the currently connected ones
-func (s *Server) TotalConnections() int {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.infoConns
-}
-
-// Peer is a client connected to the server
-type Peer struct {
- w *bufio.Writer
- closed bool
- Resp3 bool
- Ctx interface{} // anything goes, server won't touch this
- onDisconnect []func() // list of callbacks
- mu sync.Mutex // for Block()
-}
-
-func NewPeer(w *bufio.Writer) *Peer {
- return &Peer{
- w: w,
- }
-}
-
-// Flush the write buffer. Called automatically after every redis command
-func (c *Peer) Flush() {
- c.mu.Lock()
- defer c.mu.Unlock()
- c.w.Flush()
-}
-
-// Close the client connection after the current command is done.
-func (c *Peer) Close() {
- c.mu.Lock()
- defer c.mu.Unlock()
- c.closed = true
-}
-
-// Register a function to execute on disconnect. There can be multiple
-// functions registered.
-func (c *Peer) OnDisconnect(f func()) {
- c.onDisconnect = append(c.onDisconnect, f)
-}
-
-// issue multiple calls, guarded with a mutex
-func (c *Peer) Block(f func(*Writer)) {
- c.mu.Lock()
- defer c.mu.Unlock()
- f(&Writer{c.w, c.Resp3})
-}
-
-// WriteError writes a redis 'Error'
-func (c *Peer) WriteError(e string) {
- c.Block(func(w *Writer) {
- w.WriteError(e)
- })
-}
-
-// WriteInline writes a redis inline string
-func (c *Peer) WriteInline(s string) {
- c.Block(func(w *Writer) {
- w.WriteInline(s)
- })
-}
-
-// WriteOK write the inline string `OK`
-func (c *Peer) WriteOK() {
- c.WriteInline("OK")
-}
-
-// WriteBulk writes a bulk string
-func (c *Peer) WriteBulk(s string) {
- c.Block(func(w *Writer) {
- w.WriteBulk(s)
- })
-}
-
-// WriteNull writes a redis Null element
-func (c *Peer) WriteNull() {
- c.Block(func(w *Writer) {
- w.WriteNull()
- })
-}
-
-// WriteLen starts an array with the given length
-func (c *Peer) WriteLen(n int) {
- c.Block(func(w *Writer) {
- w.WriteLen(n)
- })
-}
-
-// WriteMapLen starts a map with the given length (number of keys)
-func (c *Peer) WriteMapLen(n int) {
- c.Block(func(w *Writer) {
- w.WriteMapLen(n)
- })
-}
-
-// WriteSetLen starts a set with the given length (number of elements)
-func (c *Peer) WriteSetLen(n int) {
- c.Block(func(w *Writer) {
- w.WriteSetLen(n)
- })
-}
-
-// WritePushLen starts a push-data array with the given length
-func (c *Peer) WritePushLen(n int) {
- c.Block(func(w *Writer) {
- w.WritePushLen(n)
- })
-}
-
-// WriteInt writes an integer
-func (c *Peer) WriteInt(n int) {
- c.Block(func(w *Writer) {
- w.WriteInt(n)
- })
-}
-
-// WriteFloat writes a float
-func (c *Peer) WriteFloat(n float64) {
- c.Block(func(w *Writer) {
- w.WriteFloat(n)
- })
-}
-
-// WriteRaw writes a raw redis response
-func (c *Peer) WriteRaw(s string) {
- c.Block(func(w *Writer) {
- w.WriteRaw(s)
- })
-}
-
-// WriteStrings is a helper to (bulk)write a string list
-func (c *Peer) WriteStrings(strs []string) {
- c.Block(func(w *Writer) {
- w.WriteStrings(strs)
- })
-}
-
-func toInline(s string) string {
- return strings.Map(func(r rune) rune {
- if unicode.IsSpace(r) {
- return ' '
- }
- return r
- }, s)
-}
-
-// A Writer is given to the callback in Block()
-type Writer struct {
- w *bufio.Writer
- resp3 bool
-}
-
-// WriteError writes a redis 'Error'
-func (w *Writer) WriteError(e string) {
- fmt.Fprintf(w.w, "-%s\r\n", toInline(e))
-}
-
-func (w *Writer) WriteLen(n int) {
- fmt.Fprintf(w.w, "*%d\r\n", n)
-}
-
-func (w *Writer) WriteMapLen(n int) {
- if w.resp3 {
- fmt.Fprintf(w.w, "%%%d\r\n", n)
- return
- }
- w.WriteLen(n * 2)
-}
-
-func (w *Writer) WriteSetLen(n int) {
- if w.resp3 {
- fmt.Fprintf(w.w, "~%d\r\n", n)
- return
- }
- w.WriteLen(n)
-}
-
-func (w *Writer) WritePushLen(n int) {
- if w.resp3 {
- fmt.Fprintf(w.w, ">%d\r\n", n)
- return
- }
- w.WriteLen(n)
-}
-
-// WriteBulk writes a bulk string
-func (w *Writer) WriteBulk(s string) {
- fmt.Fprintf(w.w, "$%d\r\n%s\r\n", len(s), s)
-}
-
-// WriteStrings writes a list of strings (bulk)
-func (w *Writer) WriteStrings(strs []string) {
- w.WriteLen(len(strs))
- for _, s := range strs {
- w.WriteBulk(s)
- }
-}
-
-// WriteInt writes an integer
-func (w *Writer) WriteInt(n int) {
- fmt.Fprintf(w.w, ":%d\r\n", n)
-}
-
-// WriteFloat writes a float
-func (w *Writer) WriteFloat(n float64) {
- if w.resp3 {
- fmt.Fprintf(w.w, ",%s\r\n", formatFloat(n))
- return
- }
- w.WriteBulk(formatFloat(n))
-}
-
-// WriteNull writes a redis Null element
-func (w *Writer) WriteNull() {
- if w.resp3 {
- fmt.Fprint(w.w, "_\r\n")
- return
- }
- fmt.Fprintf(w.w, "$-1\r\n")
-}
-
-// WriteInline writes a redis inline string
-func (w *Writer) WriteInline(s string) {
- fmt.Fprintf(w.w, "+%s\r\n", toInline(s))
-}
-
-// WriteRaw writes a raw redis response
-func (w *Writer) WriteRaw(s string) {
- fmt.Fprint(w.w, s)
-}
-
-func (w *Writer) Flush() {
- w.w.Flush()
-}
-
-// formatFloat formats a float the way redis does (sort-of)
-func formatFloat(v float64) string {
- if math.IsInf(v, 1) {
- return "inf"
- }
- if math.IsInf(v, -1) {
- return "-inf"
- }
- return stripZeros(fmt.Sprintf("%.12f", v))
-}
-
-func stripZeros(sv string) string {
- for strings.Contains(sv, ".") {
- if sv[len(sv)-1] != '0' {
- break
- }
- // Remove trailing 0s.
- sv = sv[:len(sv)-1]
- // Ends with a '.'.
- if sv[len(sv)-1] == '.' {
- sv = sv[:len(sv)-1]
- break
- }
- }
- return sv
-}
diff --git a/vendor/github.com/alicebob/miniredis/v2/sorted_set.go b/vendor/github.com/alicebob/miniredis/v2/sorted_set.go
deleted file mode 100644
index 96ebd5d7..00000000
--- a/vendor/github.com/alicebob/miniredis/v2/sorted_set.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package miniredis
-
-// The most KISS way to implement a sorted set. Luckily we don't care about
-// performance that much.
-
-import (
- "sort"
-)
-
-type direction int
-
-const (
- unsorted direction = iota
- asc
- desc
-)
-
-type sortedSet map[string]float64
-
-type ssElem struct {
- score float64
- member string
-}
-type ssElems []ssElem
-
-type byScore ssElems
-
-func (sse byScore) Len() int { return len(sse) }
-func (sse byScore) Swap(i, j int) { sse[i], sse[j] = sse[j], sse[i] }
-func (sse byScore) Less(i, j int) bool {
- if sse[i].score != sse[j].score {
- return sse[i].score < sse[j].score
- }
- return sse[i].member < sse[j].member
-}
-
-func newSortedSet() sortedSet {
- return sortedSet{}
-}
-
-func (ss *sortedSet) card() int {
- return len(*ss)
-}
-
-func (ss *sortedSet) set(score float64, member string) {
- (*ss)[member] = score
-}
-
-func (ss *sortedSet) get(member string) (float64, bool) {
- v, ok := (*ss)[member]
- return v, ok
-}
-
-// elems gives the list of ssElem, ready to sort.
-func (ss *sortedSet) elems() ssElems {
- elems := make(ssElems, 0, len(*ss))
- for e, s := range *ss {
- elems = append(elems, ssElem{s, e})
- }
- return elems
-}
-
-func (ss *sortedSet) byScore(d direction) ssElems {
- elems := ss.elems()
- sort.Sort(byScore(elems))
- if d == desc {
- reverseElems(elems)
- }
- return ssElems(elems)
-}
-
-// rankByScore gives the (0-based) index of member, or returns false.
-func (ss *sortedSet) rankByScore(member string, d direction) (int, bool) {
- if _, ok := (*ss)[member]; !ok {
- return 0, false
- }
- for i, e := range ss.byScore(d) {
- if e.member == member {
- return i, true
- }
- }
- // Can't happen
- return 0, false
-}
-
-func reverseSlice(o []string) {
- for i := range make([]struct{}, len(o)/2) {
- other := len(o) - 1 - i
- o[i], o[other] = o[other], o[i]
- }
-}
-
-func reverseElems(o ssElems) {
- for i := range make([]struct{}, len(o)/2) {
- other := len(o) - 1 - i
- o[i], o[other] = o[other], o[i]
- }
-}
diff --git a/vendor/github.com/alicebob/miniredis/v2/stream.go b/vendor/github.com/alicebob/miniredis/v2/stream.go
deleted file mode 100644
index c09051a2..00000000
--- a/vendor/github.com/alicebob/miniredis/v2/stream.go
+++ /dev/null
@@ -1,419 +0,0 @@
-// Basic stream implementation.
-
-package miniredis
-
-import (
- "errors"
- "fmt"
- "math"
- "sort"
- "strconv"
- "strings"
- "time"
-)
-
-// a Stream is a list of entries, lowest ID (oldest) first, and all "groups".
-type streamKey struct {
- entries []StreamEntry
- groups map[string]*streamGroup
- lastAllocatedID string
-}
-
-// a StreamEntry is an entry in a stream. The ID is always of the form
-// "123-123".
-// Values is an ordered list of key-value pairs.
-type StreamEntry struct {
- ID string
- Values []string
-}
-
-type streamGroup struct {
- stream *streamKey
- lastID string
- pending []pendingEntry
- consumers map[string]*consumer
-}
-
-type consumer struct {
- numPendingEntries int
- // TODO: "last seen" timestamp
-}
-
-type pendingEntry struct {
- id string
- consumer string
- deliveryCount int
- lastDelivery time.Time
-}
-
-func newStreamKey() *streamKey {
- return &streamKey{
- groups: map[string]*streamGroup{},
- }
-}
-
-func (s *streamKey) generateID(now time.Time) string {
- ts := uint64(now.UnixNano()) / 1_000_000
-
- next := fmt.Sprintf("%d-%d", ts, 0)
- if s.lastAllocatedID != "" && streamCmp(s.lastAllocatedID, next) >= 0 {
- last, _ := parseStreamID(s.lastAllocatedID)
- next = fmt.Sprintf("%d-%d", last[0], last[1]+1)
- }
-
- lastID := s.lastID()
- if streamCmp(lastID, next) >= 0 {
- last, _ := parseStreamID(lastID)
- next = fmt.Sprintf("%d-%d", last[0], last[1]+1)
- }
-
- s.lastAllocatedID = next
- return next
-}
-
-func (s *streamKey) lastID() string {
- if len(s.entries) == 0 {
- return "0-0"
- }
-
- return s.entries[len(s.entries)-1].ID
-}
-
-func (s *streamKey) copy() *streamKey {
- cpy := &streamKey{
- entries: s.entries,
- }
- groups := map[string]*streamGroup{}
- for k, v := range s.groups {
- gr := v.copy()
- gr.stream = cpy
- groups[k] = gr
- }
- cpy.groups = groups
- return cpy
-}
-
-func parseStreamID(id string) ([2]uint64, error) {
- var (
- res [2]uint64
- err error
- )
- parts := strings.SplitN(id, "-", 2)
- res[0], err = strconv.ParseUint(parts[0], 10, 64)
- if err != nil {
- return res, errors.New(msgInvalidStreamID)
- }
- if len(parts) == 2 {
- res[1], err = strconv.ParseUint(parts[1], 10, 64)
- if err != nil {
- return res, errors.New(msgInvalidStreamID)
- }
- }
- return res, nil
-}
-
-// compares two stream IDs (of the full format: "123-123"). Returns: -1, 0, 1
-// The given IDs should be valid stream IDs.
-func streamCmp(a, b string) int {
- ap, _ := parseStreamID(a)
- bp, _ := parseStreamID(b)
-
- switch {
- case ap[0] < bp[0]:
- return -1
- case ap[0] > bp[0]:
- return 1
- case ap[1] < bp[1]:
- return -1
- case ap[1] > bp[1]:
- return 1
- default:
- return 0
- }
-}
-
-// formatStreamID makes a full id ("42-42") out of a partial one ("42")
-func formatStreamID(id string) (string, error) {
- var ts [2]uint64
- parts := strings.SplitN(id, "-", 2)
-
- if len(parts) > 0 {
- p, err := strconv.ParseUint(parts[0], 10, 64)
- if err != nil {
- return "", errInvalidEntryID
- }
- ts[0] = p
- }
- if len(parts) > 1 {
- p, err := strconv.ParseUint(parts[1], 10, 64)
- if err != nil {
- return "", errInvalidEntryID
- }
- ts[1] = p
- }
- return fmt.Sprintf("%d-%d", ts[0], ts[1]), nil
-}
-
-func formatStreamRangeBound(id string, start bool, reverse bool) (string, error) {
- if id == "-" {
- return "0-0", nil
- }
-
- if id == "+" {
- return fmt.Sprintf("%d-%d", uint64(math.MaxUint64), uint64(math.MaxUint64)), nil
- }
-
- if id == "0" {
- return "0-0", nil
- }
-
- parts := strings.Split(id, "-")
- if len(parts) == 2 {
- return formatStreamID(id)
- }
-
- // Incomplete IDs case
- ts, err := strconv.ParseUint(parts[0], 10, 64)
- if err != nil {
- return "", errInvalidEntryID
- }
-
- if (!start && !reverse) || (start && reverse) {
- return fmt.Sprintf("%d-%d", ts, uint64(math.MaxUint64)), nil
- }
-
- return fmt.Sprintf("%d-%d", ts, 0), nil
-}
-
-func reversedStreamEntries(o []StreamEntry) []StreamEntry {
- newStream := make([]StreamEntry, len(o))
- for i, e := range o {
- newStream[len(o)-i-1] = e
- }
- return newStream
-}
-
-func (s *streamKey) createGroup(group, id string) error {
- if _, ok := s.groups[group]; ok {
- return errors.New("BUSYGROUP Consumer Group name already exists")
- }
-
- if id == "$" {
- id = s.lastID()
- }
- s.groups[group] = &streamGroup{
- stream: s,
- lastID: id,
- consumers: map[string]*consumer{},
- }
- return nil
-}
-
-// streamAdd adds an entry to a stream. Returns the new entry ID.
-// If id is empty or "*" the ID will be generated automatically.
-// `values` should have an even length.
-func (s *streamKey) add(entryID string, values []string, now time.Time) (string, error) {
- if entryID == "" || entryID == "*" {
- entryID = s.generateID(now)
- }
-
- entryID, err := formatStreamID(entryID)
- if err != nil {
- return "", err
- }
- if entryID == "0-0" {
- return "", errors.New(msgStreamIDZero)
- }
- if streamCmp(s.lastID(), entryID) != -1 {
- return "", errors.New(msgStreamIDTooSmall)
- }
-
- s.entries = append(s.entries, StreamEntry{
- ID: entryID,
- Values: values,
- })
- return entryID, nil
-}
-
-func (s *streamKey) trim(n int) {
- if len(s.entries) > n {
- s.entries = s.entries[len(s.entries)-n:]
- }
-}
-
-// all entries after "id"
-func (s *streamKey) after(id string) []StreamEntry {
- pos := sort.Search(len(s.entries), func(i int) bool {
- return streamCmp(id, s.entries[i].ID) < 0
- })
- return s.entries[pos:]
-}
-
-// get a stream entry by ID
-// Also returns the position in the entries slice, if found.
-func (s *streamKey) get(id string) (int, *StreamEntry) {
- pos := sort.Search(len(s.entries), func(i int) bool {
- return streamCmp(id, s.entries[i].ID) <= 0
- })
- if len(s.entries) <= pos || s.entries[pos].ID != id {
- return 0, nil
- }
- return pos, &s.entries[pos]
-}
-
-func (g *streamGroup) readGroup(
- now time.Time,
- consumerID,
- id string,
- count int,
- noack bool,
-) []StreamEntry {
- if id == ">" {
- // undelivered messages
- msgs := g.stream.after(g.lastID)
- if len(msgs) == 0 {
- return nil
- }
-
- if count > 0 && len(msgs) > count {
- msgs = msgs[:count]
- }
-
- if !noack {
- shouldAppend := len(g.pending) == 0
- for _, msg := range msgs {
- if !shouldAppend {
- shouldAppend = streamCmp(msg.ID, g.pending[len(g.pending)-1].id) == 1
- }
-
- var entry *pendingEntry
- if shouldAppend {
- g.pending = append(g.pending, pendingEntry{})
- entry = &g.pending[len(g.pending)-1]
- } else {
- var pos int
- pos, entry = g.searchPending(msg.ID)
- if entry == nil {
- g.pending = append(g.pending[:pos+1], g.pending[pos:]...)
- entry = &g.pending[pos]
- } else {
- g.consumers[entry.consumer].numPendingEntries--
- }
- }
-
- *entry = pendingEntry{
- id: msg.ID,
- consumer: consumerID,
- deliveryCount: 1,
- lastDelivery: now,
- }
- }
- }
- if _, ok := g.consumers[consumerID]; !ok {
- g.consumers[consumerID] = &consumer{}
- }
- g.consumers[consumerID].numPendingEntries += len(msgs)
- g.lastID = msgs[len(msgs)-1].ID
- return msgs
- }
-
- // re-deliver messages from the pending list.
- // con := gr.consumers[consumerID]
- msgs := g.pendingAfter(id)
- var res []StreamEntry
- for i, p := range msgs {
- if p.consumer != consumerID {
- continue
- }
- _, entry := g.stream.get(p.id)
- // not found. Weird?
- if entry == nil {
- continue
- }
- p.deliveryCount += 1
- p.lastDelivery = now
- msgs[i] = p
- res = append(res, *entry)
- }
- return res
-}
-
-func (g *streamGroup) searchPending(id string) (int, *pendingEntry) {
- pos := sort.Search(len(g.pending), func(i int) bool {
- return streamCmp(id, g.pending[i].id) <= 0
- })
- if pos >= len(g.pending) || g.pending[pos].id != id {
- return pos, nil
- }
- return pos, &g.pending[pos]
-}
-
-func (g *streamGroup) ack(ids []string) (int, error) {
- count := 0
- for _, id := range ids {
- if _, err := parseStreamID(id); err != nil {
- return 0, errors.New(msgInvalidStreamID)
- }
-
- pos, entry := g.searchPending(id)
- if entry == nil {
- continue
- }
-
- consumer := g.consumers[entry.consumer]
- consumer.numPendingEntries--
-
- g.pending = append(g.pending[:pos], g.pending[pos+1:]...)
- count++
- }
- return count, nil
-}
-
-func (s *streamKey) delete(ids []string) (int, error) {
- count := 0
- for _, id := range ids {
- if _, err := parseStreamID(id); err != nil {
- return 0, errors.New(msgInvalidStreamID)
- }
-
- i, entry := s.get(id)
- if entry == nil {
- continue
- }
-
- s.entries = append(s.entries[:i], s.entries[i+1:]...)
- count++
- }
- return count, nil
-}
-
-func (g *streamGroup) pendingAfter(id string) []pendingEntry {
- pos := sort.Search(len(g.pending), func(i int) bool {
- return streamCmp(id, g.pending[i].id) < 0
- })
- return g.pending[pos:]
-}
-
-func (g *streamGroup) pendingCount(consumer string) int {
- n := 0
- for _, p := range g.pending {
- if p.consumer == consumer {
- n++
- }
- }
- return n
-}
-
-func (g *streamGroup) copy() *streamGroup {
- cns := map[string]*consumer{}
- for k, v := range g.consumers {
- c := *v
- cns[k] = &c
- }
- return &streamGroup{
- // don't copy stream
- lastID: g.lastID,
- pending: g.pending,
- consumers: cns,
- }
-}
diff --git a/vendor/github.com/asdine/storm/LICENSE b/vendor/github.com/asdine/storm/LICENSE
deleted file mode 100644
index bf86c69d..00000000
--- a/vendor/github.com/asdine/storm/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) [2017] [Asdine El Hrychy]
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/asdine/storm/codec/gob/gob.go b/vendor/github.com/asdine/storm/codec/gob/gob.go
deleted file mode 100644
index 56e44b9c..00000000
--- a/vendor/github.com/asdine/storm/codec/gob/gob.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Package gob contains a codec to encode and decode entities in Gob format
-package gob
-
-import (
- "bytes"
- "encoding/gob"
-)
-
-const name = "gob"
-
-// Codec serializing objects using the gob package.
-// See https://golang.org/pkg/encoding/gob/
-var Codec = new(gobCodec)
-
-type gobCodec int
-
-func (c gobCodec) Marshal(v interface{}) ([]byte, error) {
- var b bytes.Buffer
- enc := gob.NewEncoder(&b)
- err := enc.Encode(v)
- if err != nil {
- return nil, err
- }
- return b.Bytes(), nil
-}
-
-func (c gobCodec) Unmarshal(b []byte, v interface{}) error {
- r := bytes.NewReader(b)
- dec := gob.NewDecoder(r)
- return dec.Decode(v)
-}
-
-func (c gobCodec) Name() string {
- return name
-}
diff --git a/vendor/github.com/asdine/storm/v3/.gitignore b/vendor/github.com/asdine/storm/v3/.gitignore
deleted file mode 100644
index feec3026..00000000
--- a/vendor/github.com/asdine/storm/v3/.gitignore
+++ /dev/null
@@ -1,32 +0,0 @@
-# IDE
-.idea/
-.vscode/
-*.iml
-
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
-*.prof
-
-# Golang vendor folder
-/vendor/
diff --git a/vendor/github.com/asdine/storm/v3/.travis.yml b/vendor/github.com/asdine/storm/v3/.travis.yml
deleted file mode 100644
index 01a9bfdf..00000000
--- a/vendor/github.com/asdine/storm/v3/.travis.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-language: go
-
-before_install:
- - go get github.com/stretchr/testify
-
-env: GO111MODULE=on
-
-go:
- - "1.13.x"
- - "1.14.x"
- - tip
-
-matrix:
- allow_failures:
- - go: tip
-
-script:
- - go mod vendor
- - go test -mod vendor -race -v ./...
diff --git a/vendor/github.com/asdine/storm/v3/LICENSE b/vendor/github.com/asdine/storm/v3/LICENSE
deleted file mode 100644
index bf86c69d..00000000
--- a/vendor/github.com/asdine/storm/v3/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) [2017] [Asdine El Hrychy]
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/asdine/storm/v3/README.md b/vendor/github.com/asdine/storm/v3/README.md
deleted file mode 100644
index d7a030c5..00000000
--- a/vendor/github.com/asdine/storm/v3/README.md
+++ /dev/null
@@ -1,643 +0,0 @@
-# Storm
-
-[![Build Status](https://travis-ci.org/asdine/storm.svg)](https://travis-ci.org/asdine/storm)
-[![GoDoc](https://godoc.org/github.com/asdine/storm?status.svg)](https://godoc.org/github.com/asdine/storm)
-
-Storm is a simple and powerful toolkit for [BoltDB](https://github.com/coreos/bbolt). Basically, Storm provides indexes, a wide range of methods to store and fetch data, an advanced query system, and much more.
-
-In addition to the examples below, see also the [examples in the GoDoc](https://godoc.org/github.com/asdine/storm#pkg-examples).
-
-_For extended queries and support for [Badger](https://github.com/dgraph-io/badger), see also [Genji](https://github.com/asdine/genji)_
-
-## Table of Contents
-
-- [Getting Started](#getting-started)
-- [Import Storm](#import-storm)
-- [Open a database](#open-a-database)
-- [Simple CRUD system](#simple-crud-system)
- - [Declare your structures](#declare-your-structures)
- - [Save your object](#save-your-object)
- - [Auto Increment](#auto-increment)
- - [Simple queries](#simple-queries)
- - [Fetch one object](#fetch-one-object)
- - [Fetch multiple objects](#fetch-multiple-objects)
- - [Fetch all objects](#fetch-all-objects)
- - [Fetch all objects sorted by index](#fetch-all-objects-sorted-by-index)
- - [Fetch a range of objects](#fetch-a-range-of-objects)
- - [Fetch objects by prefix](#fetch-objects-by-prefix)
- - [Skip, Limit and Reverse](#skip-limit-and-reverse)
- - [Delete an object](#delete-an-object)
- - [Update an object](#update-an-object)
- - [Initialize buckets and indexes before saving an object](#initialize-buckets-and-indexes-before-saving-an-object)
- - [Drop a bucket](#drop-a-bucket)
- - [Re-index a bucket](#re-index-a-bucket)
- - [Advanced queries](#advanced-queries)
- - [Transactions](#transactions)
- - [Options](#options)
- - [BoltOptions](#boltoptions)
- - [MarshalUnmarshaler](#marshalunmarshaler)
- - [Provided Codecs](#provided-codecs)
- - [Use existing Bolt connection](#use-existing-bolt-connection)
- - [Batch mode](#batch-mode)
-- [Nodes and nested buckets](#nodes-and-nested-buckets)
- - [Node options](#node-options)
-- [Simple Key/Value store](#simple-keyvalue-store)
-- [BoltDB](#boltdb)
-- [License](#license)
-- [Credits](#credits)
-
-## Getting Started
-
-```bash
-GO111MODULE=on go get -u github.com/asdine/storm/v3
-```
-
-## Import Storm
-
-```go
-import "github.com/asdine/storm/v3"
-```
-
-## Open a database
-
-Quick way of opening a database
-
-```go
-db, err := storm.Open("my.db")
-
-defer db.Close()
-```
-
-`Open` can receive multiple options to customize the way it behaves. See [Options](#options) below
-
-## Simple CRUD system
-
-### Declare your structures
-
-```go
-type User struct {
- ID int // primary key
- Group string `storm:"index"` // this field will be indexed
- Email string `storm:"unique"` // this field will be indexed with a unique constraint
- Name string // this field will not be indexed
- Age int `storm:"index"`
-}
-```
-
-The primary key can be of any type as long as it is not a zero value. Storm will search for the tag `id`, if not present Storm will search for a field named `ID`.
-
-```go
-type User struct {
- ThePrimaryKey string `storm:"id"`// primary key
- Group string `storm:"index"` // this field will be indexed
- Email string `storm:"unique"` // this field will be indexed with a unique constraint
- Name string // this field will not be indexed
-}
-```
-
-Storm handles tags in nested structures with the `inline` tag
-
-```go
-type Base struct {
- Ident bson.ObjectId `storm:"id"`
-}
-
-type User struct {
- Base `storm:"inline"`
- Group string `storm:"index"`
- Email string `storm:"unique"`
- Name string
- CreatedAt time.Time `storm:"index"`
-}
-```
-
-### Save your object
-
-```go
-user := User{
- ID: 10,
- Group: "staff",
- Email: "john@provider.com",
- Name: "John",
- Age: 21,
- CreatedAt: time.Now(),
-}
-
-err := db.Save(&user)
-// err == nil
-
-user.ID++
-err = db.Save(&user)
-// err == storm.ErrAlreadyExists
-```
-
-That's it.
-
-`Save` creates or updates all the required indexes and buckets, checks the unique constraints and saves the object to the store.
-
-#### Auto Increment
-
-Storm can auto increment integer values so you don't have to worry about that when saving your objects. Also, the new value is automatically inserted in your field.
-
-```go
-
-type Product struct {
- Pk int `storm:"id,increment"` // primary key with auto increment
- Name string
- IntegerField uint64 `storm:"increment"`
- IndexedIntegerField uint32 `storm:"index,increment"`
- UniqueIntegerField int16 `storm:"unique,increment=100"` // the starting value can be set
-}
-
-p := Product{Name: "Vaccum Cleaner"}
-
-fmt.Println(p.Pk)
-fmt.Println(p.IntegerField)
-fmt.Println(p.IndexedIntegerField)
-fmt.Println(p.UniqueIntegerField)
-// 0
-// 0
-// 0
-// 0
-
-_ = db.Save(&p)
-
-fmt.Println(p.Pk)
-fmt.Println(p.IntegerField)
-fmt.Println(p.IndexedIntegerField)
-fmt.Println(p.UniqueIntegerField)
-// 1
-// 1
-// 1
-// 100
-
-```
-
-### Simple queries
-
-Any object can be fetched, indexed or not. Storm uses indexes when available, otherwise it uses the [query system](#advanced-queries).
-
-#### Fetch one object
-
-```go
-var user User
-err := db.One("Email", "john@provider.com", &user)
-// err == nil
-
-err = db.One("Name", "John", &user)
-// err == nil
-
-err = db.One("Name", "Jack", &user)
-// err == storm.ErrNotFound
-```
-
-#### Fetch multiple objects
-
-```go
-var users []User
-err := db.Find("Group", "staff", &users)
-```
-
-#### Fetch all objects
-
-```go
-var users []User
-err := db.All(&users)
-```
-
-#### Fetch all objects sorted by index
-
-```go
-var users []User
-err := db.AllByIndex("CreatedAt", &users)
-```
-
-#### Fetch a range of objects
-
-```go
-var users []User
-err := db.Range("Age", 10, 21, &users)
-```
-
-#### Fetch objects by prefix
-
-```go
-var users []User
-err := db.Prefix("Name", "Jo", &users)
-```
-
-#### Skip, Limit and Reverse
-
-```go
-var users []User
-err := db.Find("Group", "staff", &users, storm.Skip(10))
-err = db.Find("Group", "staff", &users, storm.Limit(10))
-err = db.Find("Group", "staff", &users, storm.Reverse())
-err = db.Find("Group", "staff", &users, storm.Limit(10), storm.Skip(10), storm.Reverse())
-
-err = db.All(&users, storm.Limit(10), storm.Skip(10), storm.Reverse())
-err = db.AllByIndex("CreatedAt", &users, storm.Limit(10), storm.Skip(10), storm.Reverse())
-err = db.Range("Age", 10, 21, &users, storm.Limit(10), storm.Skip(10), storm.Reverse())
-```
-
-#### Delete an object
-
-```go
-err := db.DeleteStruct(&user)
-```
-
-#### Update an object
-
-```go
-// Update multiple fields
-err := db.Update(&User{ID: 10, Name: "Jack", Age: 45})
-
-// Update a single field
-err := db.UpdateField(&User{ID: 10}, "Age", 0)
-```
-
-#### Initialize buckets and indexes before saving an object
-
-```go
-err := db.Init(&User{})
-```
-
-Useful when starting your application
-
-#### Drop a bucket
-
-Using the struct
-
-```go
-err := db.Drop(&User)
-```
-
-Using the bucket name
-
-```go
-err := db.Drop("User")
-```
-
-#### Re-index a bucket
-
-```go
-err := db.ReIndex(&User{})
-```
-
-Useful when the structure has changed
-
-### Advanced queries
-
-For more complex queries, you can use the `Select` method.
-`Select` takes any number of [`Matcher`](https://godoc.org/github.com/asdine/storm/q#Matcher) from the [`q`](https://godoc.org/github.com/asdine/storm/q) package.
-
-Here are some common Matchers:
-
-```go
-// Equality
-q.Eq("Name", John)
-
-// Strictly greater than
-q.Gt("Age", 7)
-
-// Lesser than or equal to
-q.Lte("Age", 77)
-
-// Regex with name that starts with the letter D
-q.Re("Name", "^D")
-
-// In the given slice of values
-q.In("Group", []string{"Staff", "Admin"})
-
-// Comparing fields
-q.EqF("FieldName", "SecondFieldName")
-q.LtF("FieldName", "SecondFieldName")
-q.GtF("FieldName", "SecondFieldName")
-q.LteF("FieldName", "SecondFieldName")
-q.GteF("FieldName", "SecondFieldName")
-```
-
-Matchers can also be combined with `And`, `Or` and `Not`:
-
-```go
-
-// Match if all match
-q.And(
- q.Gt("Age", 7),
- q.Re("Name", "^D")
-)
-
-// Match if one matches
-q.Or(
- q.Re("Name", "^A"),
- q.Not(
- q.Re("Name", "^B")
- ),
- q.Re("Name", "^C"),
- q.In("Group", []string{"Staff", "Admin"}),
- q.And(
- q.StrictEq("Password", []byte(password)),
- q.Eq("Registered", true)
- )
-)
-```
-
-You can find the complete list in the [documentation](https://godoc.org/github.com/asdine/storm/q#Matcher).
-
-`Select` takes any number of matchers and wraps them into a `q.And()` so it's not necessary to specify it. It returns a [`Query`](https://godoc.org/github.com/asdine/storm#Query) type.
-
-```go
-query := db.Select(q.Gte("Age", 7), q.Lte("Age", 77))
-```
-
-The `Query` type contains methods to filter and order the records.
-
-```go
-// Limit
-query = query.Limit(10)
-
-// Skip
-query = query.Skip(20)
-
-// Calls can also be chained
-query = query.Limit(10).Skip(20).OrderBy("Age").Reverse()
-```
-
-But also to specify how to fetch them.
-
-```go
-var users []User
-err = query.Find(&users)
-
-var user User
-err = query.First(&user)
-```
-
-Examples with `Select`:
-
-```go
-// Find all users with an ID between 10 and 100
-err = db.Select(q.Gte("ID", 10), q.Lte("ID", 100)).Find(&users)
-
-// Nested matchers
-err = db.Select(q.Or(
- q.Gt("ID", 50),
- q.Lt("Age", 21),
- q.And(
- q.Eq("Group", "admin"),
- q.Gte("Age", 21),
- ),
-)).Find(&users)
-
-query := db.Select(q.Gte("ID", 10), q.Lte("ID", 100)).Limit(10).Skip(5).Reverse().OrderBy("Age", "Name")
-
-// Find multiple records
-err = query.Find(&users)
-// or
-err = db.Select(q.Gte("ID", 10), q.Lte("ID", 100)).Limit(10).Skip(5).Reverse().OrderBy("Age", "Name").Find(&users)
-
-// Find first record
-err = query.First(&user)
-// or
-err = db.Select(q.Gte("ID", 10), q.Lte("ID", 100)).Limit(10).Skip(5).Reverse().OrderBy("Age", "Name").First(&user)
-
-// Delete all matching records
-err = query.Delete(new(User))
-
-// Fetching records one by one (useful when the bucket contains a lot of records)
-query = db.Select(q.Gte("ID", 10),q.Lte("ID", 100)).OrderBy("Age", "Name")
-
-err = query.Each(new(User), func(record interface{}) error) {
- u := record.(*User)
- ...
- return nil
-}
-```
-
-See the [documentation](https://godoc.org/github.com/asdine/storm#Query) for a complete list of methods.
-
-### Transactions
-
-```go
-tx, err := db.Begin(true)
-if err != nil {
- return err
-}
-defer tx.Rollback()
-
-accountA.Amount -= 100
-accountB.Amount += 100
-
-err = tx.Save(accountA)
-if err != nil {
- return err
-}
-
-err = tx.Save(accountB)
-if err != nil {
- return err
-}
-
-return tx.Commit()
-```
-
-### Options
-
-Storm options are functions that can be passed when constructing you Storm instance. You can pass it any number of options.
-
-#### BoltOptions
-
-By default, Storm opens a database with the mode `0600` and a timeout of one second.
-You can change this behavior by using `BoltOptions`
-
-```go
-db, err := storm.Open("my.db", storm.BoltOptions(0600, &bolt.Options{Timeout: 1 * time.Second}))
-```
-
-#### MarshalUnmarshaler
-
-To store the data in BoltDB, Storm marshals it in JSON by default. If you wish to change this behavior you can pass a codec that implements [`codec.MarshalUnmarshaler`](https://godoc.org/github.com/asdine/storm/codec#MarshalUnmarshaler) via the [`storm.Codec`](https://godoc.org/github.com/asdine/storm#Codec) option:
-
-```go
-db := storm.Open("my.db", storm.Codec(myCodec))
-```
-
-##### Provided Codecs
-
-You can easily implement your own `MarshalUnmarshaler`, but Storm comes with built-in support for [JSON](https://godoc.org/github.com/asdine/storm/codec/json) (default), [GOB](https://godoc.org/github.com/asdine/storm/codec/gob), [Sereal](https://godoc.org/github.com/asdine/storm/codec/sereal), [Protocol Buffers](https://godoc.org/github.com/asdine/storm/codec/protobuf) and [MessagePack](https://godoc.org/github.com/asdine/storm/codec/msgpack).
-
-These can be used by importing the relevant package and use that codec to configure Storm. The example below shows all variants (without proper error handling):
-
-```go
-import (
- "github.com/asdine/storm/v3"
- "github.com/asdine/storm/v3/codec/gob"
- "github.com/asdine/storm/v3/codec/json"
- "github.com/asdine/storm/v3/codec/sereal"
- "github.com/asdine/storm/v3/codec/protobuf"
- "github.com/asdine/storm/v3/codec/msgpack"
-)
-
-var gobDb, _ = storm.Open("gob.db", storm.Codec(gob.Codec))
-var jsonDb, _ = storm.Open("json.db", storm.Codec(json.Codec))
-var serealDb, _ = storm.Open("sereal.db", storm.Codec(sereal.Codec))
-var protobufDb, _ = storm.Open("protobuf.db", storm.Codec(protobuf.Codec))
-var msgpackDb, _ = storm.Open("msgpack.db", storm.Codec(msgpack.Codec))
-```
-
-**Tip**: Adding Storm tags to generated Protobuf files can be tricky. A good solution is to use [this tool](https://github.com/favadi/protoc-go-inject-tag) to inject the tags during the compilation.
-
-#### Use existing Bolt connection
-
-You can use an existing connection and pass it to Storm
-
-```go
-bDB, _ := bolt.Open(filepath.Join(dir, "bolt.db"), 0600, &bolt.Options{Timeout: 10 * time.Second})
-db := storm.Open("my.db", storm.UseDB(bDB))
-```
-
-#### Batch mode
-
-Batch mode can be enabled to speed up concurrent writes (see [Batch read-write transactions](https://github.com/coreos/bbolt#batch-read-write-transactions))
-
-```go
-db := storm.Open("my.db", storm.Batch())
-```
-
-## Nodes and nested buckets
-
-Storm takes advantage of BoltDB nested buckets feature by using `storm.Node`.
-A `storm.Node` is the underlying object used by `storm.DB` to manipulate a bucket.
-To create a nested bucket and use the same API as `storm.DB`, you can use the `DB.From` method.
-
-```go
-repo := db.From("repo")
-
-err := repo.Save(&Issue{
- Title: "I want more features",
- Author: user.ID,
-})
-
-err = repo.Save(newRelease("0.10"))
-
-var issues []Issue
-err = repo.Find("Author", user.ID, &issues)
-
-var release Release
-err = repo.One("Tag", "0.10", &release)
-```
-
-You can also chain the nodes to create a hierarchy
-
-```go
-chars := db.From("characters")
-heroes := chars.From("heroes")
-enemies := chars.From("enemies")
-
-items := db.From("items")
-potions := items.From("consumables").From("medicine").From("potions")
-```
-
-You can even pass the entire hierarchy as arguments to `From`:
-
-```go
-privateNotes := db.From("notes", "private")
-workNotes := db.From("notes", "work")
-```
-
-### Node options
-
-A Node can also be configured. Activating an option on a Node creates a copy, so a Node is always thread-safe.
-
-```go
-n := db.From("my-node")
-```
-
-Give a bolt.Tx transaction to the Node
-
-```go
-n = n.WithTransaction(tx)
-```
-
-Enable batch mode
-
-```go
-n = n.WithBatch(true)
-```
-
-Use a Codec
-
-```go
-n = n.WithCodec(gob.Codec)
-```
-
-## Simple Key/Value store
-
-Storm can be used as a simple, robust, key/value store that can store anything.
-The key and the value can be of any type as long as the key is not a zero value.
-
-Saving data :
-
-```go
-db.Set("logs", time.Now(), "I'm eating my breakfast man")
-db.Set("sessions", bson.NewObjectId(), &someUser)
-db.Set("weird storage", "754-3010", map[string]interface{}{
- "hair": "blonde",
- "likes": []string{"cheese", "star wars"},
-})
-```
-
-Fetching data :
-
-```go
-user := User{}
-db.Get("sessions", someObjectId, &user)
-
-var details map[string]interface{}
-db.Get("weird storage", "754-3010", &details)
-
-db.Get("sessions", someObjectId, &details)
-```
-
-Deleting data :
-
-```go
-db.Delete("sessions", someObjectId)
-db.Delete("weird storage", "754-3010")
-```
-
-You can find other useful methods in the [documentation](https://godoc.org/github.com/asdine/storm#KeyValueStore).
-
-## BoltDB
-
-BoltDB is still easily accessible and can be used as usual
-
-```go
-db.Bolt.View(func(tx *bolt.Tx) error {
- bucket := tx.Bucket([]byte("my bucket"))
- val := bucket.Get([]byte("any id"))
- fmt.Println(string(val))
- return nil
-})
-```
-
-A transaction can be also be passed to Storm
-
-```go
-db.Bolt.Update(func(tx *bolt.Tx) error {
- ...
- dbx := db.WithTransaction(tx)
- err = dbx.Save(&user)
- ...
- return nil
-})
-```
-
-## License
-
-MIT
-
-## Credits
-
-- [Asdine El Hrychy](https://github.com/asdine)
-- [Bjørn Erik Pedersen](https://github.com/bep)
diff --git a/vendor/github.com/asdine/storm/v3/bucket.go b/vendor/github.com/asdine/storm/v3/bucket.go
deleted file mode 100644
index e2ef170e..00000000
--- a/vendor/github.com/asdine/storm/v3/bucket.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package storm
-
-import bolt "go.etcd.io/bbolt"
-
-// CreateBucketIfNotExists creates the bucket below the current node if it doesn't
-// already exist.
-func (n *node) CreateBucketIfNotExists(tx *bolt.Tx, bucket string) (*bolt.Bucket, error) {
- var b *bolt.Bucket
- var err error
-
- bucketNames := append(n.rootBucket, bucket)
-
- for _, bucketName := range bucketNames {
- if b != nil {
- if b, err = b.CreateBucketIfNotExists([]byte(bucketName)); err != nil {
- return nil, err
- }
-
- } else {
- if b, err = tx.CreateBucketIfNotExists([]byte(bucketName)); err != nil {
- return nil, err
- }
- }
- }
-
- return b, nil
-}
-
-// GetBucket returns the given bucket below the current node.
-func (n *node) GetBucket(tx *bolt.Tx, children ...string) *bolt.Bucket {
- var b *bolt.Bucket
-
- bucketNames := append(n.rootBucket, children...)
- for _, bucketName := range bucketNames {
- if b != nil {
- if b = b.Bucket([]byte(bucketName)); b == nil {
- return nil
- }
- } else {
- if b = tx.Bucket([]byte(bucketName)); b == nil {
- return nil
- }
- }
- }
-
- return b
-}
diff --git a/vendor/github.com/asdine/storm/v3/codec/.gitignore b/vendor/github.com/asdine/storm/v3/codec/.gitignore
deleted file mode 100644
index 3997bead..00000000
--- a/vendor/github.com/asdine/storm/v3/codec/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-*.db
\ No newline at end of file
diff --git a/vendor/github.com/asdine/storm/v3/codec/codec.go b/vendor/github.com/asdine/storm/v3/codec/codec.go
deleted file mode 100644
index b157379c..00000000
--- a/vendor/github.com/asdine/storm/v3/codec/codec.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Package codec contains sub-packages with different codecs that can be used
-// to encode and decode entities in Storm.
-package codec
-
-// MarshalUnmarshaler represents a codec used to marshal and unmarshal entities.
-type MarshalUnmarshaler interface {
- Marshal(v interface{}) ([]byte, error)
- Unmarshal(b []byte, v interface{}) error
- // name of this codec
- Name() string
-}
diff --git a/vendor/github.com/asdine/storm/v3/codec/json/json.go b/vendor/github.com/asdine/storm/v3/codec/json/json.go
deleted file mode 100644
index 56b0b466..00000000
--- a/vendor/github.com/asdine/storm/v3/codec/json/json.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Package json contains a codec to encode and decode entities in JSON format
-package json
-
-import (
- "encoding/json"
-)
-
-const name = "json"
-
-// Codec that encodes to and decodes from JSON.
-var Codec = new(jsonCodec)
-
-type jsonCodec int
-
-func (j jsonCodec) Marshal(v interface{}) ([]byte, error) {
- return json.Marshal(v)
-}
-
-func (j jsonCodec) Unmarshal(b []byte, v interface{}) error {
- return json.Unmarshal(b, v)
-}
-
-func (j jsonCodec) Name() string {
- return name
-}
diff --git a/vendor/github.com/asdine/storm/v3/errors.go b/vendor/github.com/asdine/storm/v3/errors.go
deleted file mode 100644
index ef526561..00000000
--- a/vendor/github.com/asdine/storm/v3/errors.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package storm
-
-import "errors"
-
-// Errors
-var (
- // ErrNoID is returned when no ID field or id tag is found in the struct.
- ErrNoID = errors.New("missing struct tag id or ID field")
-
- // ErrZeroID is returned when the ID field is a zero value.
- ErrZeroID = errors.New("id field must not be a zero value")
-
- // ErrBadType is returned when a method receives an unexpected value type.
- ErrBadType = errors.New("provided data must be a struct or a pointer to struct")
-
- // ErrAlreadyExists is returned uses when trying to set an existing value on a field that has a unique index.
- ErrAlreadyExists = errors.New("already exists")
-
- // ErrNilParam is returned when the specified param is expected to be not nil.
- ErrNilParam = errors.New("param must not be nil")
-
- // ErrUnknownTag is returned when an unexpected tag is specified.
- ErrUnknownTag = errors.New("unknown tag")
-
- // ErrIdxNotFound is returned when the specified index is not found.
- ErrIdxNotFound = errors.New("index not found")
-
- // ErrSlicePtrNeeded is returned when an unexpected value is given, instead of a pointer to slice.
- ErrSlicePtrNeeded = errors.New("provided target must be a pointer to slice")
-
- // ErrStructPtrNeeded is returned when an unexpected value is given, instead of a pointer to struct.
- ErrStructPtrNeeded = errors.New("provided target must be a pointer to struct")
-
- // ErrPtrNeeded is returned when an unexpected value is given, instead of a pointer.
- ErrPtrNeeded = errors.New("provided target must be a pointer to a valid variable")
-
- // ErrNoName is returned when the specified struct has no name.
- ErrNoName = errors.New("provided target must have a name")
-
- // ErrNotFound is returned when the specified record is not saved in the bucket.
- ErrNotFound = errors.New("not found")
-
- // ErrNotInTransaction is returned when trying to rollback or commit when not in transaction.
- ErrNotInTransaction = errors.New("not in transaction")
-
- // ErrIncompatibleValue is returned when trying to set a value with a different type than the chosen field
- ErrIncompatibleValue = errors.New("incompatible value")
-
- // ErrDifferentCodec is returned when using a codec different than the first codec used with the bucket.
- ErrDifferentCodec = errors.New("the selected codec is incompatible with this bucket")
-)
diff --git a/vendor/github.com/asdine/storm/v3/extract.go b/vendor/github.com/asdine/storm/v3/extract.go
deleted file mode 100644
index f601050b..00000000
--- a/vendor/github.com/asdine/storm/v3/extract.go
+++ /dev/null
@@ -1,226 +0,0 @@
-package storm
-
-import (
- "fmt"
- "reflect"
- "strconv"
- "strings"
-
- "github.com/asdine/storm/v3/index"
- bolt "go.etcd.io/bbolt"
-)
-
-// Storm tags
-const (
- tagID = "id"
- tagIdx = "index"
- tagUniqueIdx = "unique"
- tagInline = "inline"
- tagIncrement = "increment"
- indexPrefix = "__storm_index_"
-)
-
-type fieldConfig struct {
- Name string
- Index string
- IsZero bool
- IsID bool
- Increment bool
- IncrementStart int64
- IsInteger bool
- Value *reflect.Value
- ForceUpdate bool
-}
-
-// structConfig is a structure gathering all the relevant informations about a model
-type structConfig struct {
- Name string
- Fields map[string]*fieldConfig
- ID *fieldConfig
-}
-
-func extract(s *reflect.Value, mi ...*structConfig) (*structConfig, error) {
- if s.Kind() == reflect.Ptr {
- e := s.Elem()
- s = &e
- }
- if s.Kind() != reflect.Struct {
- return nil, ErrBadType
- }
-
- typ := s.Type()
-
- var child bool
-
- var m *structConfig
- if len(mi) > 0 {
- m = mi[0]
- child = true
- } else {
- m = &structConfig{}
- m.Fields = make(map[string]*fieldConfig)
- }
-
- if m.Name == "" {
- m.Name = typ.Name()
- }
-
- numFields := s.NumField()
- for i := 0; i < numFields; i++ {
- field := typ.Field(i)
- value := s.Field(i)
-
- if field.PkgPath != "" {
- continue
- }
-
- err := extractField(&value, &field, m, child)
- if err != nil {
- return nil, err
- }
- }
-
- if child {
- return m, nil
- }
-
- if m.ID == nil {
- return nil, ErrNoID
- }
-
- if m.Name == "" {
- return nil, ErrNoName
- }
-
- return m, nil
-}
-
-func extractField(value *reflect.Value, field *reflect.StructField, m *structConfig, isChild bool) error {
- var f *fieldConfig
- var err error
-
- tag := field.Tag.Get("storm")
- if tag != "" {
- f = &fieldConfig{
- Name: field.Name,
- IsZero: isZero(value),
- IsInteger: isInteger(value),
- Value: value,
- IncrementStart: 1,
- }
-
- tags := strings.Split(tag, ",")
-
- for _, tag := range tags {
- switch tag {
- case "id":
- f.IsID = true
- f.Index = tagUniqueIdx
- case tagUniqueIdx, tagIdx:
- f.Index = tag
- case tagInline:
- if value.Kind() == reflect.Ptr {
- e := value.Elem()
- value = &e
- }
- if value.Kind() == reflect.Struct {
- a := value.Addr()
- _, err := extract(&a, m)
- if err != nil {
- return err
- }
- }
- // we don't need to save this field
- return nil
- default:
- if strings.HasPrefix(tag, tagIncrement) {
- f.Increment = true
- parts := strings.Split(tag, "=")
- if parts[0] != tagIncrement {
- return ErrUnknownTag
- }
- if len(parts) > 1 {
- f.IncrementStart, err = strconv.ParseInt(parts[1], 0, 64)
- if err != nil {
- return err
- }
- }
- } else {
- return ErrUnknownTag
- }
- }
- }
-
- if _, ok := m.Fields[f.Name]; !ok || !isChild {
- m.Fields[f.Name] = f
- }
- }
-
- if m.ID == nil && f != nil && f.IsID {
- m.ID = f
- }
-
- // the field is named ID and no ID field has been detected before
- if m.ID == nil && field.Name == "ID" {
- if f == nil {
- f = &fieldConfig{
- Index: tagUniqueIdx,
- Name: field.Name,
- IsZero: isZero(value),
- IsInteger: isInteger(value),
- IsID: true,
- Value: value,
- IncrementStart: 1,
- }
- m.Fields[field.Name] = f
- }
- m.ID = f
- }
-
- return nil
-}
-
-func extractSingleField(ref *reflect.Value, fieldName string) (*structConfig, error) {
- var cfg structConfig
- cfg.Fields = make(map[string]*fieldConfig)
-
- f, ok := ref.Type().FieldByName(fieldName)
- if !ok || f.PkgPath != "" {
- return nil, fmt.Errorf("field %s not found", fieldName)
- }
-
- v := ref.FieldByName(fieldName)
- err := extractField(&v, &f, &cfg, false)
- if err != nil {
- return nil, err
- }
-
- return &cfg, nil
-}
-
-func getIndex(bucket *bolt.Bucket, idxKind string, fieldName string) (index.Index, error) {
- var idx index.Index
- var err error
-
- switch idxKind {
- case tagUniqueIdx:
- idx, err = index.NewUniqueIndex(bucket, []byte(indexPrefix+fieldName))
- case tagIdx:
- idx, err = index.NewListIndex(bucket, []byte(indexPrefix+fieldName))
- default:
- err = ErrIdxNotFound
- }
-
- return idx, err
-}
-
-func isZero(v *reflect.Value) bool {
- zero := reflect.Zero(v.Type()).Interface()
- current := v.Interface()
- return reflect.DeepEqual(current, zero)
-}
-
-func isInteger(v *reflect.Value) bool {
- kind := v.Kind()
- return v != nil && kind >= reflect.Int && kind <= reflect.Uint64
-}
diff --git a/vendor/github.com/asdine/storm/v3/finder.go b/vendor/github.com/asdine/storm/v3/finder.go
deleted file mode 100644
index 81628629..00000000
--- a/vendor/github.com/asdine/storm/v3/finder.go
+++ /dev/null
@@ -1,499 +0,0 @@
-package storm
-
-import (
- "fmt"
- "reflect"
-
- "github.com/asdine/storm/v3/index"
- "github.com/asdine/storm/v3/q"
- bolt "go.etcd.io/bbolt"
-)
-
-// A Finder can fetch types from BoltDB.
-type Finder interface {
- // One returns one record by the specified index
- One(fieldName string, value interface{}, to interface{}) error
-
- // Find returns one or more records by the specified index
- Find(fieldName string, value interface{}, to interface{}, options ...func(q *index.Options)) error
-
- // AllByIndex gets all the records of a bucket that are indexed in the specified index
- AllByIndex(fieldName string, to interface{}, options ...func(*index.Options)) error
-
- // All gets all the records of a bucket.
- // If there are no records it returns no error and the 'to' parameter is set to an empty slice.
- All(to interface{}, options ...func(*index.Options)) error
-
- // Select a list of records that match a list of matchers. Doesn't use indexes.
- Select(matchers ...q.Matcher) Query
-
- // Range returns one or more records by the specified index within the specified range
- Range(fieldName string, min, max, to interface{}, options ...func(*index.Options)) error
-
- // Prefix returns one or more records whose given field starts with the specified prefix.
- Prefix(fieldName string, prefix string, to interface{}, options ...func(*index.Options)) error
-
- // Count counts all the records of a bucket
- Count(data interface{}) (int, error)
-}
-
-// One returns one record by the specified index
-func (n *node) One(fieldName string, value interface{}, to interface{}) error {
- sink, err := newFirstSink(n, to)
- if err != nil {
- return err
- }
-
- bucketName := sink.bucketName()
- if bucketName == "" {
- return ErrNoName
- }
-
- if fieldName == "" {
- return ErrNotFound
- }
-
- ref := reflect.Indirect(sink.ref)
- cfg, err := extractSingleField(&ref, fieldName)
- if err != nil {
- return err
- }
-
- field, ok := cfg.Fields[fieldName]
- if !ok || (!field.IsID && field.Index == "") {
- query := newQuery(n, q.StrictEq(fieldName, value))
- query.Limit(1)
-
- if n.tx != nil {
- err = query.query(n.tx, sink)
- } else {
- err = n.s.Bolt.View(func(tx *bolt.Tx) error {
- return query.query(tx, sink)
- })
- }
-
- if err != nil {
- return err
- }
-
- return sink.flush()
- }
-
- val, err := toBytes(value, n.codec)
- if err != nil {
- return err
- }
-
- return n.readTx(func(tx *bolt.Tx) error {
- return n.one(tx, bucketName, fieldName, cfg, to, val, field.IsID)
- })
-}
-
-func (n *node) one(tx *bolt.Tx, bucketName, fieldName string, cfg *structConfig, to interface{}, val []byte, skipIndex bool) error {
- bucket := n.GetBucket(tx, bucketName)
- if bucket == nil {
- return ErrNotFound
- }
-
- var id []byte
- if !skipIndex {
- idx, err := getIndex(bucket, cfg.Fields[fieldName].Index, fieldName)
- if err != nil {
- if err == index.ErrNotFound {
- return ErrNotFound
- }
- return err
- }
-
- id = idx.Get(val)
- } else {
- id = val
- }
-
- if id == nil {
- return ErrNotFound
- }
-
- raw := bucket.Get(id)
- if raw == nil {
- return ErrNotFound
- }
-
- return n.codec.Unmarshal(raw, to)
-}
-
-// Find returns one or more records by the specified index
-func (n *node) Find(fieldName string, value interface{}, to interface{}, options ...func(q *index.Options)) error {
- sink, err := newListSink(n, to)
- if err != nil {
- return err
- }
- bucketName := sink.bucketName()
- if bucketName == "" {
- return ErrNoName
- }
-
- ref := reflect.Indirect(reflect.New(sink.elemType))
- cfg, err := extractSingleField(&ref, fieldName)
- if err != nil {
- return err
- }
-
- opts := index.NewOptions()
- for _, fn := range options {
- fn(opts)
- }
-
- field, ok := cfg.Fields[fieldName]
- if !ok || (!field.IsID && (field.Index == "" || value == nil)) {
- query := newQuery(n, q.Eq(fieldName, value))
- query.Skip(opts.Skip).Limit(opts.Limit)
-
- if opts.Reverse {
- query.Reverse()
- }
-
- err = n.readTx(func(tx *bolt.Tx) error {
- return query.query(tx, sink)
- })
-
- if err != nil {
- return err
- }
-
- return sink.flush()
- }
-
- val, err := toBytes(value, n.codec)
- if err != nil {
- return err
- }
-
- return n.readTx(func(tx *bolt.Tx) error {
- return n.find(tx, bucketName, fieldName, cfg, sink, val, opts)
- })
-}
-
-func (n *node) find(tx *bolt.Tx, bucketName, fieldName string, cfg *structConfig, sink *listSink, val []byte, opts *index.Options) error {
- bucket := n.GetBucket(tx, bucketName)
- if bucket == nil {
- return ErrNotFound
- }
- idx, err := getIndex(bucket, cfg.Fields[fieldName].Index, fieldName)
- if err != nil {
- return err
- }
-
- list, err := idx.All(val, opts)
- if err != nil {
- if err == index.ErrNotFound {
- return ErrNotFound
- }
- return err
- }
-
- sink.results = reflect.MakeSlice(reflect.Indirect(sink.ref).Type(), len(list), len(list))
-
- sorter := newSorter(n, sink)
- for i := range list {
- raw := bucket.Get(list[i])
- if raw == nil {
- return ErrNotFound
- }
-
- if _, err := sorter.filter(nil, bucket, list[i], raw); err != nil {
- return err
- }
- }
-
- return sorter.flush()
-}
-
-// AllByIndex gets all the records of a bucket that are indexed in the specified index
-func (n *node) AllByIndex(fieldName string, to interface{}, options ...func(*index.Options)) error {
- if fieldName == "" {
- return n.All(to, options...)
- }
-
- ref := reflect.ValueOf(to)
-
- if ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Slice {
- return ErrSlicePtrNeeded
- }
-
- typ := reflect.Indirect(ref).Type().Elem()
-
- if typ.Kind() == reflect.Ptr {
- typ = typ.Elem()
- }
-
- newElem := reflect.New(typ)
-
- cfg, err := extract(&newElem)
- if err != nil {
- return err
- }
-
- if cfg.ID.Name == fieldName {
- return n.All(to, options...)
- }
-
- opts := index.NewOptions()
- for _, fn := range options {
- fn(opts)
- }
-
- return n.readTx(func(tx *bolt.Tx) error {
- return n.allByIndex(tx, fieldName, cfg, &ref, opts)
- })
-}
-
-func (n *node) allByIndex(tx *bolt.Tx, fieldName string, cfg *structConfig, ref *reflect.Value, opts *index.Options) error {
- bucket := n.GetBucket(tx, cfg.Name)
- if bucket == nil {
- return ErrNotFound
- }
-
- fieldCfg, ok := cfg.Fields[fieldName]
- if !ok {
- return ErrNotFound
- }
-
- idx, err := getIndex(bucket, fieldCfg.Index, fieldName)
- if err != nil {
- return err
- }
-
- list, err := idx.AllRecords(opts)
- if err != nil {
- if err == index.ErrNotFound {
- return ErrNotFound
- }
- return err
- }
-
- results := reflect.MakeSlice(reflect.Indirect(*ref).Type(), len(list), len(list))
-
- for i := range list {
- raw := bucket.Get(list[i])
- if raw == nil {
- return ErrNotFound
- }
-
- err = n.codec.Unmarshal(raw, results.Index(i).Addr().Interface())
- if err != nil {
- return err
- }
- }
-
- reflect.Indirect(*ref).Set(results)
- return nil
-}
-
-// All gets all the records of a bucket.
-// If there are no records it returns no error and the 'to' parameter is set to an empty slice.
-func (n *node) All(to interface{}, options ...func(*index.Options)) error {
- opts := index.NewOptions()
- for _, fn := range options {
- fn(opts)
- }
-
- query := newQuery(n, nil).Limit(opts.Limit).Skip(opts.Skip)
- if opts.Reverse {
- query.Reverse()
- }
-
- err := query.Find(to)
- if err != nil && err != ErrNotFound {
- return err
- }
-
- if err == ErrNotFound {
- ref := reflect.ValueOf(to)
- results := reflect.MakeSlice(reflect.Indirect(ref).Type(), 0, 0)
- reflect.Indirect(ref).Set(results)
- }
- return nil
-}
-
-// Range returns one or more records by the specified index within the specified range
-func (n *node) Range(fieldName string, min, max, to interface{}, options ...func(*index.Options)) error {
- sink, err := newListSink(n, to)
- if err != nil {
- return err
- }
-
- bucketName := sink.bucketName()
- if bucketName == "" {
- return ErrNoName
- }
-
- ref := reflect.Indirect(reflect.New(sink.elemType))
- cfg, err := extractSingleField(&ref, fieldName)
- if err != nil {
- return err
- }
-
- opts := index.NewOptions()
- for _, fn := range options {
- fn(opts)
- }
-
- field, ok := cfg.Fields[fieldName]
- if !ok || (!field.IsID && field.Index == "") {
- query := newQuery(n, q.And(q.Gte(fieldName, min), q.Lte(fieldName, max)))
- query.Skip(opts.Skip).Limit(opts.Limit)
-
- if opts.Reverse {
- query.Reverse()
- }
-
- err = n.readTx(func(tx *bolt.Tx) error {
- return query.query(tx, sink)
- })
-
- if err != nil {
- return err
- }
-
- return sink.flush()
- }
-
- mn, err := toBytes(min, n.codec)
- if err != nil {
- return err
- }
-
- mx, err := toBytes(max, n.codec)
- if err != nil {
- return err
- }
-
- return n.readTx(func(tx *bolt.Tx) error {
- return n.rnge(tx, bucketName, fieldName, cfg, sink, mn, mx, opts)
- })
-}
-
-func (n *node) rnge(tx *bolt.Tx, bucketName, fieldName string, cfg *structConfig, sink *listSink, min, max []byte, opts *index.Options) error {
- bucket := n.GetBucket(tx, bucketName)
- if bucket == nil {
- reflect.Indirect(sink.ref).SetLen(0)
- return nil
- }
-
- idx, err := getIndex(bucket, cfg.Fields[fieldName].Index, fieldName)
- if err != nil {
- return err
- }
-
- list, err := idx.Range(min, max, opts)
- if err != nil {
- return err
- }
-
- sink.results = reflect.MakeSlice(reflect.Indirect(sink.ref).Type(), len(list), len(list))
- sorter := newSorter(n, sink)
- for i := range list {
- raw := bucket.Get(list[i])
- if raw == nil {
- return ErrNotFound
- }
-
- if _, err := sorter.filter(nil, bucket, list[i], raw); err != nil {
- return err
- }
- }
-
- return sorter.flush()
-}
-
-// Prefix returns one or more records whose given field starts with the specified prefix.
-func (n *node) Prefix(fieldName string, prefix string, to interface{}, options ...func(*index.Options)) error {
- sink, err := newListSink(n, to)
- if err != nil {
- return err
- }
-
- bucketName := sink.bucketName()
- if bucketName == "" {
- return ErrNoName
- }
-
- ref := reflect.Indirect(reflect.New(sink.elemType))
- cfg, err := extractSingleField(&ref, fieldName)
- if err != nil {
- return err
- }
-
- opts := index.NewOptions()
- for _, fn := range options {
- fn(opts)
- }
-
- field, ok := cfg.Fields[fieldName]
- if !ok || (!field.IsID && field.Index == "") {
- query := newQuery(n, q.Re(fieldName, fmt.Sprintf("^%s", prefix)))
- query.Skip(opts.Skip).Limit(opts.Limit)
-
- if opts.Reverse {
- query.Reverse()
- }
-
- err = n.readTx(func(tx *bolt.Tx) error {
- return query.query(tx, sink)
- })
-
- if err != nil {
- return err
- }
-
- return sink.flush()
- }
-
- prfx, err := toBytes(prefix, n.codec)
- if err != nil {
- return err
- }
-
- return n.readTx(func(tx *bolt.Tx) error {
- return n.prefix(tx, bucketName, fieldName, cfg, sink, prfx, opts)
- })
-}
-
-func (n *node) prefix(tx *bolt.Tx, bucketName, fieldName string, cfg *structConfig, sink *listSink, prefix []byte, opts *index.Options) error {
- bucket := n.GetBucket(tx, bucketName)
- if bucket == nil {
- reflect.Indirect(sink.ref).SetLen(0)
- return nil
- }
-
- idx, err := getIndex(bucket, cfg.Fields[fieldName].Index, fieldName)
- if err != nil {
- return err
- }
-
- list, err := idx.Prefix(prefix, opts)
- if err != nil {
- return err
- }
-
- sink.results = reflect.MakeSlice(reflect.Indirect(sink.ref).Type(), len(list), len(list))
- sorter := newSorter(n, sink)
- for i := range list {
- raw := bucket.Get(list[i])
- if raw == nil {
- return ErrNotFound
- }
-
- if _, err := sorter.filter(nil, bucket, list[i], raw); err != nil {
- return err
- }
- }
-
- return sorter.flush()
-}
-
-// Count counts all the records of a bucket
-func (n *node) Count(data interface{}) (int, error) {
- return n.Select().Count(data)
-}
diff --git a/vendor/github.com/asdine/storm/v3/index/errors.go b/vendor/github.com/asdine/storm/v3/index/errors.go
deleted file mode 100644
index b8c66c65..00000000
--- a/vendor/github.com/asdine/storm/v3/index/errors.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package index
-
-import "errors"
-
-var (
- // ErrNotFound is returned when the specified record is not saved in the bucket.
- ErrNotFound = errors.New("not found")
-
- // ErrAlreadyExists is returned uses when trying to set an existing value on a field that has a unique index.
- ErrAlreadyExists = errors.New("already exists")
-
- // ErrNilParam is returned when the specified param is expected to be not nil.
- ErrNilParam = errors.New("param must not be nil")
-)
diff --git a/vendor/github.com/asdine/storm/v3/index/indexes.go b/vendor/github.com/asdine/storm/v3/index/indexes.go
deleted file mode 100644
index 1af5cf85..00000000
--- a/vendor/github.com/asdine/storm/v3/index/indexes.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Package index contains Index engines used to store values and their corresponding IDs
-package index
-
-// Index interface
-type Index interface {
- Add(value []byte, targetID []byte) error
- Remove(value []byte) error
- RemoveID(id []byte) error
- Get(value []byte) []byte
- All(value []byte, opts *Options) ([][]byte, error)
- AllRecords(opts *Options) ([][]byte, error)
- Range(min []byte, max []byte, opts *Options) ([][]byte, error)
- Prefix(prefix []byte, opts *Options) ([][]byte, error)
-}
diff --git a/vendor/github.com/asdine/storm/v3/index/list.go b/vendor/github.com/asdine/storm/v3/index/list.go
deleted file mode 100644
index 27ce410b..00000000
--- a/vendor/github.com/asdine/storm/v3/index/list.go
+++ /dev/null
@@ -1,283 +0,0 @@
-package index
-
-import (
- "bytes"
-
- "github.com/asdine/storm/v3/internal"
- bolt "go.etcd.io/bbolt"
-)
-
-// NewListIndex loads a ListIndex
-func NewListIndex(parent *bolt.Bucket, indexName []byte) (*ListIndex, error) {
- var err error
- b := parent.Bucket(indexName)
- if b == nil {
- if !parent.Writable() {
- return nil, ErrNotFound
- }
- b, err = parent.CreateBucket(indexName)
- if err != nil {
- return nil, err
- }
- }
-
- ids, err := NewUniqueIndex(b, []byte("storm__ids"))
- if err != nil {
- return nil, err
- }
-
- return &ListIndex{
- IndexBucket: b,
- Parent: parent,
- IDs: ids,
- }, nil
-}
-
-// ListIndex is an index that references values and the corresponding IDs.
-type ListIndex struct {
- Parent *bolt.Bucket
- IndexBucket *bolt.Bucket
- IDs *UniqueIndex
-}
-
-// Add a value to the list index
-func (idx *ListIndex) Add(newValue []byte, targetID []byte) error {
- if newValue == nil || len(newValue) == 0 {
- return ErrNilParam
- }
- if targetID == nil || len(targetID) == 0 {
- return ErrNilParam
- }
-
- key := idx.IDs.Get(targetID)
- if key != nil {
- err := idx.IndexBucket.Delete(key)
- if err != nil {
- return err
- }
-
- err = idx.IDs.Remove(targetID)
- if err != nil {
- return err
- }
-
- key = key[:0]
- }
-
- key = append(key, newValue...)
- key = append(key, '_')
- key = append(key, '_')
- key = append(key, targetID...)
-
- err := idx.IDs.Add(targetID, key)
- if err != nil {
- return err
- }
-
- return idx.IndexBucket.Put(key, targetID)
-}
-
-// Remove a value from the unique index
-func (idx *ListIndex) Remove(value []byte) error {
- var err error
- var keys [][]byte
-
- c := idx.IndexBucket.Cursor()
- prefix := generatePrefix(value)
-
- for k, _ := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, _ = c.Next() {
- keys = append(keys, k)
- }
-
- for _, k := range keys {
- err = idx.IndexBucket.Delete(k)
- if err != nil {
- return err
- }
- }
-
- return idx.IDs.RemoveID(value)
-}
-
-// RemoveID removes an ID from the list index
-func (idx *ListIndex) RemoveID(targetID []byte) error {
- value := idx.IDs.Get(targetID)
- if value == nil {
- return nil
- }
-
- err := idx.IndexBucket.Delete(value)
- if err != nil {
- return err
- }
-
- return idx.IDs.Remove(targetID)
-}
-
-// Get the first ID corresponding to the given value
-func (idx *ListIndex) Get(value []byte) []byte {
- c := idx.IndexBucket.Cursor()
- prefix := generatePrefix(value)
-
- for k, id := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, id = c.Next() {
- return id
- }
-
- return nil
-}
-
-// All the IDs corresponding to the given value
-func (idx *ListIndex) All(value []byte, opts *Options) ([][]byte, error) {
- var list [][]byte
- c := idx.IndexBucket.Cursor()
- cur := internal.Cursor{C: c, Reverse: opts != nil && opts.Reverse}
-
- prefix := generatePrefix(value)
-
- k, id := c.Seek(prefix)
- if cur.Reverse {
- var count int
- kc := k
- idc := id
- for ; kc != nil && bytes.HasPrefix(kc, prefix); kc, idc = c.Next() {
- count++
- k, id = kc, idc
- }
- if kc != nil {
- k, id = c.Prev()
- }
- list = make([][]byte, 0, count)
- }
-
- for ; bytes.HasPrefix(k, prefix); k, id = cur.Next() {
- if opts != nil && opts.Skip > 0 {
- opts.Skip--
- continue
- }
-
- if opts != nil && opts.Limit == 0 {
- break
- }
-
- if opts != nil && opts.Limit > 0 {
- opts.Limit--
- }
-
- list = append(list, id)
- }
-
- return list, nil
-}
-
-// AllRecords returns all the IDs of this index
-func (idx *ListIndex) AllRecords(opts *Options) ([][]byte, error) {
- var list [][]byte
-
- c := internal.Cursor{C: idx.IndexBucket.Cursor(), Reverse: opts != nil && opts.Reverse}
-
- for k, id := c.First(); k != nil; k, id = c.Next() {
- if id == nil || bytes.Equal(k, []byte("storm__ids")) {
- continue
- }
-
- if opts != nil && opts.Skip > 0 {
- opts.Skip--
- continue
- }
-
- if opts != nil && opts.Limit == 0 {
- break
- }
-
- if opts != nil && opts.Limit > 0 {
- opts.Limit--
- }
-
- list = append(list, id)
- }
-
- return list, nil
-}
-
-// Range returns the ids corresponding to the given range of values
-func (idx *ListIndex) Range(min []byte, max []byte, opts *Options) ([][]byte, error) {
- var list [][]byte
-
- c := internal.RangeCursor{
- C: idx.IndexBucket.Cursor(),
- Reverse: opts != nil && opts.Reverse,
- Min: min,
- Max: max,
- CompareFn: func(val, limit []byte) int {
- pos := bytes.LastIndex(val, []byte("__"))
- return bytes.Compare(val[:pos], limit)
- },
- }
-
- for k, id := c.First(); c.Continue(k); k, id = c.Next() {
- if id == nil || bytes.Equal(k, []byte("storm__ids")) {
- continue
- }
-
- if opts != nil && opts.Skip > 0 {
- opts.Skip--
- continue
- }
-
- if opts != nil && opts.Limit == 0 {
- break
- }
-
- if opts != nil && opts.Limit > 0 {
- opts.Limit--
- }
-
- list = append(list, id)
- }
-
- return list, nil
-}
-
-// Prefix returns the ids whose values have the given prefix.
-func (idx *ListIndex) Prefix(prefix []byte, opts *Options) ([][]byte, error) {
- var list [][]byte
-
- c := internal.PrefixCursor{
- C: idx.IndexBucket.Cursor(),
- Reverse: opts != nil && opts.Reverse,
- Prefix: prefix,
- }
-
- for k, id := c.First(); k != nil && c.Continue(k); k, id = c.Next() {
- if id == nil || bytes.Equal(k, []byte("storm__ids")) {
- continue
- }
-
- if opts != nil && opts.Skip > 0 {
- opts.Skip--
- continue
- }
-
- if opts != nil && opts.Limit == 0 {
- break
- }
-
- if opts != nil && opts.Limit > 0 {
- opts.Limit--
- }
-
- list = append(list, id)
- }
- return list, nil
-}
-
-func generatePrefix(value []byte) []byte {
- prefix := make([]byte, len(value)+2)
- var i int
- for i = range value {
- prefix[i] = value[i]
- }
- prefix[i+1] = '_'
- prefix[i+2] = '_'
- return prefix
-}
diff --git a/vendor/github.com/asdine/storm/v3/index/options.go b/vendor/github.com/asdine/storm/v3/index/options.go
deleted file mode 100644
index 6c4ae7b5..00000000
--- a/vendor/github.com/asdine/storm/v3/index/options.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package index
-
-// NewOptions creates initialized Options
-func NewOptions() *Options {
- return &Options{
- Limit: -1,
- }
-}
-
-// Options are used to customize queries
-type Options struct {
- Limit int
- Skip int
- Reverse bool
-}
diff --git a/vendor/github.com/asdine/storm/v3/index/unique.go b/vendor/github.com/asdine/storm/v3/index/unique.go
deleted file mode 100644
index ec5989d4..00000000
--- a/vendor/github.com/asdine/storm/v3/index/unique.go
+++ /dev/null
@@ -1,183 +0,0 @@
-package index
-
-import (
- "bytes"
-
- "github.com/asdine/storm/v3/internal"
- bolt "go.etcd.io/bbolt"
-)
-
-// NewUniqueIndex loads a UniqueIndex
-func NewUniqueIndex(parent *bolt.Bucket, indexName []byte) (*UniqueIndex, error) {
- var err error
- b := parent.Bucket(indexName)
- if b == nil {
- if !parent.Writable() {
- return nil, ErrNotFound
- }
- b, err = parent.CreateBucket(indexName)
- if err != nil {
- return nil, err
- }
- }
-
- return &UniqueIndex{
- IndexBucket: b,
- Parent: parent,
- }, nil
-}
-
-// UniqueIndex is an index that references unique values and the corresponding ID.
-type UniqueIndex struct {
- Parent *bolt.Bucket
- IndexBucket *bolt.Bucket
-}
-
-// Add a value to the unique index
-func (idx *UniqueIndex) Add(value []byte, targetID []byte) error {
- if value == nil || len(value) == 0 {
- return ErrNilParam
- }
- if targetID == nil || len(targetID) == 0 {
- return ErrNilParam
- }
-
- exists := idx.IndexBucket.Get(value)
- if exists != nil {
- if bytes.Equal(exists, targetID) {
- return nil
- }
- return ErrAlreadyExists
- }
-
- return idx.IndexBucket.Put(value, targetID)
-}
-
-// Remove a value from the unique index
-func (idx *UniqueIndex) Remove(value []byte) error {
- return idx.IndexBucket.Delete(value)
-}
-
-// RemoveID removes an ID from the unique index
-func (idx *UniqueIndex) RemoveID(id []byte) error {
- c := idx.IndexBucket.Cursor()
-
- for val, ident := c.First(); val != nil; val, ident = c.Next() {
- if bytes.Equal(ident, id) {
- return idx.Remove(val)
- }
- }
- return nil
-}
-
-// Get the id corresponding to the given value
-func (idx *UniqueIndex) Get(value []byte) []byte {
- return idx.IndexBucket.Get(value)
-}
-
-// All returns all the ids corresponding to the given value
-func (idx *UniqueIndex) All(value []byte, opts *Options) ([][]byte, error) {
- id := idx.IndexBucket.Get(value)
- if id != nil {
- return [][]byte{id}, nil
- }
-
- return nil, nil
-}
-
-// AllRecords returns all the IDs of this index
-func (idx *UniqueIndex) AllRecords(opts *Options) ([][]byte, error) {
- var list [][]byte
-
- c := internal.Cursor{C: idx.IndexBucket.Cursor(), Reverse: opts != nil && opts.Reverse}
-
- for val, ident := c.First(); val != nil; val, ident = c.Next() {
- if opts != nil && opts.Skip > 0 {
- opts.Skip--
- continue
- }
-
- if opts != nil && opts.Limit == 0 {
- break
- }
-
- if opts != nil && opts.Limit > 0 {
- opts.Limit--
- }
-
- list = append(list, ident)
- }
- return list, nil
-}
-
-// Range returns the ids corresponding to the given range of values
-func (idx *UniqueIndex) Range(min []byte, max []byte, opts *Options) ([][]byte, error) {
- var list [][]byte
-
- c := internal.RangeCursor{
- C: idx.IndexBucket.Cursor(),
- Reverse: opts != nil && opts.Reverse,
- Min: min,
- Max: max,
- CompareFn: func(val, limit []byte) int {
- return bytes.Compare(val, limit)
- },
- }
-
- for val, ident := c.First(); val != nil && c.Continue(val); val, ident = c.Next() {
- if opts != nil && opts.Skip > 0 {
- opts.Skip--
- continue
- }
-
- if opts != nil && opts.Limit == 0 {
- break
- }
-
- if opts != nil && opts.Limit > 0 {
- opts.Limit--
- }
-
- list = append(list, ident)
- }
- return list, nil
-}
-
-// Prefix returns the ids whose values have the given prefix.
-func (idx *UniqueIndex) Prefix(prefix []byte, opts *Options) ([][]byte, error) {
- var list [][]byte
-
- c := internal.PrefixCursor{
- C: idx.IndexBucket.Cursor(),
- Reverse: opts != nil && opts.Reverse,
- Prefix: prefix,
- }
-
- for val, ident := c.First(); val != nil && c.Continue(val); val, ident = c.Next() {
- if opts != nil && opts.Skip > 0 {
- opts.Skip--
- continue
- }
-
- if opts != nil && opts.Limit == 0 {
- break
- }
-
- if opts != nil && opts.Limit > 0 {
- opts.Limit--
- }
-
- list = append(list, ident)
- }
- return list, nil
-}
-
-// first returns the first ID of this index
-func (idx *UniqueIndex) first() []byte {
- c := idx.IndexBucket.Cursor()
-
- for val, ident := c.First(); val != nil; val, ident = c.Next() {
- return ident
- }
- return nil
-}
diff --git a/vendor/github.com/asdine/storm/v3/internal/boltdb.go b/vendor/github.com/asdine/storm/v3/internal/boltdb.go
deleted file mode 100644
index abc14ab0..00000000
--- a/vendor/github.com/asdine/storm/v3/internal/boltdb.go
+++ /dev/null
@@ -1,121 +0,0 @@
-package internal
-
-import (
- "bytes"
-
- bolt "go.etcd.io/bbolt"
-)
-
-// Cursor that can be reversed
-type Cursor struct {
- C *bolt.Cursor
- Reverse bool
-}
-
-// First element
-func (c *Cursor) First() ([]byte, []byte) {
- if c.Reverse {
- return c.C.Last()
- }
-
- return c.C.First()
-}
-
-// Next element
-func (c *Cursor) Next() ([]byte, []byte) {
- if c.Reverse {
- return c.C.Prev()
- }
-
- return c.C.Next()
-}
-
-// RangeCursor that can be reversed
-type RangeCursor struct {
- C *bolt.Cursor
- Reverse bool
- Min []byte
- Max []byte
- CompareFn func([]byte, []byte) int
-}
-
-// First element
-func (c *RangeCursor) First() ([]byte, []byte) {
- if c.Reverse {
- k, v := c.C.Seek(c.Max)
-
- // If Seek doesn't find a key it goes to the next.
- // If so, we need to get the previous one to avoid
- // including bigger values. #218
- if !bytes.HasPrefix(k, c.Max) && k != nil {
- k, v = c.C.Prev()
- }
-
- return k, v
- }
-
- return c.C.Seek(c.Min)
-}
-
-// Next element
-func (c *RangeCursor) Next() ([]byte, []byte) {
- if c.Reverse {
- return c.C.Prev()
- }
-
- return c.C.Next()
-}
-
-// Continue tells if the loop needs to continue
-func (c *RangeCursor) Continue(val []byte) bool {
- if c.Reverse {
- return val != nil && c.CompareFn(val, c.Min) >= 0
- }
-
- return val != nil && c.CompareFn(val, c.Max) <= 0
-}
-
-// PrefixCursor that can be reversed
-type PrefixCursor struct {
- C *bolt.Cursor
- Reverse bool
- Prefix []byte
-}
-
-// First element
-func (c *PrefixCursor) First() ([]byte, []byte) {
- var k, v []byte
-
- for k, v = c.C.First(); k != nil && !bytes.HasPrefix(k, c.Prefix); k, v = c.C.Next() {
- }
-
- if k == nil {
- return nil, nil
- }
-
- if c.Reverse {
- kc, vc := k, v
- for ; kc != nil && bytes.HasPrefix(kc, c.Prefix); kc, vc = c.C.Next() {
- k, v = kc, vc
- }
- if kc != nil {
- k, v = c.C.Prev()
- }
- }
-
- return k, v
-}
-
-// Next element
-func (c *PrefixCursor) Next() ([]byte, []byte) {
- if c.Reverse {
- return c.C.Prev()
- }
-
- return c.C.Next()
-}
-
-// Continue tells if the loop needs to continue
-func (c *PrefixCursor) Continue(val []byte) bool {
- return val != nil && bytes.HasPrefix(val, c.Prefix)
-}
diff --git a/vendor/github.com/asdine/storm/v3/kv.go b/vendor/github.com/asdine/storm/v3/kv.go
deleted file mode 100644
index 7e2fb0aa..00000000
--- a/vendor/github.com/asdine/storm/v3/kv.go
+++ /dev/null
@@ -1,170 +0,0 @@
-package storm
-
-import (
- "reflect"
-
- bolt "go.etcd.io/bbolt"
-)
-
-// KeyValueStore can store and fetch values by key
-type KeyValueStore interface {
- // Get a value from a bucket
- Get(bucketName string, key interface{}, to interface{}) error
- // Set a key/value pair into a bucket
- Set(bucketName string, key interface{}, value interface{}) error
- // Delete deletes a key from a bucket
- Delete(bucketName string, key interface{}) error
- // GetBytes gets a raw value from a bucket.
- GetBytes(bucketName string, key interface{}) ([]byte, error)
- // SetBytes sets a raw value into a bucket.
- SetBytes(bucketName string, key interface{}, value []byte) error
- // KeyExists reports the presence of a key in a bucket.
- KeyExists(bucketName string, key interface{}) (bool, error)
-}
-
-// GetBytes gets a raw value from a bucket.
-func (n *node) GetBytes(bucketName string, key interface{}) ([]byte, error) {
- id, err := toBytes(key, n.codec)
- if err != nil {
- return nil, err
- }
-
- var val []byte
- return val, n.readTx(func(tx *bolt.Tx) error {
- raw, err := n.getBytes(tx, bucketName, id)
- if err != nil {
- return err
- }
-
- val = make([]byte, len(raw))
- copy(val, raw)
- return nil
- })
-}
-
-// GetBytes gets a raw value from a bucket.
-func (n *node) getBytes(tx *bolt.Tx, bucketName string, id []byte) ([]byte, error) {
- bucket := n.GetBucket(tx, bucketName)
- if bucket == nil {
- return nil, ErrNotFound
- }
-
- raw := bucket.Get(id)
- if raw == nil {
- return nil, ErrNotFound
- }
-
- return raw, nil
-}
-
-// SetBytes sets a raw value into a bucket.
-func (n *node) SetBytes(bucketName string, key interface{}, value []byte) error {
- if key == nil {
- return ErrNilParam
- }
-
- id, err := toBytes(key, n.codec)
- if err != nil {
- return err
- }
-
- return n.readWriteTx(func(tx *bolt.Tx) error {
- return n.setBytes(tx, bucketName, id, value)
- })
-}
-
-func (n *node) setBytes(tx *bolt.Tx, bucketName string, id, data []byte) error {
- bucket, err := n.CreateBucketIfNotExists(tx, bucketName)
- if err != nil {
- return err
- }
-
- // save node configuration in the bucket
- _, err = newMeta(bucket, n)
- if err != nil {
- return err
- }
-
- return bucket.Put(id, data)
-}
-
-// Get a value from a bucket
-func (n *node) Get(bucketName string, key interface{}, to interface{}) error {
- ref := reflect.ValueOf(to)
-
- if !ref.IsValid() || ref.Kind() != reflect.Ptr {
- return ErrPtrNeeded
- }
-
- id, err := toBytes(key, n.codec)
- if err != nil {
- return err
- }
-
- return n.readTx(func(tx *bolt.Tx) error {
- raw, err := n.getBytes(tx, bucketName, id)
- if err != nil {
- return err
- }
-
- return n.codec.Unmarshal(raw, to)
- })
-}
-
-// Set a key/value pair into a bucket
-func (n *node) Set(bucketName string, key interface{}, value interface{}) error {
- var data []byte
- var err error
- if value != nil {
- data, err = n.codec.Marshal(value)
- if err != nil {
- return err
- }
- }
-
- return n.SetBytes(bucketName, key, data)
-}
-
-// Delete deletes a key from a bucket
-func (n *node) Delete(bucketName string, key interface{}) error {
- id, err := toBytes(key, n.codec)
- if err != nil {
- return err
- }
-
- return n.readWriteTx(func(tx *bolt.Tx) error {
- return n.delete(tx, bucketName, id)
- })
-}
-
-func (n *node) delete(tx *bolt.Tx, bucketName string, id []byte) error {
- bucket := n.GetBucket(tx, bucketName)
- if bucket == nil {
- return ErrNotFound
- }
-
- return bucket.Delete(id)
-}
-
-// KeyExists reports the presence of a key in a bucket.
-func (n *node) KeyExists(bucketName string, key interface{}) (bool, error) {
- id, err := toBytes(key, n.codec)
- if err != nil {
- return false, err
- }
-
- var exists bool
- return exists, n.readTx(func(tx *bolt.Tx) error {
- bucket := n.GetBucket(tx, bucketName)
- if bucket == nil {
- return ErrNotFound
- }
-
- v := bucket.Get(id)
- if v != nil {
- exists = true
- }
-
- return nil
- })
-}
diff --git a/vendor/github.com/asdine/storm/v3/metadata.go b/vendor/github.com/asdine/storm/v3/metadata.go
deleted file mode 100644
index 21f3bbae..00000000
--- a/vendor/github.com/asdine/storm/v3/metadata.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package storm
-
-import (
- "reflect"
-
- bolt "go.etcd.io/bbolt"
-)
-
-const (
- metaCodec = "codec"
-)
-
-func newMeta(b *bolt.Bucket, n Node) (*meta, error) {
- m := b.Bucket([]byte(metadataBucket))
- if m != nil {
- name := m.Get([]byte(metaCodec))
- if string(name) != n.Codec().Name() {
- return nil, ErrDifferentCodec
- }
- return &meta{
- node: n,
- bucket: m,
- }, nil
- }
-
- m, err := b.CreateBucket([]byte(metadataBucket))
- if err != nil {
- return nil, err
- }
-
- m.Put([]byte(metaCodec), []byte(n.Codec().Name()))
- return &meta{
- node: n,
- bucket: m,
- }, nil
-}
-
-type meta struct {
- node Node
- bucket *bolt.Bucket
-}
-
-func (m *meta) increment(field *fieldConfig) error {
- var err error
- counter := field.IncrementStart
-
- raw := m.bucket.Get([]byte(field.Name + "counter"))
- if raw != nil {
- counter, err = numberfromb(raw)
- if err != nil {
- return err
- }
- counter++
- }
-
- raw, err = numbertob(counter)
- if err != nil {
- return err
- }
-
- err = m.bucket.Put([]byte(field.Name+"counter"), raw)
- if err != nil {
- return err
- }
-
- field.Value.Set(reflect.ValueOf(counter).Convert(field.Value.Type()))
- field.IsZero = false
- return nil
-}
diff --git a/vendor/github.com/asdine/storm/v3/node.go b/vendor/github.com/asdine/storm/v3/node.go
deleted file mode 100644
index 75fbb882..00000000
--- a/vendor/github.com/asdine/storm/v3/node.go
+++ /dev/null
@@ -1,126 +0,0 @@
-package storm
-
-import (
- "github.com/asdine/storm/v3/codec"
- bolt "go.etcd.io/bbolt"
-)
-
-// A Node in Storm represents the API to a BoltDB bucket.
-type Node interface {
- Tx
- TypeStore
- KeyValueStore
- BucketScanner
-
- // From returns a new Storm node with a new bucket root below the current.
- // All DB operations on the new node will be executed relative to this bucket.
- From(addend ...string) Node
-
- // Bucket returns the bucket name as a slice from the root.
- // In the normal, simple case this will be empty.
- Bucket() []string
-
- // GetBucket returns the given bucket below the current node.
- GetBucket(tx *bolt.Tx, children ...string) *bolt.Bucket
-
- // CreateBucketIfNotExists creates the bucket below the current node if it doesn't
- // already exist.
- CreateBucketIfNotExists(tx *bolt.Tx, bucket string) (*bolt.Bucket, error)
-
- // WithTransaction returns a New Storm node that will use the given transaction.
- WithTransaction(tx *bolt.Tx) Node
-
- // Begin starts a new transaction.
- Begin(writable bool) (Node, error)
-
- // Codec used by this instance of Storm
- Codec() codec.MarshalUnmarshaler
-
- // WithCodec returns a New Storm Node that will use the given Codec.
- WithCodec(codec codec.MarshalUnmarshaler) Node
-
- // WithBatch returns a new Storm Node with the batch mode enabled.
- WithBatch(enabled bool) Node
-}
-
-// A Node in Storm represents the API to a BoltDB bucket.
-type node struct {
- s *DB
-
- // The root bucket. In the normal, simple case this will be empty.
- rootBucket []string
-
- // Transaction object. Nil if not in transaction
- tx *bolt.Tx
-
- // Codec of this node
- codec codec.MarshalUnmarshaler
-
- // Enable batch mode for read-write transaction, instead of update mode
- batchMode bool
-}
-
-// From returns a new Storm Node with a new bucket root below the current.
-// All DB operations on the new node will be executed relative to this bucket.
-func (n node) From(addend ...string) Node {
- n.rootBucket = append(n.rootBucket, addend...)
- return &n
-}
-
-// WithTransaction returns a new Storm Node that will use the given transaction.
-func (n node) WithTransaction(tx *bolt.Tx) Node {
- n.tx = tx
- return &n
-}
-
-// WithCodec returns a new Storm Node that will use the given Codec.
-func (n node) WithCodec(codec codec.MarshalUnmarshaler) Node {
- n.codec = codec
- return &n
-}
-
-// WithBatch returns a new Storm Node with the batch mode enabled.
-func (n node) WithBatch(enabled bool) Node {
- n.batchMode = enabled
- return &n
-}
-
-// Bucket returns the bucket name as a slice from the root.
-// In the normal, simple case this will be empty.
-func (n *node) Bucket() []string {
- return n.rootBucket
-}
-
-// Codec returns the EncodeDecoder used by this instance of Storm
-func (n *node) Codec() codec.MarshalUnmarshaler {
- return n.codec
-}
-
-// Detects if already in transaction or runs a read write transaction.
-// Uses batch mode if enabled.
-func (n *node) readWriteTx(fn func(tx *bolt.Tx) error) error {
- if n.tx != nil {
- return fn(n.tx)
- }
-
- if n.batchMode {
- return n.s.Bolt.Batch(func(tx *bolt.Tx) error {
- return fn(tx)
- })
- }
-
- return n.s.Bolt.Update(func(tx *bolt.Tx) error {
- return fn(tx)
- })
-}
-
-// Detects if already in transaction or runs a read transaction.
-func (n *node) readTx(fn func(tx *bolt.Tx) error) error {
- if n.tx != nil {
- return fn(n.tx)
- }
-
- return n.s.Bolt.View(func(tx *bolt.Tx) error {
- return fn(tx)
- })
-}
diff --git a/vendor/github.com/asdine/storm/v3/options.go b/vendor/github.com/asdine/storm/v3/options.go
deleted file mode 100644
index ff899849..00000000
--- a/vendor/github.com/asdine/storm/v3/options.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package storm
-
-import (
- "os"
-
- "github.com/asdine/storm/v3/codec"
- "github.com/asdine/storm/v3/index"
- bolt "go.etcd.io/bbolt"
-)
-
-// BoltOptions used to pass options to BoltDB.
-func BoltOptions(mode os.FileMode, options *bolt.Options) func(*Options) error {
- return func(opts *Options) error {
- opts.boltMode = mode
- opts.boltOptions = options
- return nil
- }
-}
-
-// Codec used to set a custom encoder and decoder. The default is JSON.
-func Codec(c codec.MarshalUnmarshaler) func(*Options) error {
- return func(opts *Options) error {
- opts.codec = c
- return nil
- }
-}
-
-// Batch enables the use of batch instead of update for read-write transactions.
-func Batch() func(*Options) error {
- return func(opts *Options) error {
- opts.batchMode = true
- return nil
- }
-}
-
-// Root used to set the root bucket. See also the From method.
-func Root(root ...string) func(*Options) error {
- return func(opts *Options) error {
- opts.rootBucket = root
- return nil
- }
-}
-
-// UseDB allows Storm to use an existing open Bolt.DB.
-// Warning: storm.DB.Close() will close the bolt.DB instance.
-func UseDB(b *bolt.DB) func(*Options) error {
- return func(opts *Options) error {
- opts.path = b.Path()
- opts.bolt = b
- return nil
- }
-}
-
-// Limit sets the maximum number of records to return
-func Limit(limit int) func(*index.Options) {
- return func(opts *index.Options) {
- opts.Limit = limit
- }
-}
-
-// Skip sets the number of records to skip
-func Skip(offset int) func(*index.Options) {
- return func(opts *index.Options) {
- opts.Skip = offset
- }
-}
-
-// Reverse will return the results in descending order
-func Reverse() func(*index.Options) {
- return func(opts *index.Options) {
- opts.Reverse = true
- }
-}
-
-// Options are used to customize the way Storm opens a database.
-type Options struct {
- // Handles encoding and decoding of objects
- codec codec.MarshalUnmarshaler
-
- // Bolt file mode
- boltMode os.FileMode
-
- // Bolt options
- boltOptions *bolt.Options
-
- // Enable batch mode for read-write transaction, instead of update mode
- batchMode bool
-
- // The root bucket name
- rootBucket []string
-
- // Path of the database file
- path string
-
- // Bolt is still easily accessible
- bolt *bolt.DB
-}
diff --git a/vendor/github.com/asdine/storm/v3/q/compare.go b/vendor/github.com/asdine/storm/v3/q/compare.go
deleted file mode 100644
index acd14c7e..00000000
--- a/vendor/github.com/asdine/storm/v3/q/compare.go
+++ /dev/null
@@ -1,122 +0,0 @@
-package q
-
-import (
- "go/constant"
- "go/token"
- "reflect"
- "strconv"
-)
-
-func compare(a, b interface{}, tok token.Token) bool {
- vala := reflect.ValueOf(a)
- valb := reflect.ValueOf(b)
-
- ak := vala.Kind()
- bk := valb.Kind()
- switch {
- // comparing nil values
- case (ak == reflect.Ptr || ak == reflect.Slice || ak == reflect.Interface || ak == reflect.Invalid) &&
- (bk == reflect.Ptr || ak == reflect.Slice || bk == reflect.Interface || bk == reflect.Invalid) &&
- (!vala.IsValid() || vala.IsNil()) && (!valb.IsValid() || valb.IsNil()):
- return true
- case ak >= reflect.Int && ak <= reflect.Int64:
- if bk >= reflect.Int && bk <= reflect.Int64 {
- return constant.Compare(constant.MakeInt64(vala.Int()), tok, constant.MakeInt64(valb.Int()))
- }
-
- if bk >= reflect.Uint && bk <= reflect.Uint64 {
- return constant.Compare(constant.MakeInt64(vala.Int()), tok, constant.MakeInt64(int64(valb.Uint())))
- }
-
- if bk == reflect.Float32 || bk == reflect.Float64 {
- return constant.Compare(constant.MakeFloat64(float64(vala.Int())), tok, constant.MakeFloat64(valb.Float()))
- }
-
- if bk == reflect.String {
- bla, err := strconv.ParseFloat(valb.String(), 64)
- if err != nil {
- return false
- }
-
- return constant.Compare(constant.MakeFloat64(float64(vala.Int())), tok, constant.MakeFloat64(bla))
- }
- case ak >= reflect.Uint && ak <= reflect.Uint64:
- if bk >= reflect.Uint && bk <= reflect.Uint64 {
- return constant.Compare(constant.MakeUint64(vala.Uint()), tok, constant.MakeUint64(valb.Uint()))
- }
-
- if bk >= reflect.Int && bk <= reflect.Int64 {
- return constant.Compare(constant.MakeUint64(vala.Uint()), tok, constant.MakeUint64(uint64(valb.Int())))
- }
-
- if bk == reflect.Float32 || bk == reflect.Float64 {
- return constant.Compare(constant.MakeFloat64(float64(vala.Uint())), tok, constant.MakeFloat64(valb.Float()))
- }
-
- if bk == reflect.String {
- bla, err := strconv.ParseFloat(valb.String(), 64)
- if err != nil {
- return false
- }
-
- return constant.Compare(constant.MakeFloat64(float64(vala.Uint())), tok, constant.MakeFloat64(bla))
- }
- case ak == reflect.Float32 || ak == reflect.Float64:
- if bk == reflect.Float32 || bk == reflect.Float64 {
- return constant.Compare(constant.MakeFloat64(vala.Float()), tok, constant.MakeFloat64(valb.Float()))
- }
-
- if bk >= reflect.Int && bk <= reflect.Int64 {
- return constant.Compare(constant.MakeFloat64(vala.Float()), tok, constant.MakeFloat64(float64(valb.Int())))
- }
-
- if bk >= reflect.Uint && bk <= reflect.Uint64 {
- return constant.Compare(constant.MakeFloat64(vala.Float()), tok, constant.MakeFloat64(float64(valb.Uint())))
- }
-
- if bk == reflect.String {
- bla, err := strconv.ParseFloat(valb.String(), 64)
- if err != nil {
- return false
- }
-
- return constant.Compare(constant.MakeFloat64(vala.Float()), tok, constant.MakeFloat64(bla))
- }
- case ak == reflect.String:
- if bk == reflect.String {
- return constant.Compare(constant.MakeString(vala.String()), tok, constant.MakeString(valb.String()))
- }
- }
-
- typea, typeb := reflect.TypeOf(a), reflect.TypeOf(b)
-
- if typea != nil && (typea.String() == "time.Time" || typea.String() == "*time.Time") &&
- typeb != nil && (typeb.String() == "time.Time" || typeb.String() == "*time.Time") {
-
- if typea.String() == "*time.Time" && vala.IsNil() {
- return true
- }
-
- if typeb.String() == "*time.Time" {
- if valb.IsNil() {
- return true
- }
- valb = valb.Elem()
- }
-
- var x, y int64
- x = 1
- if vala.MethodByName("Equal").Call([]reflect.Value{valb})[0].Bool() {
- y = 1
- } else if vala.MethodByName("Before").Call([]reflect.Value{valb})[0].Bool() {
- y = 2
- }
- return constant.Compare(constant.MakeInt64(x), tok, constant.MakeInt64(y))
- }
-
- if tok == token.EQL {
- return reflect.DeepEqual(a, b)
- }
-
- return false
-}
diff --git a/vendor/github.com/asdine/storm/v3/q/fieldmatcher.go b/vendor/github.com/asdine/storm/v3/q/fieldmatcher.go
deleted file mode 100644
index c5c82bf0..00000000
--- a/vendor/github.com/asdine/storm/v3/q/fieldmatcher.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package q
-
-import (
- "errors"
- "go/token"
- "reflect"
-)
-
-// ErrUnknownField is returned when an unknown field is passed.
-var ErrUnknownField = errors.New("unknown field")
-
-type fieldMatcherDelegate struct {
- FieldMatcher
- Field string
-}
-
-// NewFieldMatcher creates a Matcher for a given field.
-func NewFieldMatcher(field string, fm FieldMatcher) Matcher {
- return fieldMatcherDelegate{Field: field, FieldMatcher: fm}
-}
-
-// FieldMatcher can be used in NewFieldMatcher as a simple way to create the
-// most common Matcher: A Matcher that evaluates one field's value.
-// For more complex scenarios, implement the Matcher interface directly.
-type FieldMatcher interface {
- MatchField(v interface{}) (bool, error)
-}
-
-func (r fieldMatcherDelegate) Match(i interface{}) (bool, error) {
- v := reflect.Indirect(reflect.ValueOf(i))
- return r.MatchValue(&v)
-}
-
-func (r fieldMatcherDelegate) MatchValue(v *reflect.Value) (bool, error) {
- field := v.FieldByName(r.Field)
- if !field.IsValid() {
- return false, ErrUnknownField
- }
- return r.MatchField(field.Interface())
-}
-
-// NewField2FieldMatcher creates a Matcher for a given field1 and field2.
-func NewField2FieldMatcher(field1, field2 string, tok token.Token) Matcher {
- return field2fieldMatcherDelegate{Field1: field1, Field2: field2, Tok: tok}
-}
-
-type field2fieldMatcherDelegate struct {
- Field1, Field2 string
- Tok token.Token
-}
-
-func (r field2fieldMatcherDelegate) Match(i interface{}) (bool, error) {
- v := reflect.Indirect(reflect.ValueOf(i))
- return r.MatchValue(&v)
-}
-
-func (r field2fieldMatcherDelegate) MatchValue(v *reflect.Value) (bool, error) {
- field1 := v.FieldByName(r.Field1)
- if !field1.IsValid() {
- return false, ErrUnknownField
- }
- field2 := v.FieldByName(r.Field2)
- if !field2.IsValid() {
- return false, ErrUnknownField
- }
- return compare(field1.Interface(), field2.Interface(), r.Tok), nil
-}
diff --git a/vendor/github.com/asdine/storm/v3/q/regexp.go b/vendor/github.com/asdine/storm/v3/q/regexp.go
deleted file mode 100644
index 8d382b17..00000000
--- a/vendor/github.com/asdine/storm/v3/q/regexp.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package q
-
-import (
- "fmt"
- "regexp"
- "sync"
-)
-
-// Re creates a regexp matcher. It checks if the given field matches the given regexp.
-// Note that this only supports fields of type string or []byte.
-func Re(field string, re string) Matcher {
- regexpCache.RLock()
- if r, ok := regexpCache.m[re]; ok {
- regexpCache.RUnlock()
- return NewFieldMatcher(field, ®expMatcher{r: r})
- }
- regexpCache.RUnlock()
-
- regexpCache.Lock()
- r, err := regexp.Compile(re)
- if err == nil {
- regexpCache.m[re] = r
- }
- regexpCache.Unlock()
-
- return NewFieldMatcher(field, ®expMatcher{r: r, err: err})
-}
-
-var regexpCache = struct {
- sync.RWMutex
- m map[string]*regexp.Regexp
-}{m: make(map[string]*regexp.Regexp)}
-
-type regexpMatcher struct {
- r *regexp.Regexp
- err error
-}
-
-func (r *regexpMatcher) MatchField(v interface{}) (bool, error) {
- if r.err != nil {
- return false, r.err
- }
- switch fieldValue := v.(type) {
- case string:
- return r.r.MatchString(fieldValue), nil
- case []byte:
- return r.r.Match(fieldValue), nil
- default:
- return false, fmt.Errorf("Only string and []byte supported for regexp matcher, got %T", fieldValue)
- }
-}
diff --git a/vendor/github.com/asdine/storm/v3/q/tree.go b/vendor/github.com/asdine/storm/v3/q/tree.go
deleted file mode 100644
index c3cbe0f4..00000000
--- a/vendor/github.com/asdine/storm/v3/q/tree.go
+++ /dev/null
@@ -1,247 +0,0 @@
-// Package q contains a list of Matchers used to compare struct fields with values
-package q
-
-import (
- "go/token"
- "reflect"
-)
-
-// A Matcher is used to test against a record to see if it matches.
-type Matcher interface {
- // Match is used to test the criteria against a structure.
- Match(interface{}) (bool, error)
-}
-
-// A ValueMatcher is used to test against a reflect.Value.
-type ValueMatcher interface {
- // MatchValue tests if the given reflect.Value matches.
- // It is useful when the reflect.Value of an object already exists.
- MatchValue(*reflect.Value) (bool, error)
-}
-
-type cmp struct {
- value interface{}
- token token.Token
-}
-
-func (c *cmp) MatchField(v interface{}) (bool, error) {
- return compare(v, c.value, c.token), nil
-}
-
-type trueMatcher struct{}
-
-func (*trueMatcher) Match(i interface{}) (bool, error) {
- return true, nil
-}
-
-func (*trueMatcher) MatchValue(v *reflect.Value) (bool, error) {
- return true, nil
-}
-
-type or struct {
- children []Matcher
-}
-
-func (c *or) Match(i interface{}) (bool, error) {
- v := reflect.Indirect(reflect.ValueOf(i))
- return c.MatchValue(&v)
-}
-
-func (c *or) MatchValue(v *reflect.Value) (bool, error) {
- for _, matcher := range c.children {
- if vm, ok := matcher.(ValueMatcher); ok {
- ok, err := vm.MatchValue(v)
- if err != nil {
- return false, err
- }
- if ok {
- return true, nil
- }
- continue
- }
-
- ok, err := matcher.Match(v.Interface())
- if err != nil {
- return false, err
- }
- if ok {
- return true, nil
- }
- }
-
- return false, nil
-}
-
-type and struct {
- children []Matcher
-}
-
-func (c *and) Match(i interface{}) (bool, error) {
- v := reflect.Indirect(reflect.ValueOf(i))
- return c.MatchValue(&v)
-}
-
-func (c *and) MatchValue(v *reflect.Value) (bool, error) {
- for _, matcher := range c.children {
- if vm, ok := matcher.(ValueMatcher); ok {
- ok, err := vm.MatchValue(v)
- if err != nil {
- return false, err
- }
- if !ok {
- return false, nil
- }
- continue
- }
-
- ok, err := matcher.Match(v.Interface())
- if err != nil {
- return false, err
- }
- if !ok {
- return false, nil
- }
- }
-
- return true, nil
-}
-
-type strictEq struct {
- field string
- value interface{}
-}
-
-func (s *strictEq) MatchField(v interface{}) (bool, error) {
- return reflect.DeepEqual(v, s.value), nil
-}
-
-type in struct {
- list interface{}
-}
-
-func (i *in) MatchField(v interface{}) (bool, error) {
- ref := reflect.ValueOf(i.list)
- if ref.Kind() != reflect.Slice {
- return false, nil
- }
-
- c := cmp{
- token: token.EQL,
- }
-
- for i := 0; i < ref.Len(); i++ {
- c.value = ref.Index(i).Interface()
- ok, err := c.MatchField(v)
- if err != nil {
- return false, err
- }
- if ok {
- return true, nil
- }
- }
-
- return false, nil
-}
-
-type not struct {
- children []Matcher
-}
-
-func (n *not) Match(i interface{}) (bool, error) {
- v := reflect.Indirect(reflect.ValueOf(i))
- return n.MatchValue(&v)
-}
-
-func (n *not) MatchValue(v *reflect.Value) (bool, error) {
- var err error
-
- for _, matcher := range n.children {
- vm, ok := matcher.(ValueMatcher)
- if ok {
- ok, err = vm.MatchValue(v)
- } else {
- ok, err = matcher.Match(v.Interface())
- }
- if err != nil {
- return false, err
- }
- if ok {
- return false, nil
- }
- }
-
- return true, nil
-}
-
-// Eq matcher, checks if the given field is equal to the given value
-func Eq(field string, v interface{}) Matcher {
- return NewFieldMatcher(field, &cmp{value: v, token: token.EQL})
-}
-
-// EqF matcher, checks if the given field is equal to the given field
-func EqF(field1, field2 string) Matcher {
- return NewField2FieldMatcher(field1, field2, token.EQL)
-}
-
-// StrictEq matcher, checks if the given field is deeply equal to the given value
-func StrictEq(field string, v interface{}) Matcher {
- return NewFieldMatcher(field, &strictEq{value: v})
-}
-
-// Gt matcher, checks if the given field is greater than the given value
-func Gt(field string, v interface{}) Matcher {
- return NewFieldMatcher(field, &cmp{value: v, token: token.GTR})
-}
-
-// GtF matcher, checks if the given field is greater than the given field
-func GtF(field1, field2 string) Matcher {
- return NewField2FieldMatcher(field1, field2, token.GTR)
-}
-
-// Gte matcher, checks if the given field is greater than or equal to the given value
-func Gte(field string, v interface{}) Matcher {
- return NewFieldMatcher(field, &cmp{value: v, token: token.GEQ})
-}
-
-// GteF matcher, checks if the given field is greater than or equal to the given field
-func GteF(field1, field2 string) Matcher {
- return NewField2FieldMatcher(field1, field2, token.GEQ)
-}
-
-// Lt matcher, checks if the given field is lesser than the given value
-func Lt(field string, v interface{}) Matcher {
- return NewFieldMatcher(field, &cmp{value: v, token: token.LSS})
-}
-
-// LtF matcher, checks if the given field is lesser than the given field
-func LtF(field1, field2 string) Matcher {
- return NewField2FieldMatcher(field1, field2, token.LSS)
-}
-
-// Lte matcher, checks if the given field is lesser than or equal to the given value
-func Lte(field string, v interface{}) Matcher {
- return NewFieldMatcher(field, &cmp{value: v, token: token.LEQ})
-}
-
-// LteF matcher, checks if the given field is lesser than or equal to the given field
-func LteF(field1, field2 string) Matcher {
- return NewField2FieldMatcher(field1, field2, token.LEQ)
-}
-
-// In matcher, checks if the given field matches one of the value of the given slice.
-// v must be a slice.
-func In(field string, v interface{}) Matcher {
- return NewFieldMatcher(field, &in{list: v})
-}
-
-// True matcher, always returns true
-func True() Matcher { return &trueMatcher{} }
-
-// Or matcher, checks if at least one of the given matchers matches the record
-func Or(matchers ...Matcher) Matcher { return &or{children: matchers} }
-
-// And matcher, checks if all of the given matchers matches the record
-func And(matchers ...Matcher) Matcher { return &and{children: matchers} }
-
-// Not matcher, checks if all of the given matchers return false
-func Not(matchers ...Matcher) Matcher { return ¬{children: matchers} }
diff --git a/vendor/github.com/asdine/storm/v3/query.go b/vendor/github.com/asdine/storm/v3/query.go
deleted file mode 100644
index 5f57583a..00000000
--- a/vendor/github.com/asdine/storm/v3/query.go
+++ /dev/null
@@ -1,219 +0,0 @@
-package storm
-
-import (
- "github.com/asdine/storm/v3/internal"
- "github.com/asdine/storm/v3/q"
- bolt "go.etcd.io/bbolt"
-)
-
-// Select a list of records that match a list of matchers. Doesn't use indexes.
-func (n *node) Select(matchers ...q.Matcher) Query {
- tree := q.And(matchers...)
- return newQuery(n, tree)
-}
-
-// Query is the low level query engine used by Storm. It allows to operate searches through an entire bucket.
-type Query interface {
- // Skip matching records by the given number
- Skip(int) Query
-
- // Limit the results by the given number
- Limit(int) Query
-
- // Order by the given fields, in descending precedence, left-to-right.
- OrderBy(...string) Query
-
- // Reverse the order of the results
- Reverse() Query
-
- // Bucket specifies the bucket name
- Bucket(string) Query
-
- // Find a list of matching records
- Find(interface{}) error
-
- // First gets the first matching record
- First(interface{}) error
-
- // Delete all matching records
- Delete(interface{}) error
-
- // Count all the matching records
- Count(interface{}) (int, error)
-
- // Returns all the records without decoding them
- Raw() ([][]byte, error)
-
- // Execute the given function for each raw element
- RawEach(func([]byte, []byte) error) error
-
- // Execute the given function for each element
- Each(interface{}, func(interface{}) error) error
-}
-
-func newQuery(n *node, tree q.Matcher) *query {
- return &query{
- skip: 0,
- limit: -1,
- node: n,
- tree: tree,
- }
-}
-
-type query struct {
- limit int
- skip int
- reverse bool
- tree q.Matcher
- node *node
- bucket string
- orderBy []string
-}
-
-func (q *query) Skip(nb int) Query {
- q.skip = nb
- return q
-}
-
-func (q *query) Limit(nb int) Query {
- q.limit = nb
- return q
-}
-
-func (q *query) OrderBy(field ...string) Query {
- q.orderBy = field
- return q
-}
-
-func (q *query) Reverse() Query {
- q.reverse = true
- return q
-}
-
-func (q *query) Bucket(bucketName string) Query {
- q.bucket = bucketName
- return q
-}
-
-func (q *query) Find(to interface{}) error {
- sink, err := newListSink(q.node, to)
- if err != nil {
- return err
- }
-
- return q.runQuery(sink)
-}
-
-func (q *query) First(to interface{}) error {
- sink, err := newFirstSink(q.node, to)
- if err != nil {
- return err
- }
-
- q.limit = 1
- return q.runQuery(sink)
-}
-
-func (q *query) Delete(kind interface{}) error {
- sink, err := newDeleteSink(q.node, kind)
- if err != nil {
- return err
- }
-
- return q.runQuery(sink)
-}
-
-func (q *query) Count(kind interface{}) (int, error) {
- sink, err := newCountSink(q.node, kind)
- if err != nil {
- return 0, err
- }
-
- err = q.runQuery(sink)
- if err != nil {
- return 0, err
- }
-
- return sink.counter, nil
-}
-
-func (q *query) Raw() ([][]byte, error) {
- sink := newRawSink()
-
- err := q.runQuery(sink)
- if err != nil {
- return nil, err
- }
-
- return sink.results, nil
-}
-
-func (q *query) RawEach(fn func([]byte, []byte) error) error {
- sink := newRawSink()
-
- sink.execFn = fn
-
- return q.runQuery(sink)
-}
-
-func (q *query) Each(kind interface{}, fn func(interface{}) error) error {
- sink, err := newEachSink(kind)
- if err != nil {
- return err
- }
-
- sink.execFn = fn
-
- return q.runQuery(sink)
-}
-
-func (q *query) runQuery(sink sink) error {
- if q.node.tx != nil {
- return q.query(q.node.tx, sink)
- }
- if sink.readOnly() {
- return q.node.s.Bolt.View(func(tx *bolt.Tx) error {
- return q.query(tx, sink)
- })
- }
- return q.node.s.Bolt.Update(func(tx *bolt.Tx) error {
- return q.query(tx, sink)
- })
-}
-
-func (q *query) query(tx *bolt.Tx, sink sink) error {
- bucketName := q.bucket
- if bucketName == "" {
- bucketName = sink.bucketName()
- }
- bucket := q.node.GetBucket(tx, bucketName)
-
- if q.limit == 0 {
- return sink.flush()
- }
-
- sorter := newSorter(q.node, sink)
- sorter.orderBy = q.orderBy
- sorter.reverse = q.reverse
- sorter.skip = q.skip
- sorter.limit = q.limit
- if bucket != nil {
- c := internal.Cursor{C: bucket.Cursor(), Reverse: q.reverse}
- for k, v := c.First(); k != nil; k, v = c.Next() {
- if v == nil {
- continue
- }
-
- stop, err := sorter.filter(q.tree, bucket, k, v)
- if err != nil {
- return err
- }
-
- if stop {
- break
- }
- }
- }
-
- return sorter.flush()
-}
diff --git a/vendor/github.com/asdine/storm/v3/scan.go b/vendor/github.com/asdine/storm/v3/scan.go
deleted file mode 100644
index f2596e62..00000000
--- a/vendor/github.com/asdine/storm/v3/scan.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package storm
-
-import (
- "bytes"
-
- bolt "go.etcd.io/bbolt"
-)
-
-// A BucketScanner scans a Node for a list of buckets
-type BucketScanner interface {
- // PrefixScan scans the root buckets for keys matching the given prefix.
- PrefixScan(prefix string) []Node
- // PrefixScan scans the buckets in this node for keys matching the given prefix.
- RangeScan(min, max string) []Node
-}
-
-// PrefixScan scans the buckets in this node for keys matching the given prefix.
-func (n *node) PrefixScan(prefix string) []Node {
- if n.tx != nil {
- return n.prefixScan(n.tx, prefix)
- }
-
- var nodes []Node
-
- n.readTx(func(tx *bolt.Tx) error {
- nodes = n.prefixScan(tx, prefix)
- return nil
- })
-
- return nodes
-}
-
-func (n *node) prefixScan(tx *bolt.Tx, prefix string) []Node {
- var (
- prefixBytes = []byte(prefix)
- nodes []Node
- c = n.cursor(tx)
- )
-
- if c == nil {
- return nil
- }
-
- for k, v := c.Seek(prefixBytes); k != nil && bytes.HasPrefix(k, prefixBytes); k, v = c.Next() {
- if v != nil {
- continue
- }
-
- nodes = append(nodes, n.From(string(k)))
- }
-
- return nodes
-}
-
-// RangeScan scans the buckets in this node over a range such as a sortable time range.
-func (n *node) RangeScan(min, max string) []Node {
- if n.tx != nil {
- return n.rangeScan(n.tx, min, max)
- }
-
- var nodes []Node
-
- n.readTx(func(tx *bolt.Tx) error {
- nodes = n.rangeScan(tx, min, max)
- return nil
- })
-
- return nodes
-}
-
-func (n *node) rangeScan(tx *bolt.Tx, min, max string) []Node {
- var (
- minBytes = []byte(min)
- maxBytes = []byte(max)
- nodes []Node
- c = n.cursor(tx)
- )
-
- for k, v := c.Seek(minBytes); k != nil && bytes.Compare(k, maxBytes) <= 0; k, v = c.Next() {
- if v != nil {
- continue
- }
-
- nodes = append(nodes, n.From(string(k)))
- }
-
- return nodes
-
-}
-
-func (n *node) cursor(tx *bolt.Tx) *bolt.Cursor {
- var c *bolt.Cursor
-
- if len(n.rootBucket) > 0 {
- b := n.GetBucket(tx)
- if b == nil {
- return nil
- }
- c = b.Cursor()
- } else {
- c = tx.Cursor()
- }
-
- return c
-}
diff --git a/vendor/github.com/asdine/storm/v3/sink.go b/vendor/github.com/asdine/storm/v3/sink.go
deleted file mode 100644
index 117119e9..00000000
--- a/vendor/github.com/asdine/storm/v3/sink.go
+++ /dev/null
@@ -1,620 +0,0 @@
-package storm
-
-import (
- "reflect"
- "sort"
- "time"
- "github.com/asdine/storm/v3/index"
- "github.com/asdine/storm/v3/q"
- bolt "go.etcd.io/bbolt"
-)
-
-type item struct {
- value *reflect.Value
- bucket *bolt.Bucket
- k []byte
- v []byte
-}
-
-func newSorter(n Node, snk sink) *sorter {
- return &sorter{
- node: n,
- sink: snk,
- skip: 0,
- limit: -1,
- list: make([]*item, 0),
- err: make(chan error),
- done: make(chan struct{}),
- }
-}
-
-type sorter struct {
- node Node
- sink sink
- list []*item
- skip int
- limit int
- orderBy []string
- reverse bool
- err chan error
- done chan struct{}
-}
-
-func (s *sorter) filter(tree q.Matcher, bucket *bolt.Bucket, k, v []byte) (bool, error) {
- itm := &item{
- bucket: bucket,
- k: k,
- v: v,
- }
- rsink, ok := s.sink.(reflectSink)
- if !ok {
- return s.add(itm)
- }
-
- newElem := rsink.elem()
- if err := s.node.Codec().Unmarshal(v, newElem.Interface()); err != nil {
- return false, err
- }
- itm.value = &newElem
-
- if tree != nil {
- ok, err := tree.Match(newElem.Interface())
- if err != nil {
- return false, err
- }
- if !ok {
- return false, nil
- }
- }
-
- if len(s.orderBy) == 0 {
- return s.add(itm)
- }
-
- if _, ok := s.sink.(sliceSink); ok {
- // add directly to sink, we'll apply skip/limits after sorting
- return false, s.sink.add(itm)
- }
-
- s.list = append(s.list, itm)
-
- return false, nil
-}
-
-func (s *sorter) add(itm *item) (stop bool, err error) {
- if s.limit == 0 {
- return true, nil
- }
-
- if s.skip > 0 {
- s.skip--
- return false, nil
- }
-
- if s.limit > 0 {
- s.limit--
- }
-
- err = s.sink.add(itm)
-
- return s.limit == 0, err
-}
-
-func (s *sorter) compareValue(left reflect.Value, right reflect.Value) int {
- if !left.IsValid() || !right.IsValid() {
- if left.IsValid() {
- return 1
- }
- return -1
- }
-
- switch left.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- l, r := left.Int(), right.Int()
- if l < r {
- return -1
- }
- if l > r {
- return 1
- }
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- l, r := left.Uint(), right.Uint()
- if l < r {
- return -1
- }
- if l > r {
- return 1
- }
- case reflect.Float32, reflect.Float64:
- l, r := left.Float(), right.Float()
- if l < r {
- return -1
- }
- if l > r {
- return 1
- }
- case reflect.String:
- l, r := left.String(), right.String()
- if l < r {
- return -1
- }
- if l > r {
- return 1
- }
- case reflect.Struct:
- if lt, lok := left.Interface().(time.Time); lok {
- if rt, rok := right.Interface().(time.Time); rok {
- if lok && rok {
- if lt.Before(rt) {
- return -1
- } else {
- return 1
- }
- }
- }
- }
- default:
- rawLeft, err := toBytes(left.Interface(), s.node.Codec())
- if err != nil {
- return -1
- }
- rawRight, err := toBytes(right.Interface(), s.node.Codec())
- if err != nil {
- return 1
- }
-
- l, r := string(rawLeft), string(rawRight)
- if l < r {
- return -1
- }
- if l > r {
- return 1
- }
- }
-
- return 0
-}
-
-func (s *sorter) less(leftElem reflect.Value, rightElem reflect.Value) bool {
- for _, orderBy := range s.orderBy {
- leftField := reflect.Indirect(leftElem).FieldByName(orderBy)
- if !leftField.IsValid() {
- s.err <- ErrNotFound
- return false
- }
- rightField := reflect.Indirect(rightElem).FieldByName(orderBy)
- if !rightField.IsValid() {
- s.err <- ErrNotFound
- return false
- }
-
- direction := 1
- if s.reverse {
- direction = -1
- }
-
- switch s.compareValue(leftField, rightField) * direction {
- case -1:
- return true
- case 1:
- return false
- default:
- continue
- }
- }
-
- return false
-}
-
-func (s *sorter) flush() error {
- if len(s.orderBy) == 0 {
- return s.sink.flush()
- }
-
- go func() {
- sort.Sort(s)
- close(s.err)
- }()
- err := <-s.err
- close(s.done)
-
- if err != nil {
- return err
- }
-
- if ssink, ok := s.sink.(sliceSink); ok {
- if !ssink.slice().IsValid() {
- return s.sink.flush()
- }
- if s.skip >= ssink.slice().Len() {
- ssink.reset()
- return s.sink.flush()
- }
- leftBound := s.skip
- if leftBound < 0 {
- leftBound = 0
- }
- limit := s.limit
- if s.limit < 0 {
- limit = 0
- }
-
- rightBound := leftBound + limit
- if rightBound > ssink.slice().Len() || rightBound == leftBound {
- rightBound = ssink.slice().Len()
- }
- ssink.setSlice(ssink.slice().Slice(leftBound, rightBound))
- return s.sink.flush()
- }
-
- for _, itm := range s.list {
- if itm == nil {
- break
- }
- stop, err := s.add(itm)
- if err != nil {
- return err
- }
- if stop {
- break
- }
- }
-
- return s.sink.flush()
-}
-
-func (s *sorter) Len() int {
- // skip if we encountered an earlier error
- select {
- case <-s.done:
- return 0
- default:
- }
- if ssink, ok := s.sink.(sliceSink); ok {
- return ssink.slice().Len()
- }
- return len(s.list)
-
-}
-
-func (s *sorter) Less(i, j int) bool {
- // skip if we encountered an earlier error
- select {
- case <-s.done:
- return false
- default:
- }
-
- if ssink, ok := s.sink.(sliceSink); ok {
- return s.less(ssink.slice().Index(i), ssink.slice().Index(j))
- }
- return s.less(*s.list[i].value, *s.list[j].value)
-}
-
-type sink interface {
- bucketName() string
- flush() error
- add(*item) error
- readOnly() bool
-}
-
-type reflectSink interface {
- elem() reflect.Value
-}
-
-type sliceSink interface {
- slice() reflect.Value
- setSlice(reflect.Value)
- reset()
-}
-
-func newListSink(node Node, to interface{}) (*listSink, error) {
- ref := reflect.ValueOf(to)
-
- if ref.Kind() != reflect.Ptr || reflect.Indirect(ref).Kind() != reflect.Slice {
- return nil, ErrSlicePtrNeeded
- }
-
- sliceType := reflect.Indirect(ref).Type()
- elemType := sliceType.Elem()
-
- if elemType.Kind() == reflect.Ptr {
- elemType = elemType.Elem()
- }
-
- if elemType.Name() == "" {
- return nil, ErrNoName
- }
-
- return &listSink{
- node: node,
- ref: ref,
- isPtr: sliceType.Elem().Kind() == reflect.Ptr,
- elemType: elemType,
- name: elemType.Name(),
- results: reflect.MakeSlice(reflect.Indirect(ref).Type(), 0, 0),
- }, nil
-}
-
-type listSink struct {
- node Node
- ref reflect.Value
- results reflect.Value
- elemType reflect.Type
- name string
- isPtr bool
- idx int
-}
-
-func (l *listSink) slice() reflect.Value {
- return l.results
-}
-
-func (l *listSink) setSlice(s reflect.Value) {
- l.results = s
-}
-
-func (l *listSink) reset() {
- l.results = reflect.MakeSlice(reflect.Indirect(l.ref).Type(), 0, 0)
-}
-
-func (l *listSink) elem() reflect.Value {
- if l.results.IsValid() && l.idx < l.results.Len() {
- return l.results.Index(l.idx).Addr()
- }
- return reflect.New(l.elemType)
-}
-
-func (l *listSink) bucketName() string {
- return l.name
-}
-
-func (l *listSink) add(i *item) error {
- if l.idx == l.results.Len() {
- if l.isPtr {
- l.results = reflect.Append(l.results, *i.value)
- } else {
- l.results = reflect.Append(l.results, reflect.Indirect(*i.value))
- }
- }
-
- l.idx++
-
- return nil
-}
-
-func (l *listSink) flush() error {
- if l.results.IsValid() && l.results.Len() > 0 {
- reflect.Indirect(l.ref).Set(l.results)
- return nil
- }
-
- return ErrNotFound
-}
-
-func (l *listSink) readOnly() bool {
- return true
-}
-
-func newFirstSink(node Node, to interface{}) (*firstSink, error) {
- ref := reflect.ValueOf(to)
-
- if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
- return nil, ErrStructPtrNeeded
- }
-
- return &firstSink{
- node: node,
- ref: ref,
- }, nil
-}
-
-type firstSink struct {
- node Node
- ref reflect.Value
- found bool
-}
-
-func (f *firstSink) elem() reflect.Value {
- return reflect.New(reflect.Indirect(f.ref).Type())
-}
-
-func (f *firstSink) bucketName() string {
- return reflect.Indirect(f.ref).Type().Name()
-}
-
-func (f *firstSink) add(i *item) error {
- reflect.Indirect(f.ref).Set(i.value.Elem())
- f.found = true
- return nil
-}
-
-func (f *firstSink) flush() error {
- if !f.found {
- return ErrNotFound
- }
-
- return nil
-}
-
-func (f *firstSink) readOnly() bool {
- return true
-}
-
-func newDeleteSink(node Node, kind interface{}) (*deleteSink, error) {
- ref := reflect.ValueOf(kind)
-
- if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
- return nil, ErrStructPtrNeeded
- }
-
- return &deleteSink{
- node: node,
- ref: ref,
- }, nil
-}
-
-type deleteSink struct {
- node Node
- ref reflect.Value
- removed int
-}
-
-func (d *deleteSink) elem() reflect.Value {
- return reflect.New(reflect.Indirect(d.ref).Type())
-}
-
-func (d *deleteSink) bucketName() string {
- return reflect.Indirect(d.ref).Type().Name()
-}
-
-func (d *deleteSink) add(i *item) error {
- info, err := extract(&d.ref)
- if err != nil {
- return err
- }
-
- for fieldName, fieldCfg := range info.Fields {
- if fieldCfg.Index == "" {
- continue
- }
- idx, err := getIndex(i.bucket, fieldCfg.Index, fieldName)
- if err != nil {
- return err
- }
-
- err = idx.RemoveID(i.k)
- if err != nil {
- if err == index.ErrNotFound {
- return ErrNotFound
- }
- return err
- }
- }
-
- d.removed++
- return i.bucket.Delete(i.k)
-}
-
-func (d *deleteSink) flush() error {
- if d.removed == 0 {
- return ErrNotFound
- }
-
- return nil
-}
-
-func (d *deleteSink) readOnly() bool {
- return false
-}
-
-func newCountSink(node Node, kind interface{}) (*countSink, error) {
- ref := reflect.ValueOf(kind)
-
- if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
- return nil, ErrStructPtrNeeded
- }
-
- return &countSink{
- node: node,
- ref: ref,
- }, nil
-}
-
-type countSink struct {
- node Node
- ref reflect.Value
- counter int
-}
-
-func (c *countSink) elem() reflect.Value {
- return reflect.New(reflect.Indirect(c.ref).Type())
-}
-
-func (c *countSink) bucketName() string {
- return reflect.Indirect(c.ref).Type().Name()
-}
-
-func (c *countSink) add(i *item) error {
- c.counter++
- return nil
-}
-
-func (c *countSink) flush() error {
- return nil
-}
-
-func (c *countSink) readOnly() bool {
- return true
-}
-
-func newRawSink() *rawSink {
- return &rawSink{}
-}
-
-type rawSink struct {
- results [][]byte
- execFn func([]byte, []byte) error
-}
-
-func (r *rawSink) add(i *item) error {
- if r.execFn != nil {
- err := r.execFn(i.k, i.v)
- if err != nil {
- return err
- }
- } else {
- r.results = append(r.results, i.v)
- }
-
- return nil
-}
-
-func (r *rawSink) bucketName() string {
- return ""
-}
-
-func (r *rawSink) flush() error {
- return nil
-}
-
-func (r *rawSink) readOnly() bool {
- return true
-}
-
-func newEachSink(to interface{}) (*eachSink, error) {
- ref := reflect.ValueOf(to)
-
- if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
- return nil, ErrStructPtrNeeded
- }
-
- return &eachSink{
- ref: ref,
- }, nil
-}
-
-type eachSink struct {
- ref reflect.Value
- execFn func(interface{}) error
-}
-
-func (e *eachSink) elem() reflect.Value {
- return reflect.New(reflect.Indirect(e.ref).Type())
-}
-
-func (e *eachSink) bucketName() string {
- return reflect.Indirect(e.ref).Type().Name()
-}
-
-func (e *eachSink) add(i *item) error {
- return e.execFn(i.value.Interface())
-}
-
-func (e *eachSink) flush() error {
- return nil
-}
-
-func (e *eachSink) readOnly() bool {
- return true
-}
diff --git a/vendor/github.com/asdine/storm/v3/sink_sorter_swap.go b/vendor/github.com/asdine/storm/v3/sink_sorter_swap.go
deleted file mode 100644
index 43af7cdc..00000000
--- a/vendor/github.com/asdine/storm/v3/sink_sorter_swap.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// +build !go1.8
-
-package storm
-
-import "reflect"
-
-func (s *sorter) Swap(i, j int) {
- // skip if we encountered an earlier error
- select {
- case <-s.done:
- return
- default:
- }
-
- if ssink, ok := s.sink.(sliceSink); ok {
- x, y := ssink.slice().Index(i).Interface(), ssink.slice().Index(j).Interface()
- ssink.slice().Index(i).Set(reflect.ValueOf(y))
- ssink.slice().Index(j).Set(reflect.ValueOf(x))
- } else {
- s.list[i], s.list[j] = s.list[j], s.list[i]
- }
-}
diff --git a/vendor/github.com/asdine/storm/v3/sink_sorter_swap_go1.8.go b/vendor/github.com/asdine/storm/v3/sink_sorter_swap_go1.8.go
deleted file mode 100644
index 21bf7ae9..00000000
--- a/vendor/github.com/asdine/storm/v3/sink_sorter_swap_go1.8.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build go1.8
-
-package storm
-
-import "reflect"
-
-func (s *sorter) Swap(i, j int) {
- // skip if we encountered an earlier error
- select {
- case <-s.done:
- return
- default:
- }
-
- if ssink, ok := s.sink.(sliceSink); ok {
- reflect.Swapper(ssink.slice().Interface())(i, j)
- } else {
- s.list[i], s.list[j] = s.list[j], s.list[i]
- }
-}
diff --git a/vendor/github.com/asdine/storm/v3/store.go b/vendor/github.com/asdine/storm/v3/store.go
deleted file mode 100644
index f22651f4..00000000
--- a/vendor/github.com/asdine/storm/v3/store.go
+++ /dev/null
@@ -1,425 +0,0 @@
-package storm
-
-import (
- "bytes"
- "reflect"
-
- "github.com/asdine/storm/v3/index"
- "github.com/asdine/storm/v3/q"
- bolt "go.etcd.io/bbolt"
-)
-
-// TypeStore stores user defined types in BoltDB.
-type TypeStore interface {
- Finder
- // Init creates the indexes and buckets for a given structure
- Init(data interface{}) error
-
- // ReIndex rebuilds all the indexes of a bucket
- ReIndex(data interface{}) error
-
- // Save a structure
- Save(data interface{}) error
-
- // Update a structure
- Update(data interface{}) error
-
- // UpdateField updates a single field
- UpdateField(data interface{}, fieldName string, value interface{}) error
-
- // Drop a bucket
- Drop(data interface{}) error
-
- // DeleteStruct deletes a structure from the associated bucket
- DeleteStruct(data interface{}) error
-}
-
-// Init creates the indexes and buckets for a given structure
-func (n *node) Init(data interface{}) error {
- v := reflect.ValueOf(data)
- cfg, err := extract(&v)
- if err != nil {
- return err
- }
-
- return n.readWriteTx(func(tx *bolt.Tx) error {
- return n.init(tx, cfg)
- })
-}
-
-func (n *node) init(tx *bolt.Tx, cfg *structConfig) error {
- bucket, err := n.CreateBucketIfNotExists(tx, cfg.Name)
- if err != nil {
- return err
- }
-
- // save node configuration in the bucket
- _, err = newMeta(bucket, n)
- if err != nil {
- return err
- }
-
- for fieldName, fieldCfg := range cfg.Fields {
- if fieldCfg.Index == "" {
- continue
- }
- switch fieldCfg.Index {
- case tagUniqueIdx:
- _, err = index.NewUniqueIndex(bucket, []byte(indexPrefix+fieldName))
- case tagIdx:
- _, err = index.NewListIndex(bucket, []byte(indexPrefix+fieldName))
- default:
- err = ErrIdxNotFound
- }
-
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (n *node) ReIndex(data interface{}) error {
- ref := reflect.ValueOf(data)
-
- if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
- return ErrStructPtrNeeded
- }
-
- cfg, err := extract(&ref)
- if err != nil {
- return err
- }
-
- return n.readWriteTx(func(tx *bolt.Tx) error {
- return n.reIndex(tx, data, cfg)
- })
-}
-
-func (n *node) reIndex(tx *bolt.Tx, data interface{}, cfg *structConfig) error {
- root := n.WithTransaction(tx)
- nodes := root.From(cfg.Name).PrefixScan(indexPrefix)
- bucket := root.GetBucket(tx, cfg.Name)
- if bucket == nil {
- return ErrNotFound
- }
-
- for _, node := range nodes {
- buckets := node.Bucket()
- name := buckets[len(buckets)-1]
- err := bucket.DeleteBucket([]byte(name))
- if err != nil {
- return err
- }
- }
-
- total, err := root.Count(data)
- if err != nil {
- return err
- }
-
- for i := 0; i < total; i++ {
- err = root.Select(q.True()).Skip(i).First(data)
- if err != nil {
- return err
- }
-
- err = root.Update(data)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Save a structure
-func (n *node) Save(data interface{}) error {
- ref := reflect.ValueOf(data)
-
- if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
- return ErrStructPtrNeeded
- }
-
- cfg, err := extract(&ref)
- if err != nil {
- return err
- }
-
- if cfg.ID.IsZero {
- if !cfg.ID.IsInteger || !cfg.ID.Increment {
- return ErrZeroID
- }
- }
-
- return n.readWriteTx(func(tx *bolt.Tx) error {
- return n.save(tx, cfg, data, false)
- })
-}
-
-func (n *node) save(tx *bolt.Tx, cfg *structConfig, data interface{}, update bool) error {
- bucket, err := n.CreateBucketIfNotExists(tx, cfg.Name)
- if err != nil {
- return err
- }
-
- // save node configuration in the bucket
- meta, err := newMeta(bucket, n)
- if err != nil {
- return err
- }
-
- if cfg.ID.IsZero {
- err = meta.increment(cfg.ID)
- if err != nil {
- return err
- }
- }
-
- id, err := toBytes(cfg.ID.Value.Interface(), n.codec)
- if err != nil {
- return err
- }
-
- for fieldName, fieldCfg := range cfg.Fields {
- if !update && !fieldCfg.IsID && fieldCfg.Increment && fieldCfg.IsInteger && fieldCfg.IsZero {
- err = meta.increment(fieldCfg)
- if err != nil {
- return err
- }
- }
-
- if fieldCfg.Index == "" {
- continue
- }
-
- idx, err := getIndex(bucket, fieldCfg.Index, fieldName)
- if err != nil {
- return err
- }
-
- if update && fieldCfg.IsZero && !fieldCfg.ForceUpdate {
- continue
- }
-
- if fieldCfg.IsZero {
- err = idx.RemoveID(id)
- if err != nil {
- return err
- }
- continue
- }
-
- value, err := toBytes(fieldCfg.Value.Interface(), n.codec)
- if err != nil {
- return err
- }
-
- var found bool
- idsSaved, err := idx.All(value, nil)
- if err != nil {
- return err
- }
- for _, idSaved := range idsSaved {
- if bytes.Compare(idSaved, id) == 0 {
- found = true
- break
- }
- }
-
- if found {
- continue
- }
-
- err = idx.RemoveID(id)
- if err != nil {
- return err
- }
-
- err = idx.Add(value, id)
- if err != nil {
- if err == index.ErrAlreadyExists {
- return ErrAlreadyExists
- }
- return err
- }
- }
-
- raw, err := n.codec.Marshal(data)
- if err != nil {
- return err
- }
-
- return bucket.Put(id, raw)
-}
-
-// Update a structure
-func (n *node) Update(data interface{}) error {
- return n.update(data, func(ref *reflect.Value, current *reflect.Value, cfg *structConfig) error {
- numfield := ref.NumField()
- for i := 0; i < numfield; i++ {
- f := ref.Field(i)
- if ref.Type().Field(i).PkgPath != "" {
- continue
- }
- zero := reflect.Zero(f.Type()).Interface()
- actual := f.Interface()
- if !reflect.DeepEqual(actual, zero) {
- cf := current.Field(i)
- cf.Set(f)
- idxInfo, ok := cfg.Fields[ref.Type().Field(i).Name]
- if ok {
- idxInfo.Value = &cf
- }
- }
- }
- return nil
- })
-}
-
-// UpdateField updates a single field
-func (n *node) UpdateField(data interface{}, fieldName string, value interface{}) error {
- return n.update(data, func(ref *reflect.Value, current *reflect.Value, cfg *structConfig) error {
- f := current.FieldByName(fieldName)
- if !f.IsValid() {
- return ErrNotFound
- }
- tf, _ := current.Type().FieldByName(fieldName)
- if tf.PkgPath != "" {
- return ErrNotFound
- }
- v := reflect.ValueOf(value)
- if v.Kind() != f.Kind() {
- return ErrIncompatibleValue
- }
- f.Set(v)
- idxInfo, ok := cfg.Fields[fieldName]
- if ok {
- idxInfo.Value = &f
- idxInfo.IsZero = isZero(idxInfo.Value)
- idxInfo.ForceUpdate = true
- }
- return nil
- })
-}
-
-func (n *node) update(data interface{}, fn func(*reflect.Value, *reflect.Value, *structConfig) error) error {
- ref := reflect.ValueOf(data)
- if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
- return ErrStructPtrNeeded
- }
-
- cfg, err := extract(&ref)
- if err != nil {
- return err
- }
-
- if cfg.ID.IsZero {
- return ErrNoID
- }
-
- current := reflect.New(reflect.Indirect(ref).Type())
-
- return n.readWriteTx(func(tx *bolt.Tx) error {
- err = n.WithTransaction(tx).One(cfg.ID.Name, cfg.ID.Value.Interface(), current.Interface())
- if err != nil {
- return err
- }
-
- ref := reflect.ValueOf(data).Elem()
- cref := current.Elem()
- err = fn(&ref, &cref, cfg)
- if err != nil {
- return err
- }
-
- return n.save(tx, cfg, current.Interface(), true)
- })
-}
-
-// Drop a bucket
-func (n *node) Drop(data interface{}) error {
- var bucketName string
-
- v := reflect.ValueOf(data)
- if v.Kind() != reflect.String {
- info, err := extract(&v)
- if err != nil {
- return err
- }
-
- bucketName = info.Name
- } else {
- bucketName = v.Interface().(string)
- }
-
- return n.readWriteTx(func(tx *bolt.Tx) error {
- return n.drop(tx, bucketName)
- })
-}
-
-func (n *node) drop(tx *bolt.Tx, bucketName string) error {
- bucket := n.GetBucket(tx)
- if bucket == nil {
- return tx.DeleteBucket([]byte(bucketName))
- }
-
- return bucket.DeleteBucket([]byte(bucketName))
-}
-
-// DeleteStruct deletes a structure from the associated bucket
-func (n *node) DeleteStruct(data interface{}) error {
- ref := reflect.ValueOf(data)
-
- if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
- return ErrStructPtrNeeded
- }
-
- cfg, err := extract(&ref)
- if err != nil {
- return err
- }
-
- id, err := toBytes(cfg.ID.Value.Interface(), n.codec)
- if err != nil {
- return err
- }
-
- return n.readWriteTx(func(tx *bolt.Tx) error {
- return n.deleteStruct(tx, cfg, id)
- })
-}
-
-func (n *node) deleteStruct(tx *bolt.Tx, cfg *structConfig, id []byte) error {
- bucket := n.GetBucket(tx, cfg.Name)
- if bucket == nil {
- return ErrNotFound
- }
-
- for fieldName, fieldCfg := range cfg.Fields {
- if fieldCfg.Index == "" {
- continue
- }
-
- idx, err := getIndex(bucket, fieldCfg.Index, fieldName)
- if err != nil {
- return err
- }
-
- err = idx.RemoveID(id)
- if err != nil {
- if err == index.ErrNotFound {
- return ErrNotFound
- }
- return err
- }
- }
-
- raw := bucket.Get(id)
- if raw == nil {
- return ErrNotFound
- }
-
- return bucket.Delete(id)
-}
diff --git a/vendor/github.com/asdine/storm/v3/storm.go b/vendor/github.com/asdine/storm/v3/storm.go
deleted file mode 100644
index 88f16ea1..00000000
--- a/vendor/github.com/asdine/storm/v3/storm.go
+++ /dev/null
@@ -1,142 +0,0 @@
-package storm
-
-import (
- "bytes"
- "encoding/binary"
- "time"
-
- "github.com/asdine/storm/v3/codec"
- "github.com/asdine/storm/v3/codec/json"
- bolt "go.etcd.io/bbolt"
-)
-
-const (
- dbinfo = "__storm_db"
- metadataBucket = "__storm_metadata"
-)
-
-// Defaults to json
-var defaultCodec = json.Codec
-
-// Open opens a database at the given path with optional Storm options.
-func Open(path string, stormOptions ...func(*Options) error) (*DB, error) {
- var err error
-
- var opts Options
- for _, option := range stormOptions {
- if err = option(&opts); err != nil {
- return nil, err
- }
- }
-
- s := DB{
- Bolt: opts.bolt,
- }
-
- n := node{
- s: &s,
- codec: opts.codec,
- batchMode: opts.batchMode,
- rootBucket: opts.rootBucket,
- }
-
- if n.codec == nil {
- n.codec = defaultCodec
- }
-
- if opts.boltMode == 0 {
- opts.boltMode = 0600
- }
-
- if opts.boltOptions == nil {
- opts.boltOptions = &bolt.Options{Timeout: 1 * time.Second}
- }
-
- s.Node = &n
-
- // skip if UseDB option is used
- if s.Bolt == nil {
- s.Bolt, err = bolt.Open(path, opts.boltMode, opts.boltOptions)
- if err != nil {
- return nil, err
- }
- }
-
- err = s.checkVersion()
- if err != nil {
- return nil, err
- }
-
- return &s, nil
-}
-
-// DB is the wrapper around BoltDB. It contains an instance of BoltDB and uses it to perform all the
-// needed operations
-type DB struct {
- // The root node that points to the root bucket.
- Node
-
- // Bolt is still easily accessible
- Bolt *bolt.DB
-}
-
-// Close the database
-func (s *DB) Close() error {
- return s.Bolt.Close()
-}
-
-func (s *DB) checkVersion() error {
- var v string
- err := s.Get(dbinfo, "version", &v)
- if err != nil && err != ErrNotFound {
- return err
- }
-
- // for now, we only set the current version if it doesn't exist.
- // v1 and v2 database files are compatible.
- if v == "" {
- return s.Set(dbinfo, "version", Version)
- }
-
- return nil
-}
-
-// toBytes turns an interface into a slice of bytes
-func toBytes(key interface{}, codec codec.MarshalUnmarshaler) ([]byte, error) {
- if key == nil {
- return nil, nil
- }
- switch t := key.(type) {
- case []byte:
- return t, nil
- case string:
- return []byte(t), nil
- case int:
- return numbertob(int64(t))
- case uint:
- return numbertob(uint64(t))
- case int8, int16, int32, int64, uint8, uint16, uint32, uint64:
- return numbertob(t)
- default:
- return codec.Marshal(key)
- }
-}
-
-func numbertob(v interface{}) ([]byte, error) {
- var buf bytes.Buffer
- err := binary.Write(&buf, binary.BigEndian, v)
- if err != nil {
- return nil, err
- }
- return buf.Bytes(), nil
-}
-
-func numberfromb(raw []byte) (int64, error) {
- r := bytes.NewReader(raw)
- var to int64
- err := binary.Read(r, binary.BigEndian, &to)
- if err != nil {
- return 0, err
- }
- return to, nil
-}
diff --git a/vendor/github.com/asdine/storm/v3/transaction.go b/vendor/github.com/asdine/storm/v3/transaction.go
deleted file mode 100644
index c1f81838..00000000
--- a/vendor/github.com/asdine/storm/v3/transaction.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package storm
-
-import bolt "go.etcd.io/bbolt"
-
-// Tx is a transaction.
-type Tx interface {
- // Commit writes all changes to disk.
- Commit() error
-
- // Rollback closes the transaction and ignores all previous updates.
- Rollback() error
-}
-
-// Begin starts a new transaction.
-func (n node) Begin(writable bool) (Node, error) {
- var err error
-
- n.tx, err = n.s.Bolt.Begin(writable)
- if err != nil {
- return nil, err
- }
-
- return &n, nil
-}
-
-// Rollback closes the transaction and ignores all previous updates.
-func (n *node) Rollback() error {
- if n.tx == nil {
- return ErrNotInTransaction
- }
-
- err := n.tx.Rollback()
- if err == bolt.ErrTxClosed {
- return ErrNotInTransaction
- }
-
- return err
-}
-
-// Commit writes all changes to disk.
-func (n *node) Commit() error {
- if n.tx == nil {
- return ErrNotInTransaction
- }
-
- err := n.tx.Commit()
- if err == bolt.ErrTxClosed {
- return ErrNotInTransaction
- }
-
- return err
-}
diff --git a/vendor/github.com/asdine/storm/v3/version.go b/vendor/github.com/asdine/storm/v3/version.go
deleted file mode 100644
index c8e89607..00000000
--- a/vendor/github.com/asdine/storm/v3/version.go
+++ /dev/null
@@ -1,4 +0,0 @@
-package storm
-
-// Version of Storm
-const Version = "2.0.0"
diff --git a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt
deleted file mode 100644
index 24b53065..00000000
--- a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-Copyright (c) 2016 Caleb Spare
-
-MIT License
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md
deleted file mode 100644
index 792b4a60..00000000
--- a/vendor/github.com/cespare/xxhash/v2/README.md
+++ /dev/null
@@ -1,69 +0,0 @@
-# xxhash
-
-[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2)
-[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml)
-
-xxhash is a Go implementation of the 64-bit
-[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
-high-quality hashing algorithm that is much faster than anything in the Go
-standard library.
-
-This package provides a straightforward API:
-
-```
-func Sum64(b []byte) uint64
-func Sum64String(s string) uint64
-type Digest struct{ ... }
- func New() *Digest
-```
-
-The `Digest` type implements hash.Hash64. Its key methods are:
-
-```
-func (*Digest) Write([]byte) (int, error)
-func (*Digest) WriteString(string) (int, error)
-func (*Digest) Sum64() uint64
-```
-
-This implementation provides a fast pure-Go implementation and an even faster
-assembly implementation for amd64.
-
-## Compatibility
-
-This package is in a module and the latest code is in version 2 of the module.
-You need a version of Go with at least "minimal module compatibility" to use
-github.com/cespare/xxhash/v2:
-
-* 1.9.7+ for Go 1.9
-* 1.10.3+ for Go 1.10
-* Go 1.11 or later
-
-I recommend using the latest release of Go.
-
-## Benchmarks
-
-Here are some quick benchmarks comparing the pure-Go and assembly
-implementations of Sum64.
-
-| input size | purego | asm |
-| --- | --- | --- |
-| 5 B | 979.66 MB/s | 1291.17 MB/s |
-| 100 B | 7475.26 MB/s | 7973.40 MB/s |
-| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
-| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
-
-These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
-the following commands under Go 1.11.2:
-
-```
-$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
-$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
-```
-
-## Projects using this package
-
-- [InfluxDB](https://github.com/influxdata/influxdb)
-- [Prometheus](https://github.com/prometheus/prometheus)
-- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
-- [FreeCache](https://github.com/coocood/freecache)
-- [FastCache](https://github.com/VictoriaMetrics/fastcache)
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go
deleted file mode 100644
index 15c835d5..00000000
--- a/vendor/github.com/cespare/xxhash/v2/xxhash.go
+++ /dev/null
@@ -1,235 +0,0 @@
-// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
-// at http://cyan4973.github.io/xxHash/.
-package xxhash
-
-import (
- "encoding/binary"
- "errors"
- "math/bits"
-)
-
-const (
- prime1 uint64 = 11400714785074694791
- prime2 uint64 = 14029467366897019727
- prime3 uint64 = 1609587929392839161
- prime4 uint64 = 9650029242287828579
- prime5 uint64 = 2870177450012600261
-)
-
-// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
-// possible in the Go code is worth a small (but measurable) performance boost
-// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
-// convenience in the Go code in a few places where we need to intentionally
-// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
-// result overflows a uint64).
-var (
- prime1v = prime1
- prime2v = prime2
- prime3v = prime3
- prime4v = prime4
- prime5v = prime5
-)
-
-// Digest implements hash.Hash64.
-type Digest struct {
- v1 uint64
- v2 uint64
- v3 uint64
- v4 uint64
- total uint64
- mem [32]byte
- n int // how much of mem is used
-}
-
-// New creates a new Digest that computes the 64-bit xxHash algorithm.
-func New() *Digest {
- var d Digest
- d.Reset()
- return &d
-}
-
-// Reset clears the Digest's state so that it can be reused.
-func (d *Digest) Reset() {
- d.v1 = prime1v + prime2
- d.v2 = prime2
- d.v3 = 0
- d.v4 = -prime1v
- d.total = 0
- d.n = 0
-}
-
-// Size always returns 8 bytes.
-func (d *Digest) Size() int { return 8 }
-
-// BlockSize always returns 32 bytes.
-func (d *Digest) BlockSize() int { return 32 }
-
-// Write adds more data to d. It always returns len(b), nil.
-func (d *Digest) Write(b []byte) (n int, err error) {
- n = len(b)
- d.total += uint64(n)
-
- if d.n+n < 32 {
- // This new data doesn't even fill the current block.
- copy(d.mem[d.n:], b)
- d.n += n
- return
- }
-
- if d.n > 0 {
- // Finish off the partial block.
- copy(d.mem[d.n:], b)
- d.v1 = round(d.v1, u64(d.mem[0:8]))
- d.v2 = round(d.v2, u64(d.mem[8:16]))
- d.v3 = round(d.v3, u64(d.mem[16:24]))
- d.v4 = round(d.v4, u64(d.mem[24:32]))
- b = b[32-d.n:]
- d.n = 0
- }
-
- if len(b) >= 32 {
- // One or more full blocks left.
- nw := writeBlocks(d, b)
- b = b[nw:]
- }
-
- // Store any remaining partial block.
- copy(d.mem[:], b)
- d.n = len(b)
-
- return
-}
-
-// Sum appends the current hash to b and returns the resulting slice.
-func (d *Digest) Sum(b []byte) []byte {
- s := d.Sum64()
- return append(
- b,
- byte(s>>56),
- byte(s>>48),
- byte(s>>40),
- byte(s>>32),
- byte(s>>24),
- byte(s>>16),
- byte(s>>8),
- byte(s),
- )
-}
-
-// Sum64 returns the current hash.
-func (d *Digest) Sum64() uint64 {
- var h uint64
-
- if d.total >= 32 {
- v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
- h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
- h = mergeRound(h, v1)
- h = mergeRound(h, v2)
- h = mergeRound(h, v3)
- h = mergeRound(h, v4)
- } else {
- h = d.v3 + prime5
- }
-
- h += d.total
-
- i, end := 0, d.n
- for ; i+8 <= end; i += 8 {
- k1 := round(0, u64(d.mem[i:i+8]))
- h ^= k1
- h = rol27(h)*prime1 + prime4
- }
- if i+4 <= end {
- h ^= uint64(u32(d.mem[i:i+4])) * prime1
- h = rol23(h)*prime2 + prime3
- i += 4
- }
- for i < end {
- h ^= uint64(d.mem[i]) * prime5
- h = rol11(h) * prime1
- i++
- }
-
- h ^= h >> 33
- h *= prime2
- h ^= h >> 29
- h *= prime3
- h ^= h >> 32
-
- return h
-}
-
-const (
- magic = "xxh\x06"
- marshaledSize = len(magic) + 8*5 + 32
-)
-
-// MarshalBinary implements the encoding.BinaryMarshaler interface.
-func (d *Digest) MarshalBinary() ([]byte, error) {
- b := make([]byte, 0, marshaledSize)
- b = append(b, magic...)
- b = appendUint64(b, d.v1)
- b = appendUint64(b, d.v2)
- b = appendUint64(b, d.v3)
- b = appendUint64(b, d.v4)
- b = appendUint64(b, d.total)
- b = append(b, d.mem[:d.n]...)
- b = b[:len(b)+len(d.mem)-d.n]
- return b, nil
-}
-
-// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
-func (d *Digest) UnmarshalBinary(b []byte) error {
- if len(b) < len(magic) || string(b[:len(magic)]) != magic {
- return errors.New("xxhash: invalid hash state identifier")
- }
- if len(b) != marshaledSize {
- return errors.New("xxhash: invalid hash state size")
- }
- b = b[len(magic):]
- b, d.v1 = consumeUint64(b)
- b, d.v2 = consumeUint64(b)
- b, d.v3 = consumeUint64(b)
- b, d.v4 = consumeUint64(b)
- b, d.total = consumeUint64(b)
- copy(d.mem[:], b)
- d.n = int(d.total % uint64(len(d.mem)))
- return nil
-}
-
-func appendUint64(b []byte, x uint64) []byte {
- var a [8]byte
- binary.LittleEndian.PutUint64(a[:], x)
- return append(b, a[:]...)
-}
-
-func consumeUint64(b []byte) ([]byte, uint64) {
- x := u64(b)
- return b[8:], x
-}
-
-func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
-func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
-
-func round(acc, input uint64) uint64 {
- acc += input * prime2
- acc = rol31(acc)
- acc *= prime1
- return acc
-}
-
-func mergeRound(acc, val uint64) uint64 {
- val = round(0, val)
- acc ^= val
- acc = acc*prime1 + prime4
- return acc
-}
-
-func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
-func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
-func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
-func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
-func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
-func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
-func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
-func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
deleted file mode 100644
index ad14b807..00000000
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// +build !appengine
-// +build gc
-// +build !purego
-
-package xxhash
-
-// Sum64 computes the 64-bit xxHash digest of b.
-//
-//go:noescape
-func Sum64(b []byte) uint64
-
-//go:noescape
-func writeBlocks(d *Digest, b []byte) int
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
deleted file mode 100644
index be8db5bf..00000000
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
+++ /dev/null
@@ -1,215 +0,0 @@
-// +build !appengine
-// +build gc
-// +build !purego
-
-#include "textflag.h"
-
-// Register allocation:
-// AX h
-// SI pointer to advance through b
-// DX n
-// BX loop end
-// R8 v1, k1
-// R9 v2
-// R10 v3
-// R11 v4
-// R12 tmp
-// R13 prime1v
-// R14 prime2v
-// DI prime4v
-
-// round reads from and advances the buffer pointer in SI.
-// It assumes that R13 has prime1v and R14 has prime2v.
-#define round(r) \
- MOVQ (SI), R12 \
- ADDQ $8, SI \
- IMULQ R14, R12 \
- ADDQ R12, r \
- ROLQ $31, r \
- IMULQ R13, r
-
-// mergeRound applies a merge round on the two registers acc and val.
-// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
-#define mergeRound(acc, val) \
- IMULQ R14, val \
- ROLQ $31, val \
- IMULQ R13, val \
- XORQ val, acc \
- IMULQ R13, acc \
- ADDQ DI, acc
-
-// func Sum64(b []byte) uint64
-TEXT ·Sum64(SB), NOSPLIT, $0-32
- // Load fixed primes.
- MOVQ ·prime1v(SB), R13
- MOVQ ·prime2v(SB), R14
- MOVQ ·prime4v(SB), DI
-
- // Load slice.
- MOVQ b_base+0(FP), SI
- MOVQ b_len+8(FP), DX
- LEAQ (SI)(DX*1), BX
-
- // The first loop limit will be len(b)-32.
- SUBQ $32, BX
-
- // Check whether we have at least one block.
- CMPQ DX, $32
- JLT noBlocks
-
- // Set up initial state (v1, v2, v3, v4).
- MOVQ R13, R8
- ADDQ R14, R8
- MOVQ R14, R9
- XORQ R10, R10
- XORQ R11, R11
- SUBQ R13, R11
-
- // Loop until SI > BX.
-blockLoop:
- round(R8)
- round(R9)
- round(R10)
- round(R11)
-
- CMPQ SI, BX
- JLE blockLoop
-
- MOVQ R8, AX
- ROLQ $1, AX
- MOVQ R9, R12
- ROLQ $7, R12
- ADDQ R12, AX
- MOVQ R10, R12
- ROLQ $12, R12
- ADDQ R12, AX
- MOVQ R11, R12
- ROLQ $18, R12
- ADDQ R12, AX
-
- mergeRound(AX, R8)
- mergeRound(AX, R9)
- mergeRound(AX, R10)
- mergeRound(AX, R11)
-
- JMP afterBlocks
-
-noBlocks:
- MOVQ ·prime5v(SB), AX
-
-afterBlocks:
- ADDQ DX, AX
-
- // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
- ADDQ $24, BX
-
- CMPQ SI, BX
- JG fourByte
-
-wordLoop:
- // Calculate k1.
- MOVQ (SI), R8
- ADDQ $8, SI
- IMULQ R14, R8
- ROLQ $31, R8
- IMULQ R13, R8
-
- XORQ R8, AX
- ROLQ $27, AX
- IMULQ R13, AX
- ADDQ DI, AX
-
- CMPQ SI, BX
- JLE wordLoop
-
-fourByte:
- ADDQ $4, BX
- CMPQ SI, BX
- JG singles
-
- MOVL (SI), R8
- ADDQ $4, SI
- IMULQ R13, R8
- XORQ R8, AX
-
- ROLQ $23, AX
- IMULQ R14, AX
- ADDQ ·prime3v(SB), AX
-
-singles:
- ADDQ $4, BX
- CMPQ SI, BX
- JGE finalize
-
-singlesLoop:
- MOVBQZX (SI), R12
- ADDQ $1, SI
- IMULQ ·prime5v(SB), R12
- XORQ R12, AX
-
- ROLQ $11, AX
- IMULQ R13, AX
-
- CMPQ SI, BX
- JL singlesLoop
-
-finalize:
- MOVQ AX, R12
- SHRQ $33, R12
- XORQ R12, AX
- IMULQ R14, AX
- MOVQ AX, R12
- SHRQ $29, R12
- XORQ R12, AX
- IMULQ ·prime3v(SB), AX
- MOVQ AX, R12
- SHRQ $32, R12
- XORQ R12, AX
-
- MOVQ AX, ret+24(FP)
- RET
-
-// writeBlocks uses the same registers as above except that it uses AX to store
-// the d pointer.
-
-// func writeBlocks(d *Digest, b []byte) int
-TEXT ·writeBlocks(SB), NOSPLIT, $0-40
- // Load fixed primes needed for round.
- MOVQ ·prime1v(SB), R13
- MOVQ ·prime2v(SB), R14
-
- // Load slice.
- MOVQ b_base+8(FP), SI
- MOVQ b_len+16(FP), DX
- LEAQ (SI)(DX*1), BX
- SUBQ $32, BX
-
- // Load vN from d.
- MOVQ d+0(FP), AX
- MOVQ 0(AX), R8 // v1
- MOVQ 8(AX), R9 // v2
- MOVQ 16(AX), R10 // v3
- MOVQ 24(AX), R11 // v4
-
- // We don't need to check the loop condition here; this function is
- // always called with at least one block of data to process.
-blockLoop:
- round(R8)
- round(R9)
- round(R10)
- round(R11)
-
- CMPQ SI, BX
- JLE blockLoop
-
- // Copy vN back to d.
- MOVQ R8, 0(AX)
- MOVQ R9, 8(AX)
- MOVQ R10, 16(AX)
- MOVQ R11, 24(AX)
-
- // The number of bytes written is SI minus the old base pointer.
- SUBQ b_base+8(FP), SI
- MOVQ SI, ret+32(FP)
-
- RET
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
deleted file mode 100644
index 4a5a8216..00000000
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// +build !amd64 appengine !gc purego
-
-package xxhash
-
-// Sum64 computes the 64-bit xxHash digest of b.
-func Sum64(b []byte) uint64 {
- // A simpler version would be
- // d := New()
- // d.Write(b)
- // return d.Sum64()
- // but this is faster, particularly for small inputs.
-
- n := len(b)
- var h uint64
-
- if n >= 32 {
- v1 := prime1v + prime2
- v2 := prime2
- v3 := uint64(0)
- v4 := -prime1v
- for len(b) >= 32 {
- v1 = round(v1, u64(b[0:8:len(b)]))
- v2 = round(v2, u64(b[8:16:len(b)]))
- v3 = round(v3, u64(b[16:24:len(b)]))
- v4 = round(v4, u64(b[24:32:len(b)]))
- b = b[32:len(b):len(b)]
- }
- h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
- h = mergeRound(h, v1)
- h = mergeRound(h, v2)
- h = mergeRound(h, v3)
- h = mergeRound(h, v4)
- } else {
- h = prime5
- }
-
- h += uint64(n)
-
- i, end := 0, len(b)
- for ; i+8 <= end; i += 8 {
- k1 := round(0, u64(b[i:i+8:len(b)]))
- h ^= k1
- h = rol27(h)*prime1 + prime4
- }
- if i+4 <= end {
- h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
- h = rol23(h)*prime2 + prime3
- i += 4
- }
- for ; i < end; i++ {
- h ^= uint64(b[i]) * prime5
- h = rol11(h) * prime1
- }
-
- h ^= h >> 33
- h *= prime2
- h ^= h >> 29
- h *= prime3
- h ^= h >> 32
-
- return h
-}
-
-func writeBlocks(d *Digest, b []byte) int {
- v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
- n := len(b)
- for len(b) >= 32 {
- v1 = round(v1, u64(b[0:8:len(b)]))
- v2 = round(v2, u64(b[8:16:len(b)]))
- v3 = round(v3, u64(b[16:24:len(b)]))
- v4 = round(v4, u64(b[24:32:len(b)]))
- b = b[32:len(b):len(b)]
- }
- d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
- return n - len(b)
-}
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
deleted file mode 100644
index fc9bea7a..00000000
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// +build appengine
-
-// This file contains the safe implementations of otherwise unsafe-using code.
-
-package xxhash
-
-// Sum64String computes the 64-bit xxHash digest of s.
-func Sum64String(s string) uint64 {
- return Sum64([]byte(s))
-}
-
-// WriteString adds more data to d. It always returns len(s), nil.
-func (d *Digest) WriteString(s string) (n int, err error) {
- return d.Write([]byte(s))
-}
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
deleted file mode 100644
index 376e0ca2..00000000
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// +build !appengine
-
-// This file encapsulates usage of unsafe.
-// xxhash_safe.go contains the safe implementations.
-
-package xxhash
-
-import (
- "unsafe"
-)
-
-// In the future it's possible that compiler optimizations will make these
-// XxxString functions unnecessary by realizing that calls such as
-// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
-// If that happens, even if we keep these functions they can be replaced with
-// the trivial safe code.
-
-// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
-//
-// var b []byte
-// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
-// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
-// bh.Len = len(s)
-// bh.Cap = len(s)
-//
-// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
-// weight to this sequence of expressions that any function that uses it will
-// not be inlined. Instead, the functions below use a different unsafe
-// conversion designed to minimize the inliner weight and allow both to be
-// inlined. There is also a test (TestInlining) which verifies that these are
-// inlined.
-//
-// See https://github.com/golang/go/issues/42739 for discussion.
-
-// Sum64String computes the 64-bit xxHash digest of s.
-// It may be faster than Sum64([]byte(s)) by avoiding a copy.
-func Sum64String(s string) uint64 {
- b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
- return Sum64(b)
-}
-
-// WriteString adds more data to d. It always returns len(s), nil.
-// It may be faster than Write([]byte(s)) by avoiding a copy.
-func (d *Digest) WriteString(s string) (n int, err error) {
- d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
- // d.Write always returns len(s), nil.
- // Ignoring the return output and returning these fixed values buys a
- // savings of 6 in the inliner's cost model.
- return len(s), nil
-}
-
-// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
-// of the first two words is the same as the layout of a string.
-type sliceHeader struct {
- s string
- cap int
-}
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
deleted file mode 100644
index bc52e96f..00000000
--- a/vendor/github.com/davecgh/go-spew/LICENSE
+++ /dev/null
@@ -1,15 +0,0 @@
-ISC License
-
-Copyright (c) 2012-2016 Dave Collins
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
deleted file mode 100644
index 79299478..00000000
--- a/vendor/github.com/davecgh/go-spew/spew/bypass.go
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright (c) 2015-2016 Dave Collins
-//
-// Permission to use, copy, modify, and distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-// NOTE: Due to the following build constraints, this file will only be compiled
-// when the code is not running on Google App Engine, compiled by GopherJS, and
-// "-tags safe" is not added to the go build command line. The "disableunsafe"
-// tag is deprecated and thus should not be used.
-// Go versions prior to 1.4 are disabled because they use a different layout
-// for interfaces which make the implementation of unsafeReflectValue more complex.
-// +build !js,!appengine,!safe,!disableunsafe,go1.4
-
-package spew
-
-import (
- "reflect"
- "unsafe"
-)
-
-const (
- // UnsafeDisabled is a build-time constant which specifies whether or
- // not access to the unsafe package is available.
- UnsafeDisabled = false
-
- // ptrSize is the size of a pointer on the current arch.
- ptrSize = unsafe.Sizeof((*byte)(nil))
-)
-
-type flag uintptr
-
-var (
- // flagRO indicates whether the value field of a reflect.Value
- // is read-only.
- flagRO flag
-
- // flagAddr indicates whether the address of the reflect.Value's
- // value may be taken.
- flagAddr flag
-)
-
-// flagKindMask holds the bits that make up the kind
-// part of the flags field. In all the supported versions,
-// it is in the lower 5 bits.
-const flagKindMask = flag(0x1f)
-
-// Different versions of Go have used different
-// bit layouts for the flags type. This table
-// records the known combinations.
-var okFlags = []struct {
- ro, addr flag
-}{{
- // From Go 1.4 to 1.5
- ro: 1 << 5,
- addr: 1 << 7,
-}, {
- // Up to Go tip.
- ro: 1<<5 | 1<<6,
- addr: 1 << 8,
-}}
-
-var flagValOffset = func() uintptr {
- field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
- if !ok {
- panic("reflect.Value has no flag field")
- }
- return field.Offset
-}()
-
-// flagField returns a pointer to the flag field of a reflect.Value.
-func flagField(v *reflect.Value) *flag {
- return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
-}
-
-// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
-// the typical safety restrictions preventing access to unaddressable and
-// unexported data. It works by digging the raw pointer to the underlying
-// value out of the protected value and generating a new unprotected (unsafe)
-// reflect.Value to it.
-//
-// This allows us to check for implementations of the Stringer and error
-// interfaces to be used for pretty printing ordinarily unaddressable and
-// inaccessible values such as unexported struct fields.
-func unsafeReflectValue(v reflect.Value) reflect.Value {
- if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
- return v
- }
- flagFieldPtr := flagField(&v)
- *flagFieldPtr &^= flagRO
- *flagFieldPtr |= flagAddr
- return v
-}
-
-// Sanity checks against future reflect package changes
-// to the type or semantics of the Value.flag field.
-func init() {
- field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
- if !ok {
- panic("reflect.Value has no flag field")
- }
- if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
- panic("reflect.Value flag field has changed kind")
- }
- type t0 int
- var t struct {
- A t0
- // t0 will have flagEmbedRO set.
- t0
- // a will have flagStickyRO set
- a t0
- }
- vA := reflect.ValueOf(t).FieldByName("A")
- va := reflect.ValueOf(t).FieldByName("a")
- vt0 := reflect.ValueOf(t).FieldByName("t0")
-
- // Infer flagRO from the difference between the flags
- // for the (otherwise identical) fields in t.
- flagPublic := *flagField(&vA)
- flagWithRO := *flagField(&va) | *flagField(&vt0)
- flagRO = flagPublic ^ flagWithRO
-
- // Infer flagAddr from the difference between a value
- // taken from a pointer and not.
- vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
- flagNoPtr := *flagField(&vA)
- flagPtr := *flagField(&vPtrA)
- flagAddr = flagNoPtr ^ flagPtr
-
- // Check that the inferred flags tally with one of the known versions.
- for _, f := range okFlags {
- if flagRO == f.ro && flagAddr == f.addr {
- return
- }
- }
- panic("reflect.Value read-only flag has changed semantics")
-}
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
deleted file mode 100644
index 205c28d6..00000000
--- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright (c) 2015-2016 Dave Collins
-//
-// Permission to use, copy, modify, and distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-// NOTE: Due to the following build constraints, this file will only be compiled
-// when the code is running on Google App Engine, compiled by GopherJS, or
-// "-tags safe" is added to the go build command line. The "disableunsafe"
-// tag is deprecated and thus should not be used.
-// +build js appengine safe disableunsafe !go1.4
-
-package spew
-
-import "reflect"
-
-const (
- // UnsafeDisabled is a build-time constant which specifies whether or
- // not access to the unsafe package is available.
- UnsafeDisabled = true
-)
-
-// unsafeReflectValue typically converts the passed reflect.Value into a one
-// that bypasses the typical safety restrictions preventing access to
-// unaddressable and unexported data. However, doing this relies on access to
-// the unsafe package. This is a stub version which simply returns the passed
-// reflect.Value when the unsafe package is not available.
-func unsafeReflectValue(v reflect.Value) reflect.Value {
- return v
-}
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
deleted file mode 100644
index 1be8ce94..00000000
--- a/vendor/github.com/davecgh/go-spew/spew/common.go
+++ /dev/null
@@ -1,341 +0,0 @@
-/*
- * Copyright (c) 2013-2016 Dave Collins
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-package spew
-
-import (
- "bytes"
- "fmt"
- "io"
- "reflect"
- "sort"
- "strconv"
-)
-
-// Some constants in the form of bytes to avoid string overhead. This mirrors
-// the technique used in the fmt package.
-var (
- panicBytes = []byte("(PANIC=")
- plusBytes = []byte("+")
- iBytes = []byte("i")
- trueBytes = []byte("true")
- falseBytes = []byte("false")
- interfaceBytes = []byte("(interface {})")
- commaNewlineBytes = []byte(",\n")
- newlineBytes = []byte("\n")
- openBraceBytes = []byte("{")
- openBraceNewlineBytes = []byte("{\n")
- closeBraceBytes = []byte("}")
- asteriskBytes = []byte("*")
- colonBytes = []byte(":")
- colonSpaceBytes = []byte(": ")
- openParenBytes = []byte("(")
- closeParenBytes = []byte(")")
- spaceBytes = []byte(" ")
- pointerChainBytes = []byte("->")
- nilAngleBytes = []byte("")
- maxNewlineBytes = []byte("\n")
- maxShortBytes = []byte("")
- circularBytes = []byte("")
- circularShortBytes = []byte("")
- invalidAngleBytes = []byte("")
- openBracketBytes = []byte("[")
- closeBracketBytes = []byte("]")
- percentBytes = []byte("%")
- precisionBytes = []byte(".")
- openAngleBytes = []byte("<")
- closeAngleBytes = []byte(">")
- openMapBytes = []byte("map[")
- closeMapBytes = []byte("]")
- lenEqualsBytes = []byte("len=")
- capEqualsBytes = []byte("cap=")
-)
-
-// hexDigits is used to map a decimal value to a hex digit.
-var hexDigits = "0123456789abcdef"
-
-// catchPanic handles any panics that might occur during the handleMethods
-// calls.
-func catchPanic(w io.Writer, v reflect.Value) {
- if err := recover(); err != nil {
- w.Write(panicBytes)
- fmt.Fprintf(w, "%v", err)
- w.Write(closeParenBytes)
- }
-}
-
-// handleMethods attempts to call the Error and String methods on the underlying
-// type the passed reflect.Value represents and outputes the result to Writer w.
-//
-// It handles panics in any called methods by catching and displaying the error
-// as the formatted value.
-func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
- // We need an interface to check if the type implements the error or
- // Stringer interface. However, the reflect package won't give us an
- // interface on certain things like unexported struct fields in order
- // to enforce visibility rules. We use unsafe, when it's available,
- // to bypass these restrictions since this package does not mutate the
- // values.
- if !v.CanInterface() {
- if UnsafeDisabled {
- return false
- }
-
- v = unsafeReflectValue(v)
- }
-
- // Choose whether or not to do error and Stringer interface lookups against
- // the base type or a pointer to the base type depending on settings.
- // Technically calling one of these methods with a pointer receiver can
- // mutate the value, however, types which choose to satisify an error or
- // Stringer interface with a pointer receiver should not be mutating their
- // state inside these interface methods.
- if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
- v = unsafeReflectValue(v)
- }
- if v.CanAddr() {
- v = v.Addr()
- }
-
- // Is it an error or Stringer?
- switch iface := v.Interface().(type) {
- case error:
- defer catchPanic(w, v)
- if cs.ContinueOnMethod {
- w.Write(openParenBytes)
- w.Write([]byte(iface.Error()))
- w.Write(closeParenBytes)
- w.Write(spaceBytes)
- return false
- }
-
- w.Write([]byte(iface.Error()))
- return true
-
- case fmt.Stringer:
- defer catchPanic(w, v)
- if cs.ContinueOnMethod {
- w.Write(openParenBytes)
- w.Write([]byte(iface.String()))
- w.Write(closeParenBytes)
- w.Write(spaceBytes)
- return false
- }
- w.Write([]byte(iface.String()))
- return true
- }
- return false
-}
-
-// printBool outputs a boolean value as true or false to Writer w.
-func printBool(w io.Writer, val bool) {
- if val {
- w.Write(trueBytes)
- } else {
- w.Write(falseBytes)
- }
-}
-
-// printInt outputs a signed integer value to Writer w.
-func printInt(w io.Writer, val int64, base int) {
- w.Write([]byte(strconv.FormatInt(val, base)))
-}
-
-// printUint outputs an unsigned integer value to Writer w.
-func printUint(w io.Writer, val uint64, base int) {
- w.Write([]byte(strconv.FormatUint(val, base)))
-}
-
-// printFloat outputs a floating point value using the specified precision,
-// which is expected to be 32 or 64bit, to Writer w.
-func printFloat(w io.Writer, val float64, precision int) {
- w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
-}
-
-// printComplex outputs a complex value using the specified float precision
-// for the real and imaginary parts to Writer w.
-func printComplex(w io.Writer, c complex128, floatPrecision int) {
- r := real(c)
- w.Write(openParenBytes)
- w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
- i := imag(c)
- if i >= 0 {
- w.Write(plusBytes)
- }
- w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
- w.Write(iBytes)
- w.Write(closeParenBytes)
-}
-
-// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
-// prefix to Writer w.
-func printHexPtr(w io.Writer, p uintptr) {
- // Null pointer.
- num := uint64(p)
- if num == 0 {
- w.Write(nilAngleBytes)
- return
- }
-
- // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
- buf := make([]byte, 18)
-
- // It's simpler to construct the hex string right to left.
- base := uint64(16)
- i := len(buf) - 1
- for num >= base {
- buf[i] = hexDigits[num%base]
- num /= base
- i--
- }
- buf[i] = hexDigits[num]
-
- // Add '0x' prefix.
- i--
- buf[i] = 'x'
- i--
- buf[i] = '0'
-
- // Strip unused leading bytes.
- buf = buf[i:]
- w.Write(buf)
-}
-
-// valuesSorter implements sort.Interface to allow a slice of reflect.Value
-// elements to be sorted.
-type valuesSorter struct {
- values []reflect.Value
- strings []string // either nil or same len and values
- cs *ConfigState
-}
-
-// newValuesSorter initializes a valuesSorter instance, which holds a set of
-// surrogate keys on which the data should be sorted. It uses flags in
-// ConfigState to decide if and how to populate those surrogate keys.
-func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
- vs := &valuesSorter{values: values, cs: cs}
- if canSortSimply(vs.values[0].Kind()) {
- return vs
- }
- if !cs.DisableMethods {
- vs.strings = make([]string, len(values))
- for i := range vs.values {
- b := bytes.Buffer{}
- if !handleMethods(cs, &b, vs.values[i]) {
- vs.strings = nil
- break
- }
- vs.strings[i] = b.String()
- }
- }
- if vs.strings == nil && cs.SpewKeys {
- vs.strings = make([]string, len(values))
- for i := range vs.values {
- vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
- }
- }
- return vs
-}
-
-// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
-// directly, or whether it should be considered for sorting by surrogate keys
-// (if the ConfigState allows it).
-func canSortSimply(kind reflect.Kind) bool {
- // This switch parallels valueSortLess, except for the default case.
- switch kind {
- case reflect.Bool:
- return true
- case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
- return true
- case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
- return true
- case reflect.Float32, reflect.Float64:
- return true
- case reflect.String:
- return true
- case reflect.Uintptr:
- return true
- case reflect.Array:
- return true
- }
- return false
-}
-
-// Len returns the number of values in the slice. It is part of the
-// sort.Interface implementation.
-func (s *valuesSorter) Len() int {
- return len(s.values)
-}
-
-// Swap swaps the values at the passed indices. It is part of the
-// sort.Interface implementation.
-func (s *valuesSorter) Swap(i, j int) {
- s.values[i], s.values[j] = s.values[j], s.values[i]
- if s.strings != nil {
- s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
- }
-}
-
-// valueSortLess returns whether the first value should sort before the second
-// value. It is used by valueSorter.Less as part of the sort.Interface
-// implementation.
-func valueSortLess(a, b reflect.Value) bool {
- switch a.Kind() {
- case reflect.Bool:
- return !a.Bool() && b.Bool()
- case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
- return a.Int() < b.Int()
- case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
- return a.Uint() < b.Uint()
- case reflect.Float32, reflect.Float64:
- return a.Float() < b.Float()
- case reflect.String:
- return a.String() < b.String()
- case reflect.Uintptr:
- return a.Uint() < b.Uint()
- case reflect.Array:
- // Compare the contents of both arrays.
- l := a.Len()
- for i := 0; i < l; i++ {
- av := a.Index(i)
- bv := b.Index(i)
- if av.Interface() == bv.Interface() {
- continue
- }
- return valueSortLess(av, bv)
- }
- }
- return a.String() < b.String()
-}
-
-// Less returns whether the value at index i should sort before the
-// value at index j. It is part of the sort.Interface implementation.
-func (s *valuesSorter) Less(i, j int) bool {
- if s.strings == nil {
- return valueSortLess(s.values[i], s.values[j])
- }
- return s.strings[i] < s.strings[j]
-}
-
-// sortValues is a sort function that handles both native types and any type that
-// can be converted to error or Stringer. Other inputs are sorted according to
-// their Value.String() value to ensure display stability.
-func sortValues(values []reflect.Value, cs *ConfigState) {
- if len(values) == 0 {
- return
- }
- sort.Sort(newValuesSorter(values, cs))
-}
diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go
deleted file mode 100644
index 2e3d22f3..00000000
--- a/vendor/github.com/davecgh/go-spew/spew/config.go
+++ /dev/null
@@ -1,306 +0,0 @@
-/*
- * Copyright (c) 2013-2016 Dave Collins
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-package spew
-
-import (
- "bytes"
- "fmt"
- "io"
- "os"
-)
-
-// ConfigState houses the configuration options used by spew to format and
-// display values. There is a global instance, Config, that is used to control
-// all top-level Formatter and Dump functionality. Each ConfigState instance
-// provides methods equivalent to the top-level functions.
-//
-// The zero value for ConfigState provides no indentation. You would typically
-// want to set it to a space or a tab.
-//
-// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
-// with default settings. See the documentation of NewDefaultConfig for default
-// values.
-type ConfigState struct {
- // Indent specifies the string to use for each indentation level. The
- // global config instance that all top-level functions use set this to a
- // single space by default. If you would like more indentation, you might
- // set this to a tab with "\t" or perhaps two spaces with " ".
- Indent string
-
- // MaxDepth controls the maximum number of levels to descend into nested
- // data structures. The default, 0, means there is no limit.
- //
- // NOTE: Circular data structures are properly detected, so it is not
- // necessary to set this value unless you specifically want to limit deeply
- // nested data structures.
- MaxDepth int
-
- // DisableMethods specifies whether or not error and Stringer interfaces are
- // invoked for types that implement them.
- DisableMethods bool
-
- // DisablePointerMethods specifies whether or not to check for and invoke
- // error and Stringer interfaces on types which only accept a pointer
- // receiver when the current type is not a pointer.
- //
- // NOTE: This might be an unsafe action since calling one of these methods
- // with a pointer receiver could technically mutate the value, however,
- // in practice, types which choose to satisify an error or Stringer
- // interface with a pointer receiver should not be mutating their state
- // inside these interface methods. As a result, this option relies on
- // access to the unsafe package, so it will not have any effect when
- // running in environments without access to the unsafe package such as
- // Google App Engine or with the "safe" build tag specified.
- DisablePointerMethods bool
-
- // DisablePointerAddresses specifies whether to disable the printing of
- // pointer addresses. This is useful when diffing data structures in tests.
- DisablePointerAddresses bool
-
- // DisableCapacities specifies whether to disable the printing of capacities
- // for arrays, slices, maps and channels. This is useful when diffing
- // data structures in tests.
- DisableCapacities bool
-
- // ContinueOnMethod specifies whether or not recursion should continue once
- // a custom error or Stringer interface is invoked. The default, false,
- // means it will print the results of invoking the custom error or Stringer
- // interface and return immediately instead of continuing to recurse into
- // the internals of the data type.
- //
- // NOTE: This flag does not have any effect if method invocation is disabled
- // via the DisableMethods or DisablePointerMethods options.
- ContinueOnMethod bool
-
- // SortKeys specifies map keys should be sorted before being printed. Use
- // this to have a more deterministic, diffable output. Note that only
- // native types (bool, int, uint, floats, uintptr and string) and types
- // that support the error or Stringer interfaces (if methods are
- // enabled) are supported, with other types sorted according to the
- // reflect.Value.String() output which guarantees display stability.
- SortKeys bool
-
- // SpewKeys specifies that, as a last resort attempt, map keys should
- // be spewed to strings and sorted by those strings. This is only
- // considered if SortKeys is true.
- SpewKeys bool
-}
-
-// Config is the active configuration of the top-level functions.
-// The configuration can be changed by modifying the contents of spew.Config.
-var Config = ConfigState{Indent: " "}
-
-// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
-// passed with a Formatter interface returned by c.NewFormatter. It returns
-// the formatted string as a value that satisfies error. See NewFormatter
-// for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
- return fmt.Errorf(format, c.convertArgs(a)...)
-}
-
-// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
-// passed with a Formatter interface returned by c.NewFormatter. It returns
-// the number of bytes written and any write error encountered. See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
- return fmt.Fprint(w, c.convertArgs(a)...)
-}
-
-// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
-// passed with a Formatter interface returned by c.NewFormatter. It returns
-// the number of bytes written and any write error encountered. See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
- return fmt.Fprintf(w, format, c.convertArgs(a)...)
-}
-
-// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
-// passed with a Formatter interface returned by c.NewFormatter. See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
- return fmt.Fprintln(w, c.convertArgs(a)...)
-}
-
-// Print is a wrapper for fmt.Print that treats each argument as if it were
-// passed with a Formatter interface returned by c.NewFormatter. It returns
-// the number of bytes written and any write error encountered. See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
- return fmt.Print(c.convertArgs(a)...)
-}
-
-// Printf is a wrapper for fmt.Printf that treats each argument as if it were
-// passed with a Formatter interface returned by c.NewFormatter. It returns
-// the number of bytes written and any write error encountered. See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
- return fmt.Printf(format, c.convertArgs(a)...)
-}
-
-// Println is a wrapper for fmt.Println that treats each argument as if it were
-// passed with a Formatter interface returned by c.NewFormatter. It returns
-// the number of bytes written and any write error encountered. See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
- return fmt.Println(c.convertArgs(a)...)
-}
-
-// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
-// passed with a Formatter interface returned by c.NewFormatter. It returns
-// the resulting string. See NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Sprint(a ...interface{}) string {
- return fmt.Sprint(c.convertArgs(a)...)
-}
-
-// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
-// passed with a Formatter interface returned by c.NewFormatter. It returns
-// the resulting string. See NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
- return fmt.Sprintf(format, c.convertArgs(a)...)
-}
-
-// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
-// were passed with a Formatter interface returned by c.NewFormatter. It
-// returns the resulting string. See NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Sprintln(a ...interface{}) string {
- return fmt.Sprintln(c.convertArgs(a)...)
-}
-
-/*
-NewFormatter returns a custom formatter that satisfies the fmt.Formatter
-interface. As a result, it integrates cleanly with standard fmt package
-printing functions. The formatter is useful for inline printing of smaller data
-types similar to the standard %v format specifier.
-
-The custom formatter only responds to the %v (most compact), %+v (adds pointer
-addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
-combinations. Any other verbs such as %x and %q will be sent to the the
-standard fmt package for formatting. In addition, the custom formatter ignores
-the width and precision arguments (however they will still work on the format
-specifiers not handled by the custom formatter).
-
-Typically this function shouldn't be called directly. It is much easier to make
-use of the custom formatter by calling one of the convenience functions such as
-c.Printf, c.Println, or c.Printf.
-*/
-func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
- return newFormatter(c, v)
-}
-
-// Fdump formats and displays the passed arguments to io.Writer w. It formats
-// exactly the same as Dump.
-func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
- fdump(c, w, a...)
-}
-
-/*
-Dump displays the passed parameters to standard out with newlines, customizable
-indentation, and additional debug information such as complete types and all
-pointer addresses used to indirect to the final value. It provides the
-following features over the built-in printing facilities provided by the fmt
-package:
-
- * Pointers are dereferenced and followed
- * Circular data structures are detected and handled properly
- * Custom Stringer/error interfaces are optionally invoked, including
- on unexported types
- * Custom types which only implement the Stringer/error interfaces via
- a pointer receiver are optionally invoked when passing non-pointer
- variables
- * Byte arrays and slices are dumped like the hexdump -C command which
- includes offsets, byte values in hex, and ASCII output
-
-The configuration options are controlled by modifying the public members
-of c. See ConfigState for options documentation.
-
-See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
-get the formatted result as a string.
-*/
-func (c *ConfigState) Dump(a ...interface{}) {
- fdump(c, os.Stdout, a...)
-}
-
-// Sdump returns a string with the passed arguments formatted exactly the same
-// as Dump.
-func (c *ConfigState) Sdump(a ...interface{}) string {
- var buf bytes.Buffer
- fdump(c, &buf, a...)
- return buf.String()
-}
-
-// convertArgs accepts a slice of arguments and returns a slice of the same
-// length with each argument converted to a spew Formatter interface using
-// the ConfigState associated with s.
-func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
- formatters = make([]interface{}, len(args))
- for index, arg := range args {
- formatters[index] = newFormatter(c, arg)
- }
- return formatters
-}
-
-// NewDefaultConfig returns a ConfigState with the following default settings.
-//
-// Indent: " "
-// MaxDepth: 0
-// DisableMethods: false
-// DisablePointerMethods: false
-// ContinueOnMethod: false
-// SortKeys: false
-func NewDefaultConfig() *ConfigState {
- return &ConfigState{Indent: " "}
-}
diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go
deleted file mode 100644
index aacaac6f..00000000
--- a/vendor/github.com/davecgh/go-spew/spew/doc.go
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * Copyright (c) 2013-2016 Dave Collins
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
-Package spew implements a deep pretty printer for Go data structures to aid in
-debugging.
-
-A quick overview of the additional features spew provides over the built-in
-printing facilities for Go data types are as follows:
-
- * Pointers are dereferenced and followed
- * Circular data structures are detected and handled properly
- * Custom Stringer/error interfaces are optionally invoked, including
- on unexported types
- * Custom types which only implement the Stringer/error interfaces via
- a pointer receiver are optionally invoked when passing non-pointer
- variables
- * Byte arrays and slices are dumped like the hexdump -C command which
- includes offsets, byte values in hex, and ASCII output (only when using
- Dump style)
-
-There are two different approaches spew allows for dumping Go data structures:
-
- * Dump style which prints with newlines, customizable indentation,
- and additional debug information such as types and all pointer addresses
- used to indirect to the final value
- * A custom Formatter interface that integrates cleanly with the standard fmt
- package and replaces %v, %+v, %#v, and %#+v to provide inline printing
- similar to the default %v while providing the additional functionality
- outlined above and passing unsupported format verbs such as %x and %q
- along to fmt
-
-Quick Start
-
-This section demonstrates how to quickly get started with spew. See the
-sections below for further details on formatting and configuration options.
-
-To dump a variable with full newlines, indentation, type, and pointer
-information use Dump, Fdump, or Sdump:
- spew.Dump(myVar1, myVar2, ...)
- spew.Fdump(someWriter, myVar1, myVar2, ...)
- str := spew.Sdump(myVar1, myVar2, ...)
-
-Alternatively, if you would prefer to use format strings with a compacted inline
-printing style, use the convenience wrappers Printf, Fprintf, etc with
-%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
-%#+v (adds types and pointer addresses):
- spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
- spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
- spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
- spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
-
-Configuration Options
-
-Configuration of spew is handled by fields in the ConfigState type. For
-convenience, all of the top-level functions use a global state available
-via the spew.Config global.
-
-It is also possible to create a ConfigState instance that provides methods
-equivalent to the top-level functions. This allows concurrent configuration
-options. See the ConfigState documentation for more details.
-
-The following configuration options are available:
- * Indent
- String to use for each indentation level for Dump functions.
- It is a single space by default. A popular alternative is "\t".
-
- * MaxDepth
- Maximum number of levels to descend into nested data structures.
- There is no limit by default.
-
- * DisableMethods
- Disables invocation of error and Stringer interface methods.
- Method invocation is enabled by default.
-
- * DisablePointerMethods
- Disables invocation of error and Stringer interface methods on types
- which only accept pointer receivers from non-pointer variables.
- Pointer method invocation is enabled by default.
-
- * DisablePointerAddresses
- DisablePointerAddresses specifies whether to disable the printing of
- pointer addresses. This is useful when diffing data structures in tests.
-
- * DisableCapacities
- DisableCapacities specifies whether to disable the printing of
- capacities for arrays, slices, maps and channels. This is useful when
- diffing data structures in tests.
-
- * ContinueOnMethod
- Enables recursion into types after invoking error and Stringer interface
- methods. Recursion after method invocation is disabled by default.
-
- * SortKeys
- Specifies map keys should be sorted before being printed. Use
- this to have a more deterministic, diffable output. Note that
- only native types (bool, int, uint, floats, uintptr and string)
- and types which implement error or Stringer interfaces are
- supported with other types sorted according to the
- reflect.Value.String() output which guarantees display
- stability. Natural map order is used by default.
-
- * SpewKeys
- Specifies that, as a last resort attempt, map keys should be
- spewed to strings and sorted by those strings. This is only
- considered if SortKeys is true.
-
-Dump Usage
-
-Simply call spew.Dump with a list of variables you want to dump:
-
- spew.Dump(myVar1, myVar2, ...)
-
-You may also call spew.Fdump if you would prefer to output to an arbitrary
-io.Writer. For example, to dump to standard error:
-
- spew.Fdump(os.Stderr, myVar1, myVar2, ...)
-
-A third option is to call spew.Sdump to get the formatted output as a string:
-
- str := spew.Sdump(myVar1, myVar2, ...)
-
-Sample Dump Output
-
-See the Dump example for details on the setup of the types and variables being
-shown here.
-
- (main.Foo) {
- unexportedField: (*main.Bar)(0xf84002e210)({
- flag: (main.Flag) flagTwo,
- data: (uintptr)
- }),
- ExportedField: (map[interface {}]interface {}) (len=1) {
- (string) (len=3) "one": (bool) true
- }
- }
-
-Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
-command as shown.
- ([]uint8) (len=32 cap=32) {
- 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
- 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
- 00000020 31 32 |12|
- }
-
-Custom Formatter
-
-Spew provides a custom formatter that implements the fmt.Formatter interface
-so that it integrates cleanly with standard fmt package printing functions. The
-formatter is useful for inline printing of smaller data types similar to the
-standard %v format specifier.
-
-The custom formatter only responds to the %v (most compact), %+v (adds pointer
-addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
-combinations. Any other verbs such as %x and %q will be sent to the the
-standard fmt package for formatting. In addition, the custom formatter ignores
-the width and precision arguments (however they will still work on the format
-specifiers not handled by the custom formatter).
-
-Custom Formatter Usage
-
-The simplest way to make use of the spew custom formatter is to call one of the
-convenience functions such as spew.Printf, spew.Println, or spew.Printf. The
-functions have syntax you are most likely already familiar with:
-
- spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
- spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
- spew.Println(myVar, myVar2)
- spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
- spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
-
-See the Index for the full list convenience functions.
-
-Sample Formatter Output
-
-Double pointer to a uint8:
- %v: <**>5
- %+v: <**>(0xf8400420d0->0xf8400420c8)5
- %#v: (**uint8)5
- %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
-
-Pointer to circular struct with a uint8 field and a pointer to itself:
- %v: <*>{1 <*>}
- %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)}
- %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)}
- %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)}
-
-See the Printf example for details on the setup of variables being shown
-here.
-
-Errors
-
-Since it is possible for custom Stringer/error interfaces to panic, spew
-detects them and handles them internally by printing the panic information
-inline with the output. Since spew is intended to provide deep pretty printing
-capabilities on structures, it intentionally does not return any errors.
-*/
-package spew
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
deleted file mode 100644
index f78d89fc..00000000
--- a/vendor/github.com/davecgh/go-spew/spew/dump.go
+++ /dev/null
@@ -1,509 +0,0 @@
-/*
- * Copyright (c) 2013-2016 Dave Collins
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-package spew
-
-import (
- "bytes"
- "encoding/hex"
- "fmt"
- "io"
- "os"
- "reflect"
- "regexp"
- "strconv"
- "strings"
-)
-
-var (
- // uint8Type is a reflect.Type representing a uint8. It is used to
- // convert cgo types to uint8 slices for hexdumping.
- uint8Type = reflect.TypeOf(uint8(0))
-
- // cCharRE is a regular expression that matches a cgo char.
- // It is used to detect character arrays to hexdump them.
- cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
-
- // cUnsignedCharRE is a regular expression that matches a cgo unsigned
- // char. It is used to detect unsigned character arrays to hexdump
- // them.
- cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
-
- // cUint8tCharRE is a regular expression that matches a cgo uint8_t.
- // It is used to detect uint8_t arrays to hexdump them.
- cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
-)
-
-// dumpState contains information about the state of a dump operation.
-type dumpState struct {
- w io.Writer
- depth int
- pointers map[uintptr]int
- ignoreNextType bool
- ignoreNextIndent bool
- cs *ConfigState
-}
-
-// indent performs indentation according to the depth level and cs.Indent
-// option.
-func (d *dumpState) indent() {
- if d.ignoreNextIndent {
- d.ignoreNextIndent = false
- return
- }
- d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
-}
-
-// unpackValue returns values inside of non-nil interfaces when possible.
-// This is useful for data types like structs, arrays, slices, and maps which
-// can contain varying types packed inside an interface.
-func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
- if v.Kind() == reflect.Interface && !v.IsNil() {
- v = v.Elem()
- }
- return v
-}
-
-// dumpPtr handles formatting of pointers by indirecting them as necessary.
-func (d *dumpState) dumpPtr(v reflect.Value) {
- // Remove pointers at or below the current depth from map used to detect
- // circular refs.
- for k, depth := range d.pointers {
- if depth >= d.depth {
- delete(d.pointers, k)
- }
- }
-
- // Keep list of all dereferenced pointers to show later.
- pointerChain := make([]uintptr, 0)
-
- // Figure out how many levels of indirection there are by dereferencing
- // pointers and unpacking interfaces down the chain while detecting circular
- // references.
- nilFound := false
- cycleFound := false
- indirects := 0
- ve := v
- for ve.Kind() == reflect.Ptr {
- if ve.IsNil() {
- nilFound = true
- break
- }
- indirects++
- addr := ve.Pointer()
- pointerChain = append(pointerChain, addr)
- if pd, ok := d.pointers[addr]; ok && pd < d.depth {
- cycleFound = true
- indirects--
- break
- }
- d.pointers[addr] = d.depth
-
- ve = ve.Elem()
- if ve.Kind() == reflect.Interface {
- if ve.IsNil() {
- nilFound = true
- break
- }
- ve = ve.Elem()
- }
- }
-
- // Display type information.
- d.w.Write(openParenBytes)
- d.w.Write(bytes.Repeat(asteriskBytes, indirects))
- d.w.Write([]byte(ve.Type().String()))
- d.w.Write(closeParenBytes)
-
- // Display pointer information.
- if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
- d.w.Write(openParenBytes)
- for i, addr := range pointerChain {
- if i > 0 {
- d.w.Write(pointerChainBytes)
- }
- printHexPtr(d.w, addr)
- }
- d.w.Write(closeParenBytes)
- }
-
- // Display dereferenced value.
- d.w.Write(openParenBytes)
- switch {
- case nilFound:
- d.w.Write(nilAngleBytes)
-
- case cycleFound:
- d.w.Write(circularBytes)
-
- default:
- d.ignoreNextType = true
- d.dump(ve)
- }
- d.w.Write(closeParenBytes)
-}
-
-// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
-// reflection) arrays and slices are dumped in hexdump -C fashion.
-func (d *dumpState) dumpSlice(v reflect.Value) {
- // Determine whether this type should be hex dumped or not. Also,
- // for types which should be hexdumped, try to use the underlying data
- // first, then fall back to trying to convert them to a uint8 slice.
- var buf []uint8
- doConvert := false
- doHexDump := false
- numEntries := v.Len()
- if numEntries > 0 {
- vt := v.Index(0).Type()
- vts := vt.String()
- switch {
- // C types that need to be converted.
- case cCharRE.MatchString(vts):
- fallthrough
- case cUnsignedCharRE.MatchString(vts):
- fallthrough
- case cUint8tCharRE.MatchString(vts):
- doConvert = true
-
- // Try to use existing uint8 slices and fall back to converting
- // and copying if that fails.
- case vt.Kind() == reflect.Uint8:
- // We need an addressable interface to convert the type
- // to a byte slice. However, the reflect package won't
- // give us an interface on certain things like
- // unexported struct fields in order to enforce
- // visibility rules. We use unsafe, when available, to
- // bypass these restrictions since this package does not
- // mutate the values.
- vs := v
- if !vs.CanInterface() || !vs.CanAddr() {
- vs = unsafeReflectValue(vs)
- }
- if !UnsafeDisabled {
- vs = vs.Slice(0, numEntries)
-
- // Use the existing uint8 slice if it can be
- // type asserted.
- iface := vs.Interface()
- if slice, ok := iface.([]uint8); ok {
- buf = slice
- doHexDump = true
- break
- }
- }
-
- // The underlying data needs to be converted if it can't
- // be type asserted to a uint8 slice.
- doConvert = true
- }
-
- // Copy and convert the underlying type if needed.
- if doConvert && vt.ConvertibleTo(uint8Type) {
- // Convert and copy each element into a uint8 byte
- // slice.
- buf = make([]uint8, numEntries)
- for i := 0; i < numEntries; i++ {
- vv := v.Index(i)
- buf[i] = uint8(vv.Convert(uint8Type).Uint())
- }
- doHexDump = true
- }
- }
-
- // Hexdump the entire slice as needed.
- if doHexDump {
- indent := strings.Repeat(d.cs.Indent, d.depth)
- str := indent + hex.Dump(buf)
- str = strings.Replace(str, "\n", "\n"+indent, -1)
- str = strings.TrimRight(str, d.cs.Indent)
- d.w.Write([]byte(str))
- return
- }
-
- // Recursively call dump for each item.
- for i := 0; i < numEntries; i++ {
- d.dump(d.unpackValue(v.Index(i)))
- if i < (numEntries - 1) {
- d.w.Write(commaNewlineBytes)
- } else {
- d.w.Write(newlineBytes)
- }
- }
-}
-
-// dump is the main workhorse for dumping a value. It uses the passed reflect
-// value to figure out what kind of object we are dealing with and formats it
-// appropriately. It is a recursive function, however circular data structures
-// are detected and handled properly.
-func (d *dumpState) dump(v reflect.Value) {
- // Handle invalid reflect values immediately.
- kind := v.Kind()
- if kind == reflect.Invalid {
- d.w.Write(invalidAngleBytes)
- return
- }
-
- // Handle pointers specially.
- if kind == reflect.Ptr {
- d.indent()
- d.dumpPtr(v)
- return
- }
-
- // Print type information unless already handled elsewhere.
- if !d.ignoreNextType {
- d.indent()
- d.w.Write(openParenBytes)
- d.w.Write([]byte(v.Type().String()))
- d.w.Write(closeParenBytes)
- d.w.Write(spaceBytes)
- }
- d.ignoreNextType = false
-
- // Display length and capacity if the built-in len and cap functions
- // work with the value's kind and the len/cap itself is non-zero.
- valueLen, valueCap := 0, 0
- switch v.Kind() {
- case reflect.Array, reflect.Slice, reflect.Chan:
- valueLen, valueCap = v.Len(), v.Cap()
- case reflect.Map, reflect.String:
- valueLen = v.Len()
- }
- if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
- d.w.Write(openParenBytes)
- if valueLen != 0 {
- d.w.Write(lenEqualsBytes)
- printInt(d.w, int64(valueLen), 10)
- }
- if !d.cs.DisableCapacities && valueCap != 0 {
- if valueLen != 0 {
- d.w.Write(spaceBytes)
- }
- d.w.Write(capEqualsBytes)
- printInt(d.w, int64(valueCap), 10)
- }
- d.w.Write(closeParenBytes)
- d.w.Write(spaceBytes)
- }
-
- // Call Stringer/error interfaces if they exist and the handle methods flag
- // is enabled
- if !d.cs.DisableMethods {
- if (kind != reflect.Invalid) && (kind != reflect.Interface) {
- if handled := handleMethods(d.cs, d.w, v); handled {
- return
- }
- }
- }
-
- switch kind {
- case reflect.Invalid:
- // Do nothing. We should never get here since invalid has already
- // been handled above.
-
- case reflect.Bool:
- printBool(d.w, v.Bool())
-
- case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
- printInt(d.w, v.Int(), 10)
-
- case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
- printUint(d.w, v.Uint(), 10)
-
- case reflect.Float32:
- printFloat(d.w, v.Float(), 32)
-
- case reflect.Float64:
- printFloat(d.w, v.Float(), 64)
-
- case reflect.Complex64:
- printComplex(d.w, v.Complex(), 32)
-
- case reflect.Complex128:
- printComplex(d.w, v.Complex(), 64)
-
- case reflect.Slice:
- if v.IsNil() {
- d.w.Write(nilAngleBytes)
- break
- }
- fallthrough
-
- case reflect.Array:
- d.w.Write(openBraceNewlineBytes)
- d.depth++
- if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
- d.indent()
- d.w.Write(maxNewlineBytes)
- } else {
- d.dumpSlice(v)
- }
- d.depth--
- d.indent()
- d.w.Write(closeBraceBytes)
-
- case reflect.String:
- d.w.Write([]byte(strconv.Quote(v.String())))
-
- case reflect.Interface:
- // The only time we should get here is for nil interfaces due to
- // unpackValue calls.
- if v.IsNil() {
- d.w.Write(nilAngleBytes)
- }
-
- case reflect.Ptr:
- // Do nothing. We should never get here since pointers have already
- // been handled above.
-
- case reflect.Map:
- // nil maps should be indicated as different than empty maps
- if v.IsNil() {
- d.w.Write(nilAngleBytes)
- break
- }
-
- d.w.Write(openBraceNewlineBytes)
- d.depth++
- if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
- d.indent()
- d.w.Write(maxNewlineBytes)
- } else {
- numEntries := v.Len()
- keys := v.MapKeys()
- if d.cs.SortKeys {
- sortValues(keys, d.cs)
- }
- for i, key := range keys {
- d.dump(d.unpackValue(key))
- d.w.Write(colonSpaceBytes)
- d.ignoreNextIndent = true
- d.dump(d.unpackValue(v.MapIndex(key)))
- if i < (numEntries - 1) {
- d.w.Write(commaNewlineBytes)
- } else {
- d.w.Write(newlineBytes)
- }
- }
- }
- d.depth--
- d.indent()
- d.w.Write(closeBraceBytes)
-
- case reflect.Struct:
- d.w.Write(openBraceNewlineBytes)
- d.depth++
- if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
- d.indent()
- d.w.Write(maxNewlineBytes)
- } else {
- vt := v.Type()
- numFields := v.NumField()
- for i := 0; i < numFields; i++ {
- d.indent()
- vtf := vt.Field(i)
- d.w.Write([]byte(vtf.Name))
- d.w.Write(colonSpaceBytes)
- d.ignoreNextIndent = true
- d.dump(d.unpackValue(v.Field(i)))
- if i < (numFields - 1) {
- d.w.Write(commaNewlineBytes)
- } else {
- d.w.Write(newlineBytes)
- }
- }
- }
- d.depth--
- d.indent()
- d.w.Write(closeBraceBytes)
-
- case reflect.Uintptr:
- printHexPtr(d.w, uintptr(v.Uint()))
-
- case reflect.UnsafePointer, reflect.Chan, reflect.Func:
- printHexPtr(d.w, v.Pointer())
-
- // There were not any other types at the time this code was written, but
- // fall back to letting the default fmt package handle it in case any new
- // types are added.
- default:
- if v.CanInterface() {
- fmt.Fprintf(d.w, "%v", v.Interface())
- } else {
- fmt.Fprintf(d.w, "%v", v.String())
- }
- }
-}
-
-// fdump is a helper function to consolidate the logic from the various public
-// methods which take varying writers and config states.
-func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
- for _, arg := range a {
- if arg == nil {
- w.Write(interfaceBytes)
- w.Write(spaceBytes)
- w.Write(nilAngleBytes)
- w.Write(newlineBytes)
- continue
- }
-
- d := dumpState{w: w, cs: cs}
- d.pointers = make(map[uintptr]int)
- d.dump(reflect.ValueOf(arg))
- d.w.Write(newlineBytes)
- }
-}
-
-// Fdump formats and displays the passed arguments to io.Writer w. It formats
-// exactly the same as Dump.
-func Fdump(w io.Writer, a ...interface{}) {
- fdump(&Config, w, a...)
-}
-
-// Sdump returns a string with the passed arguments formatted exactly the same
-// as Dump.
-func Sdump(a ...interface{}) string {
- var buf bytes.Buffer
- fdump(&Config, &buf, a...)
- return buf.String()
-}
-
-/*
-Dump displays the passed parameters to standard out with newlines, customizable
-indentation, and additional debug information such as complete types and all
-pointer addresses used to indirect to the final value. It provides the
-following features over the built-in printing facilities provided by the fmt
-package:
-
- * Pointers are dereferenced and followed
- * Circular data structures are detected and handled properly
- * Custom Stringer/error interfaces are optionally invoked, including
- on unexported types
- * Custom types which only implement the Stringer/error interfaces via
- a pointer receiver are optionally invoked when passing non-pointer
- variables
- * Byte arrays and slices are dumped like the hexdump -C command which
- includes offsets, byte values in hex, and ASCII output
-
-The configuration options are controlled by an exported package global,
-spew.Config. See ConfigState for options documentation.
-
-See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
-get the formatted result as a string.
-*/
-func Dump(a ...interface{}) {
- fdump(&Config, os.Stdout, a...)
-}
diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go
deleted file mode 100644
index b04edb7d..00000000
--- a/vendor/github.com/davecgh/go-spew/spew/format.go
+++ /dev/null
@@ -1,419 +0,0 @@
-/*
- * Copyright (c) 2013-2016 Dave Collins
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-package spew
-
-import (
- "bytes"
- "fmt"
- "reflect"
- "strconv"
- "strings"
-)
-
-// supportedFlags is a list of all the character flags supported by fmt package.
-const supportedFlags = "0-+# "
-
-// formatState implements the fmt.Formatter interface and contains information
-// about the state of a formatting operation. The NewFormatter function can
-// be used to get a new Formatter which can be used directly as arguments
-// in standard fmt package printing calls.
-type formatState struct {
- value interface{}
- fs fmt.State
- depth int
- pointers map[uintptr]int
- ignoreNextType bool
- cs *ConfigState
-}
-
-// buildDefaultFormat recreates the original format string without precision
-// and width information to pass in to fmt.Sprintf in the case of an
-// unrecognized type. Unless new types are added to the language, this
-// function won't ever be called.
-func (f *formatState) buildDefaultFormat() (format string) {
- buf := bytes.NewBuffer(percentBytes)
-
- for _, flag := range supportedFlags {
- if f.fs.Flag(int(flag)) {
- buf.WriteRune(flag)
- }
- }
-
- buf.WriteRune('v')
-
- format = buf.String()
- return format
-}
-
-// constructOrigFormat recreates the original format string including precision
-// and width information to pass along to the standard fmt package. This allows
-// automatic deferral of all format strings this package doesn't support.
-func (f *formatState) constructOrigFormat(verb rune) (format string) {
- buf := bytes.NewBuffer(percentBytes)
-
- for _, flag := range supportedFlags {
- if f.fs.Flag(int(flag)) {
- buf.WriteRune(flag)
- }
- }
-
- if width, ok := f.fs.Width(); ok {
- buf.WriteString(strconv.Itoa(width))
- }
-
- if precision, ok := f.fs.Precision(); ok {
- buf.Write(precisionBytes)
- buf.WriteString(strconv.Itoa(precision))
- }
-
- buf.WriteRune(verb)
-
- format = buf.String()
- return format
-}
-
-// unpackValue returns values inside of non-nil interfaces when possible and
-// ensures that types for values which have been unpacked from an interface
-// are displayed when the show types flag is also set.
-// This is useful for data types like structs, arrays, slices, and maps which
-// can contain varying types packed inside an interface.
-func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
- if v.Kind() == reflect.Interface {
- f.ignoreNextType = false
- if !v.IsNil() {
- v = v.Elem()
- }
- }
- return v
-}
-
-// formatPtr handles formatting of pointers by indirecting them as necessary.
-func (f *formatState) formatPtr(v reflect.Value) {
- // Display nil if top level pointer is nil.
- showTypes := f.fs.Flag('#')
- if v.IsNil() && (!showTypes || f.ignoreNextType) {
- f.fs.Write(nilAngleBytes)
- return
- }
-
- // Remove pointers at or below the current depth from map used to detect
- // circular refs.
- for k, depth := range f.pointers {
- if depth >= f.depth {
- delete(f.pointers, k)
- }
- }
-
- // Keep list of all dereferenced pointers to possibly show later.
- pointerChain := make([]uintptr, 0)
-
- // Figure out how many levels of indirection there are by derferencing
- // pointers and unpacking interfaces down the chain while detecting circular
- // references.
- nilFound := false
- cycleFound := false
- indirects := 0
- ve := v
- for ve.Kind() == reflect.Ptr {
- if ve.IsNil() {
- nilFound = true
- break
- }
- indirects++
- addr := ve.Pointer()
- pointerChain = append(pointerChain, addr)
- if pd, ok := f.pointers[addr]; ok && pd < f.depth {
- cycleFound = true
- indirects--
- break
- }
- f.pointers[addr] = f.depth
-
- ve = ve.Elem()
- if ve.Kind() == reflect.Interface {
- if ve.IsNil() {
- nilFound = true
- break
- }
- ve = ve.Elem()
- }
- }
-
- // Display type or indirection level depending on flags.
- if showTypes && !f.ignoreNextType {
- f.fs.Write(openParenBytes)
- f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
- f.fs.Write([]byte(ve.Type().String()))
- f.fs.Write(closeParenBytes)
- } else {
- if nilFound || cycleFound {
- indirects += strings.Count(ve.Type().String(), "*")
- }
- f.fs.Write(openAngleBytes)
- f.fs.Write([]byte(strings.Repeat("*", indirects)))
- f.fs.Write(closeAngleBytes)
- }
-
- // Display pointer information depending on flags.
- if f.fs.Flag('+') && (len(pointerChain) > 0) {
- f.fs.Write(openParenBytes)
- for i, addr := range pointerChain {
- if i > 0 {
- f.fs.Write(pointerChainBytes)
- }
- printHexPtr(f.fs, addr)
- }
- f.fs.Write(closeParenBytes)
- }
-
- // Display dereferenced value.
- switch {
- case nilFound:
- f.fs.Write(nilAngleBytes)
-
- case cycleFound:
- f.fs.Write(circularShortBytes)
-
- default:
- f.ignoreNextType = true
- f.format(ve)
- }
-}
-
-// format is the main workhorse for providing the Formatter interface. It
-// uses the passed reflect value to figure out what kind of object we are
-// dealing with and formats it appropriately. It is a recursive function,
-// however circular data structures are detected and handled properly.
-func (f *formatState) format(v reflect.Value) {
- // Handle invalid reflect values immediately.
- kind := v.Kind()
- if kind == reflect.Invalid {
- f.fs.Write(invalidAngleBytes)
- return
- }
-
- // Handle pointers specially.
- if kind == reflect.Ptr {
- f.formatPtr(v)
- return
- }
-
- // Print type information unless already handled elsewhere.
- if !f.ignoreNextType && f.fs.Flag('#') {
- f.fs.Write(openParenBytes)
- f.fs.Write([]byte(v.Type().String()))
- f.fs.Write(closeParenBytes)
- }
- f.ignoreNextType = false
-
- // Call Stringer/error interfaces if they exist and the handle methods
- // flag is enabled.
- if !f.cs.DisableMethods {
- if (kind != reflect.Invalid) && (kind != reflect.Interface) {
- if handled := handleMethods(f.cs, f.fs, v); handled {
- return
- }
- }
- }
-
- switch kind {
- case reflect.Invalid:
- // Do nothing. We should never get here since invalid has already
- // been handled above.
-
- case reflect.Bool:
- printBool(f.fs, v.Bool())
-
- case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
- printInt(f.fs, v.Int(), 10)
-
- case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
- printUint(f.fs, v.Uint(), 10)
-
- case reflect.Float32:
- printFloat(f.fs, v.Float(), 32)
-
- case reflect.Float64:
- printFloat(f.fs, v.Float(), 64)
-
- case reflect.Complex64:
- printComplex(f.fs, v.Complex(), 32)
-
- case reflect.Complex128:
- printComplex(f.fs, v.Complex(), 64)
-
- case reflect.Slice:
- if v.IsNil() {
- f.fs.Write(nilAngleBytes)
- break
- }
- fallthrough
-
- case reflect.Array:
- f.fs.Write(openBracketBytes)
- f.depth++
- if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
- f.fs.Write(maxShortBytes)
- } else {
- numEntries := v.Len()
- for i := 0; i < numEntries; i++ {
- if i > 0 {
- f.fs.Write(spaceBytes)
- }
- f.ignoreNextType = true
- f.format(f.unpackValue(v.Index(i)))
- }
- }
- f.depth--
- f.fs.Write(closeBracketBytes)
-
- case reflect.String:
- f.fs.Write([]byte(v.String()))
-
- case reflect.Interface:
- // The only time we should get here is for nil interfaces due to
- // unpackValue calls.
- if v.IsNil() {
- f.fs.Write(nilAngleBytes)
- }
-
- case reflect.Ptr:
- // Do nothing. We should never get here since pointers have already
- // been handled above.
-
- case reflect.Map:
- // nil maps should be indicated as different than empty maps
- if v.IsNil() {
- f.fs.Write(nilAngleBytes)
- break
- }
-
- f.fs.Write(openMapBytes)
- f.depth++
- if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
- f.fs.Write(maxShortBytes)
- } else {
- keys := v.MapKeys()
- if f.cs.SortKeys {
- sortValues(keys, f.cs)
- }
- for i, key := range keys {
- if i > 0 {
- f.fs.Write(spaceBytes)
- }
- f.ignoreNextType = true
- f.format(f.unpackValue(key))
- f.fs.Write(colonBytes)
- f.ignoreNextType = true
- f.format(f.unpackValue(v.MapIndex(key)))
- }
- }
- f.depth--
- f.fs.Write(closeMapBytes)
-
- case reflect.Struct:
- numFields := v.NumField()
- f.fs.Write(openBraceBytes)
- f.depth++
- if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
- f.fs.Write(maxShortBytes)
- } else {
- vt := v.Type()
- for i := 0; i < numFields; i++ {
- if i > 0 {
- f.fs.Write(spaceBytes)
- }
- vtf := vt.Field(i)
- if f.fs.Flag('+') || f.fs.Flag('#') {
- f.fs.Write([]byte(vtf.Name))
- f.fs.Write(colonBytes)
- }
- f.format(f.unpackValue(v.Field(i)))
- }
- }
- f.depth--
- f.fs.Write(closeBraceBytes)
-
- case reflect.Uintptr:
- printHexPtr(f.fs, uintptr(v.Uint()))
-
- case reflect.UnsafePointer, reflect.Chan, reflect.Func:
- printHexPtr(f.fs, v.Pointer())
-
- // There were not any other types at the time this code was written, but
- // fall back to letting the default fmt package handle it if any get added.
- default:
- format := f.buildDefaultFormat()
- if v.CanInterface() {
- fmt.Fprintf(f.fs, format, v.Interface())
- } else {
- fmt.Fprintf(f.fs, format, v.String())
- }
- }
-}
-
-// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
-// details.
-func (f *formatState) Format(fs fmt.State, verb rune) {
- f.fs = fs
-
- // Use standard formatting for verbs that are not v.
- if verb != 'v' {
- format := f.constructOrigFormat(verb)
- fmt.Fprintf(fs, format, f.value)
- return
- }
-
- if f.value == nil {
- if fs.Flag('#') {
- fs.Write(interfaceBytes)
- }
- fs.Write(nilAngleBytes)
- return
- }
-
- f.format(reflect.ValueOf(f.value))
-}
-
-// newFormatter is a helper function to consolidate the logic from the various
-// public methods which take varying config states.
-func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
- fs := &formatState{value: v, cs: cs}
- fs.pointers = make(map[uintptr]int)
- return fs
-}
-
-/*
-NewFormatter returns a custom formatter that satisfies the fmt.Formatter
-interface. As a result, it integrates cleanly with standard fmt package
-printing functions. The formatter is useful for inline printing of smaller data
-types similar to the standard %v format specifier.
-
-The custom formatter only responds to the %v (most compact), %+v (adds pointer
-addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
-combinations. Any other verbs such as %x and %q will be sent to the the
-standard fmt package for formatting. In addition, the custom formatter ignores
-the width and precision arguments (however they will still work on the format
-specifiers not handled by the custom formatter).
-
-Typically this function shouldn't be called directly. It is much easier to make
-use of the custom formatter by calling one of the convenience functions such as
-Printf, Println, or Fprintf.
-*/
-func NewFormatter(v interface{}) fmt.Formatter {
- return newFormatter(&Config, v)
-}
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go
deleted file mode 100644
index 32c0e338..00000000
--- a/vendor/github.com/davecgh/go-spew/spew/spew.go
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Copyright (c) 2013-2016 Dave Collins
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-package spew
-
-import (
- "fmt"
- "io"
-)
-
-// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
-// passed with a default Formatter interface returned by NewFormatter. It
-// returns the formatted string as a value that satisfies error. See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
-func Errorf(format string, a ...interface{}) (err error) {
- return fmt.Errorf(format, convertArgs(a)...)
-}
-
-// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
-// passed with a default Formatter interface returned by NewFormatter. It
-// returns the number of bytes written and any write error encountered. See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
-func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
- return fmt.Fprint(w, convertArgs(a)...)
-}
-
-// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
-// passed with a default Formatter interface returned by NewFormatter. It
-// returns the number of bytes written and any write error encountered. See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
-func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
- return fmt.Fprintf(w, format, convertArgs(a)...)
-}
-
-// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
-// passed with a default Formatter interface returned by NewFormatter. See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
-func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
- return fmt.Fprintln(w, convertArgs(a)...)
-}
-
-// Print is a wrapper for fmt.Print that treats each argument as if it were
-// passed with a default Formatter interface returned by NewFormatter. It
-// returns the number of bytes written and any write error encountered. See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
-func Print(a ...interface{}) (n int, err error) {
- return fmt.Print(convertArgs(a)...)
-}
-
-// Printf is a wrapper for fmt.Printf that treats each argument as if it were
-// passed with a default Formatter interface returned by NewFormatter. It
-// returns the number of bytes written and any write error encountered. See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
-func Printf(format string, a ...interface{}) (n int, err error) {
- return fmt.Printf(format, convertArgs(a)...)
-}
-
-// Println is a wrapper for fmt.Println that treats each argument as if it were
-// passed with a default Formatter interface returned by NewFormatter. It
-// returns the number of bytes written and any write error encountered. See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
-func Println(a ...interface{}) (n int, err error) {
- return fmt.Println(convertArgs(a)...)
-}
-
-// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
-// passed with a default Formatter interface returned by NewFormatter. It
-// returns the resulting string. See NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
-func Sprint(a ...interface{}) string {
- return fmt.Sprint(convertArgs(a)...)
-}
-
-// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
-// passed with a default Formatter interface returned by NewFormatter. It
-// returns the resulting string. See NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
-func Sprintf(format string, a ...interface{}) string {
- return fmt.Sprintf(format, convertArgs(a)...)
-}
-
-// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
-// were passed with a default Formatter interface returned by NewFormatter. It
-// returns the resulting string. See NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
-func Sprintln(a ...interface{}) string {
- return fmt.Sprintln(convertArgs(a)...)
-}
-
-// convertArgs accepts a slice of arguments and returns a slice of the same
-// length with each argument converted to a default spew Formatter interface.
-func convertArgs(args []interface{}) (formatters []interface{}) {
- formatters = make([]interface{}, len(args))
- for index, arg := range args {
- formatters[index] = NewFormatter(arg)
- }
- return formatters
-}
diff --git a/vendor/github.com/dgraph-io/badger/.gitignore b/vendor/github.com/dgraph-io/badger/.gitignore
deleted file mode 100644
index e3efdf58..00000000
--- a/vendor/github.com/dgraph-io/badger/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-p/
-badger-test*/
diff --git a/vendor/github.com/dgraph-io/badger/.golangci.yml b/vendor/github.com/dgraph-io/badger/.golangci.yml
deleted file mode 100644
index fecb8644..00000000
--- a/vendor/github.com/dgraph-io/badger/.golangci.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-run:
- tests: false
-
-linters-settings:
- lll:
- line-length: 100
-
-linters:
- disable-all: true
- enable:
- - errcheck
- - ineffassign
- - gas
- - gofmt
- - golint
- - gosimple
- - govet
- - lll
- - varcheck
- - unused
-
-issues:
- exclude-rules:
- - linters:
- - gosec
- text: "G404: "
-
\ No newline at end of file
diff --git a/vendor/github.com/dgraph-io/badger/.travis.yml b/vendor/github.com/dgraph-io/badger/.travis.yml
deleted file mode 100644
index 7c58e56d..00000000
--- a/vendor/github.com/dgraph-io/badger/.travis.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-language: go
-
-go:
- - "1.11"
- - "1.12"
-
-matrix:
- include:
- - os: osx
-notifications:
- email: false
- slack:
- secure: X7uBLWYbuUhf8QFE16CoS5z7WvFR8EN9j6cEectMW6mKZ3vwXGwVXRIPsgUq/606DsQdCCx34MR8MRWYGlu6TBolbSe9y0EP0i46yipPz22YtuT7umcVUbGEyx8MZKgG0v1u/zA0O4aCsOBpGAA3gxz8h3JlEHDt+hv6U8xRsSllVLzLSNb5lwxDtcfEDxVVqP47GMEgjLPM28Pyt5qwjk7o5a4YSVzkfdxBXxd3gWzFUWzJ5E3cTacli50dK4GVfiLcQY2aQYoYO7AAvDnvP+TPfjDkBlUEE4MUz5CDIN51Xb+WW33sX7g+r3Bj7V5IRcF973RiYkpEh+3eoiPnyWyxhDZBYilty3b+Hysp6d4Ov/3I3ll7Bcny5+cYjakjkMH3l9w3gs6Y82GlpSLSJshKWS8vPRsxFe0Pstj6QSJXTd9EBaFr+l1ScXjJv/Sya9j8N9FfTuOTESWuaL1auX4Y7zEEVHlA8SCNOO8K0eTfxGZnC/YcIHsR8rePEAcFxfOYQppkyLF/XvAtnb/LMUuu0g4y2qNdme6Oelvyar1tFEMRtbl4mRCdu/krXBFtkrsfUaVY6WTPdvXAGotsFJ0wuA53zGVhlcd3+xAlSlR3c1QX95HIMeivJKb5L4nTjP+xnrmQNtnVk+tG4LSH2ltuwcZSSczModtcBmRefrk=
-
-env:
- global:
- - secure: CRkV2+/jlO0gXzzS50XGxfMS117FNwiVjxNY/LeWq06RKD+dDCPxTJl3JCNe3l0cYEPAglV2uMMYukDiTqJ7e+HI4nh4N4mv6lwx39N8dAvJe1x5ITS2T4qk4kTjuQb1Q1vw/ZOxoQqmvNKj2uRmBdJ/HHmysbRJ1OzCWML3OXdUwJf0AYlJzTjpMfkOKr7sTtE4rwyyQtd4tKH1fGdurgI9ZuFd9qvYxK2qcJhsQ6CNqMXt+7FkVkN1rIPmofjjBTNryzUr4COFXuWH95aDAif19DeBW4lbNgo1+FpDsrgmqtuhl6NAuptI8q/imow2KXBYJ8JPXsxW8DVFj0IIp0RCd3GjaEnwBEbxAyiIHLfW7AudyTS/dJOvZffPqXnuJ8xj3OPIdNe4xY0hWl8Ju2HhKfLOAHq7VadHZWd3IHLil70EiL4/JLD1rNbMImUZisFaA8pyrcIvYYebjOnk4TscwKFLedClRSX1XsMjWWd0oykQtrdkHM2IxknnBpaLu7mFnfE07f6dkG0nlpyu4SCLey7hr5FdcEmljA0nIxTSYDg6035fQkBEAbe7hlESOekkVNT9IZPwG+lmt3vU4ofi6NqNbJecOuSB+h36IiZ9s4YQtxYNnLgW14zjuFGGyT5smc3IjBT7qngDjKIgyrSVoRkY/8udy9qbUgvBeW8=
-
-before_script:
-- go get github.com/mattn/goveralls
-script:
-- bash contrib/cover.sh $HOME/build coverage.out || travis_terminate 1
-- goveralls -service=travis-ci -coverprofile=coverage.out || true
-- goveralls -coverprofile=coverage.out -service=travis-ci
diff --git a/vendor/github.com/dgraph-io/badger/CHANGELOG.md b/vendor/github.com/dgraph-io/badger/CHANGELOG.md
deleted file mode 100644
index e381a4b7..00000000
--- a/vendor/github.com/dgraph-io/badger/CHANGELOG.md
+++ /dev/null
@@ -1,190 +0,0 @@
-# Changelog
-All notable changes to this project will be documented in this file.
-
-The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
-and this project adheres to [Serialization Versioning](VERSIONING.md).
-
-## [Unreleased]
-
-## [1.6.0] - 2019-07-01
-
-This is a release including almost 200 commits, so expect many changes - some of them
-not backward compatible.
-
-Regarding backward compatibility in Badger versions, you might be interested on reading
-[VERSIONING.md](VERSIONING.md).
-
-_Note_: The hashes in parentheses correspond to the commits that impacted the given feature.
-
-### New APIs
-
-- badger.DB
- - DropPrefix (291295e)
- - Flatten (7e41bba)
- - KeySplits (4751ef1)
- - MaxBatchCount (b65e2a3)
- - MaxBatchSize (b65e2a3)
- - PrintKeyValueHistogram (fd59907)
- - Subscribe (26128a7)
- - Sync (851e462)
-
-- badger.DefaultOptions() and badger.LSMOnlyOptions() (91ce687)
- - badger.Options.WithX methods
-
-- badger.Entry (e9447c9)
- - NewEntry
- - WithMeta
- - WithDiscard
- - WithTTL
-
-- badger.Item
- - KeySize (fd59907)
- - ValueSize (5242a99)
-
-- badger.IteratorOptions
- - PickTable (7d46029, 49a49e3)
- - Prefix (7d46029)
-
-- badger.Logger (fbb2778)
-
-- badger.Options
- - CompactL0OnClose (7e41bba)
- - Logger (3f66663)
- - LogRotatesToFlush (2237832)
-
-- badger.Stream (14cbd89, 3258067)
-- badger.StreamWriter (7116e16)
-- badger.TableInfo.KeyCount (fd59907)
-- badger.TableManifest (2017987)
-- badger.Tx.NewKeyIterator (49a49e3)
-- badger.WriteBatch (6daccf9, 7e78e80)
-
-### Modified APIs
-
-#### Breaking changes:
-
-- badger.DefaultOptions and badger.LSMOnlyOptions are now functions rather than variables (91ce687)
-- badger.Item.Value now receives a function that returns an error (439fd46)
-- badger.Txn.Commit doesn't receive any params now (6daccf9)
-- badger.DB.Tables now receives a boolean (76b5341)
-
-#### Not breaking changes:
-
-- badger.LSMOptions changed values (799c33f)
-- badger.DB.NewIterator now allows multiple iterators per RO txn (41d9656)
-- badger.Options.TableLoadingMode's new default is options.MemoryMap (6b97bac)
-
-### Removed APIs
-
-- badger.ManagedDB (d22c0e8)
-- badger.Options.DoNotCompact (7e41bba)
-- badger.Txn.SetWithX (e9447c9)
-
-### Tools:
-
-- badger bank disect (13db058)
-- badger bank test (13db058) --mmap (03870e3)
-- badger fill (7e41bba)
-- badger flatten (7e41bba)
-- badger info --histogram (fd59907) --history --lookup --show-keys --show-meta --with-prefix (09e9b63) --show-internal (fb2eed9)
-- badger benchmark read (239041e)
-- badger benchmark write (6d3b67d)
-
-## [1.5.5] - 2019-06-20
-
-* Introduce support for Go Modules
-
-## [1.5.3] - 2018-07-11
-Bug Fixes:
-* Fix a panic caused due to item.vptr not copying over vs.Value, when looking
- for a move key.
-
-## [1.5.2] - 2018-06-19
-Bug Fixes:
-* Fix the way move key gets generated.
-* If a transaction has unclosed, or multiple iterators running simultaneously,
- throw a panic. Every iterator must be properly closed. At any point in time,
- only one iterator per transaction can be running. This is to avoid bugs in a
- transaction data structure which is thread unsafe.
-
-* *Warning: This change might cause panics in user code. Fix is to properly
- close your iterators, and only have one running at a time per transaction.*
-
-## [1.5.1] - 2018-06-04
-Bug Fixes:
-* Fix for infinite yieldItemValue recursion. #503
-* Fix recursive addition of `badgerMove` prefix. https://github.com/dgraph-io/badger/commit/2e3a32f0ccac3066fb4206b28deb39c210c5266f
-* Use file size based window size for sampling, instead of fixing it to 10MB. #501
-
-Cleanup:
-* Clarify comments and documentation.
-* Move badger tool one directory level up.
-
-## [1.5.0] - 2018-05-08
-* Introduce `NumVersionsToKeep` option. This option is used to discard many
- versions of the same key, which saves space.
-* Add a new `SetWithDiscard` method, which would indicate that all the older
- versions of the key are now invalid. Those versions would be discarded during
- compactions.
-* Value log GC moves are now bound to another keyspace to ensure latest versions
- of data are always at the top in LSM tree.
-* Introduce `ValueLogMaxEntries` to restrict the number of key-value pairs per
- value log file. This helps bound the time it takes to garbage collect one
- file.
-
-## [1.4.0] - 2018-05-04
-* Make mmap-ing of value log optional.
-* Run GC multiple times, based on recorded discard statistics.
-* Add MergeOperator.
-* Force compact L0 on clsoe (#439).
-* Add truncate option to warn about data loss (#452).
-* Discard key versions during compaction (#464).
-* Introduce new `LSMOnlyOptions`, to make Badger act like a typical LSM based DB.
-
-Bug fix:
-* (Temporary) Check max version across all tables in Get (removed in next
- release).
-* Update commit and read ts while loading from backup.
-* Ensure all transaction entries are part of the same value log file.
-* On commit, run unlock callbacks before doing writes (#413).
-* Wait for goroutines to finish before closing iterators (#421).
-
-## [1.3.0] - 2017-12-12
-* Add `DB.NextSequence()` method to generate monotonically increasing integer
- sequences.
-* Add `DB.Size()` method to return the size of LSM and value log files.
-* Tweaked mmap code to make Windows 32-bit builds work.
-* Tweaked build tags on some files to make iOS builds work.
-* Fix `DB.PurgeOlderVersions()` to not violate some constraints.
-
-## [1.2.0] - 2017-11-30
-* Expose a `Txn.SetEntry()` method to allow setting the key-value pair
- and all the metadata at the same time.
-
-## [1.1.1] - 2017-11-28
-* Fix bug where txn.Get was returing key deleted in same transaction.
-* Fix race condition while decrementing reference in oracle.
-* Update doneCommit in the callback for CommitAsync.
-* Iterator see writes of current txn.
-
-## [1.1.0] - 2017-11-13
-* Create Badger directory if it does not exist when `badger.Open` is called.
-* Added `Item.ValueCopy()` to avoid deadlocks in long-running iterations
-* Fixed 64-bit alignment issues to make Badger run on Arm v7
-
-## [1.0.1] - 2017-11-06
-* Fix an uint16 overflow when resizing key slice
-
-[Unreleased]: https://github.com/dgraph-io/badger/compare/v1.6.0...HEAD
-[1.6.0]: https://github.com/dgraph-io/badger/compare/v1.5.5...v1.6.0
-[1.5.5]: https://github.com/dgraph-io/badger/compare/v1.5.3...v1.5.5
-[1.5.3]: https://github.com/dgraph-io/badger/compare/v1.5.2...v1.5.3
-[1.5.2]: https://github.com/dgraph-io/badger/compare/v1.5.1...v1.5.2
-[1.5.1]: https://github.com/dgraph-io/badger/compare/v1.5.0...v1.5.1
-[1.5.0]: https://github.com/dgraph-io/badger/compare/v1.4.0...v1.5.0
-[1.4.0]: https://github.com/dgraph-io/badger/compare/v1.3.0...v1.4.0
-[1.3.0]: https://github.com/dgraph-io/badger/compare/v1.2.0...v1.3.0
-[1.2.0]: https://github.com/dgraph-io/badger/compare/v1.1.1...v1.2.0
-[1.1.1]: https://github.com/dgraph-io/badger/compare/v1.1.0...v1.1.1
-[1.1.0]: https://github.com/dgraph-io/badger/compare/v1.0.1...v1.1.0
-[1.0.1]: https://github.com/dgraph-io/badger/compare/v1.0.0...v1.0.1
diff --git a/vendor/github.com/dgraph-io/badger/CODE_OF_CONDUCT.md b/vendor/github.com/dgraph-io/badger/CODE_OF_CONDUCT.md
deleted file mode 100644
index bf7bbc29..00000000
--- a/vendor/github.com/dgraph-io/badger/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# Code of Conduct
-
-Our Code of Conduct can be found here:
-
-https://dgraph.io/conduct
diff --git a/vendor/github.com/dgraph-io/badger/LICENSE b/vendor/github.com/dgraph-io/badger/LICENSE
deleted file mode 100644
index d9a10c0d..00000000
--- a/vendor/github.com/dgraph-io/badger/LICENSE
+++ /dev/null
@@ -1,176 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
diff --git a/vendor/github.com/dgraph-io/badger/README.md b/vendor/github.com/dgraph-io/badger/README.md
deleted file mode 100644
index fe033d9c..00000000
--- a/vendor/github.com/dgraph-io/badger/README.md
+++ /dev/null
@@ -1,859 +0,0 @@
-# BadgerDB [![GoDoc](https://godoc.org/github.com/dgraph-io/badger?status.svg)](https://godoc.org/github.com/dgraph-io/badger) [![Go Report Card](https://goreportcard.com/badge/github.com/dgraph-io/badger)](https://goreportcard.com/report/github.com/dgraph-io/badger) [![Sourcegraph](https://sourcegraph.com/github.com/dgraph-io/badger/-/badge.svg)](https://sourcegraph.com/github.com/dgraph-io/badger?badge) [![Build Status](https://teamcity.dgraph.io/guestAuth/app/rest/builds/buildType:(id:Badger_UnitTests)/statusIcon.svg)](https://teamcity.dgraph.io/viewLog.html?buildTypeId=Badger_UnitTests&buildId=lastFinished&guest=1) ![Appveyor](https://ci.appveyor.com/api/projects/status/github/dgraph-io/badger?branch=master&svg=true) [![Coverage Status](https://coveralls.io/repos/github/dgraph-io/badger/badge.svg?branch=master)](https://coveralls.io/github/dgraph-io/badger?branch=master)
-
-![Badger mascot](images/diggy-shadow.png)
-
-BadgerDB is an embeddable, persistent and fast key-value (KV) database
-written in pure Go. It's meant to be a performant alternative to non-Go-based
-key-value stores like [RocksDB](https://github.com/facebook/rocksdb).
-
-## Project Status [Jun 26, 2019]
-
-Badger is stable and is being used to serve data sets worth hundreds of
-terabytes. Badger supports concurrent ACID transactions with serializable
-snapshot isolation (SSI) guarantees. A Jepsen-style bank test runs nightly for
-8h, with `--race` flag and ensures maintenance of transactional guarantees.
-Badger has also been tested to work with filesystem level anomalies, to ensure
-persistence and consistency.
-
-Badger v1.0 was released in Nov 2017, and the latest version that is data-compatible
-with v1.0 is v1.6.0.
-
-Badger v2.0, a new release coming up very soon will use a new storage format which won't
-be compatible with all of the v1.x. The [Changelog] is kept fairly up-to-date.
-
-For more details on our version naming schema please read [Choosing a version](#choosing-a-version).
-
-[Changelog]:https://github.com/dgraph-io/badger/blob/master/CHANGELOG.md
-
-## Table of Contents
- * [Getting Started](#getting-started)
- + [Installing](#installing)
- - [Choosing a version](#choosing-a-version)
- + [Opening a database](#opening-a-database)
- + [Transactions](#transactions)
- - [Read-only transactions](#read-only-transactions)
- - [Read-write transactions](#read-write-transactions)
- - [Managing transactions manually](#managing-transactions-manually)
- + [Using key/value pairs](#using-keyvalue-pairs)
- + [Monotonically increasing integers](#monotonically-increasing-integers)
- * [Merge Operations](#merge-operations)
- + [Setting Time To Live(TTL) and User Metadata on Keys](#setting-time-to-livettl-and-user-metadata-on-keys)
- + [Iterating over keys](#iterating-over-keys)
- - [Prefix scans](#prefix-scans)
- - [Key-only iteration](#key-only-iteration)
- + [Stream](#stream)
- + [Garbage Collection](#garbage-collection)
- + [Database backup](#database-backup)
- + [Memory usage](#memory-usage)
- + [Statistics](#statistics)
- * [Resources](#resources)
- + [Blog Posts](#blog-posts)
- * [Contact](#contact)
- * [Design](#design)
- + [Comparisons](#comparisons)
- + [Benchmarks](#benchmarks)
- * [Other Projects Using Badger](#other-projects-using-badger)
- * [Frequently Asked Questions](#frequently-asked-questions)
-
-## Getting Started
-
-### Installing
-To start using Badger, install Go 1.11 or above and run `go get`:
-
-```sh
-$ go get github.com/dgraph-io/badger/...
-```
-
-This will retrieve the library and install the `badger` command line
-utility into your `$GOBIN` path.
-
-#### Choosing a version
-
-BadgerDB is a pretty special package from the point of view that the most important change we can
-make to it is not on its API but rather on how data is stored on disk.
-
-This is why we follow a version naming schema that differs from Semantic Versioning.
-
-- New major versions are released when the data format on disk changes in an incompatible way.
-- New minor versions are released whenever the API changes but data compatibility is maintained.
- Note that the changes on the API could be backward-incompatible - unlike Semantic Versioning.
-- New patch versions are released when there are no changes to the data format or the API.
-
-Following these rules:
-
-- v1.5.0 and v1.6.0 can be used on top of the same files without any concerns, as their major
- version is the same, therefore the data format on disk is compatible.
-- v1.6.0 and v2.0.0 are data incompatible as their major version implies, so files created with
- v1.6.0 will need to be converted into the new format before they can be used by v2.0.0.
-
-For a longer explanation on the reasons behind using a new versioning naming schema, you can read
-[VERSIONING.md](VERSIONING.md).
-
-### Opening a database
-The top-level object in Badger is a `DB`. It represents multiple files on disk
-in specific directories, which contain the data for a single database.
-
-To open your database, use the `badger.Open()` function, with the appropriate
-options. The `Dir` and `ValueDir` options are mandatory and must be
-specified by the client. They can be set to the same value to simplify things.
-
-```go
-package main
-
-import (
- "log"
-
- badger "github.com/dgraph-io/badger"
-)
-
-func main() {
- // Open the Badger database located in the /tmp/badger directory.
- // It will be created if it doesn't exist.
- db, err := badger.Open(badger.DefaultOptions("tmp/badger"))
- if err != nil {
- log.Fatal(err)
- }
- defer db.Close()
- // Your code here…
-}
-```
-
-Please note that Badger obtains a lock on the directories so multiple processes
-cannot open the same database at the same time.
-
-### Transactions
-
-#### Read-only transactions
-To start a read-only transaction, you can use the `DB.View()` method:
-
-```go
-err := db.View(func(txn *badger.Txn) error {
- // Your code here…
- return nil
-})
-```
-
-You cannot perform any writes or deletes within this transaction. Badger
-ensures that you get a consistent view of the database within this closure. Any
-writes that happen elsewhere after the transaction has started, will not be
-seen by calls made within the closure.
-
-#### Read-write transactions
-To start a read-write transaction, you can use the `DB.Update()` method:
-
-```go
-err := db.Update(func(txn *badger.Txn) error {
- // Your code here…
- return nil
-})
-```
-
-All database operations are allowed inside a read-write transaction.
-
-Always check the returned error value. If you return an error
-within your closure it will be passed through.
-
-An `ErrConflict` error will be reported in case of a conflict. Depending on the state
-of your application, you have the option to retry the operation if you receive
-this error.
-
-An `ErrTxnTooBig` will be reported in case the number of pending writes/deletes in
-the transaction exceed a certain limit. In that case, it is best to commit the
-transaction and start a new transaction immediately. Here is an example (we are
-not checking for errors in some places for simplicity):
-
-```go
-updates := make(map[string]string)
-txn := db.NewTransaction(true)
-for k,v := range updates {
- if err := txn.Set([]byte(k),[]byte(v)); err == ErrTxnTooBig {
- _ = txn.Commit()
- txn = db.NewTransaction(true)
- _ = txn.Set([]byte(k),[]byte(v))
- }
-}
-_ = txn.Commit()
-```
-
-#### Managing transactions manually
-The `DB.View()` and `DB.Update()` methods are wrappers around the
-`DB.NewTransaction()` and `Txn.Commit()` methods (or `Txn.Discard()` in case of
-read-only transactions). These helper methods will start the transaction,
-execute a function, and then safely discard your transaction if an error is
-returned. This is the recommended way to use Badger transactions.
-
-However, sometimes you may want to manually create and commit your
-transactions. You can use the `DB.NewTransaction()` function directly, which
-takes in a boolean argument to specify whether a read-write transaction is
-required. For read-write transactions, it is necessary to call `Txn.Commit()`
-to ensure the transaction is committed. For read-only transactions, calling
-`Txn.Discard()` is sufficient. `Txn.Commit()` also calls `Txn.Discard()`
-internally to cleanup the transaction, so just calling `Txn.Commit()` is
-sufficient for read-write transactions. However, if your code doesn’t call
-`Txn.Commit()` for some reason (for e.g it returns prematurely with an error),
-then please make sure you call `Txn.Discard()` in a `defer` block. Refer to the
-code below.
-
-```go
-// Start a writable transaction.
-txn := db.NewTransaction(true)
-defer txn.Discard()
-
-// Use the transaction...
-err := txn.Set([]byte("answer"), []byte("42"))
-if err != nil {
- return err
-}
-
-// Commit the transaction and check for error.
-if err := txn.Commit(); err != nil {
- return err
-}
-```
-
-The first argument to `DB.NewTransaction()` is a boolean stating if the transaction
-should be writable.
-
-Badger allows an optional callback to the `Txn.Commit()` method. Normally, the
-callback can be set to `nil`, and the method will return after all the writes
-have succeeded. However, if this callback is provided, the `Txn.Commit()`
-method returns as soon as it has checked for any conflicts. The actual writing
-to the disk happens asynchronously, and the callback is invoked once the
-writing has finished, or an error has occurred. This can improve the throughput
-of the application in some cases. But it also means that a transaction is not
-durable until the callback has been invoked with a `nil` error value.
-
-### Using key/value pairs
-To save a key/value pair, use the `Txn.Set()` method:
-
-```go
-err := db.Update(func(txn *badger.Txn) error {
- err := txn.Set([]byte("answer"), []byte("42"))
- return err
-})
-```
-
-Key/Value pair can also be saved by first creating `Entry`, then setting this
-`Entry` using `Txn.SetEntry()`. `Entry` also exposes methods to set properties
-on it.
-
-```go
-err := db.Update(func(txn *badger.Txn) error {
- e := NewEntry([]byte("answer"), []byte("42"))
- err := txn.SetEntry(e)
- return err
-})
-```
-
-This will set the value of the `"answer"` key to `"42"`. To retrieve this
-value, we can use the `Txn.Get()` method:
-
-```go
-err := db.View(func(txn *badger.Txn) error {
- item, err := txn.Get([]byte("answer"))
- handle(err)
-
- var valNot, valCopy []byte
- err := item.Value(func(val []byte) error {
- // This func with val would only be called if item.Value encounters no error.
-
- // Accessing val here is valid.
- fmt.Printf("The answer is: %s\n", val)
-
- // Copying or parsing val is valid.
- valCopy = append([]byte{}, val...)
-
- // Assigning val slice to another variable is NOT OK.
- valNot = val // Do not do this.
- return nil
- })
- handle(err)
-
- // DO NOT access val here. It is the most common cause of bugs.
- fmt.Printf("NEVER do this. %s\n", valNot)
-
- // You must copy it to use it outside item.Value(...).
- fmt.Printf("The answer is: %s\n", valCopy)
-
- // Alternatively, you could also use item.ValueCopy().
- valCopy, err = item.ValueCopy(nil)
- handle(err)
- fmt.Printf("The answer is: %s\n", valCopy)
-
- return nil
-})
-```
-
-`Txn.Get()` returns `ErrKeyNotFound` if the value is not found.
-
-Please note that values returned from `Get()` are only valid while the
-transaction is open. If you need to use a value outside of the transaction
-then you must use `copy()` to copy it to another byte slice.
-
-Use the `Txn.Delete()` method to delete a key.
-
-### Monotonically increasing integers
-
-To get unique monotonically increasing integers with strong durability, you can
-use the `DB.GetSequence` method. This method returns a `Sequence` object, which
-is thread-safe and can be used concurrently via various goroutines.
-
-Badger would lease a range of integers to hand out from memory, with the
-bandwidth provided to `DB.GetSequence`. The frequency at which disk writes are
-done is determined by this lease bandwidth and the frequency of `Next`
-invocations. Setting a bandwidth too low would do more disk writes, setting it
-too high would result in wasted integers if Badger is closed or crashes.
-To avoid wasted integers, call `Release` before closing Badger.
-
-```go
-seq, err := db.GetSequence(key, 1000)
-defer seq.Release()
-for {
- num, err := seq.Next()
-}
-```
-
-### Merge Operations
-Badger provides support for ordered merge operations. You can define a func
-of type `MergeFunc` which takes in an existing value, and a value to be
-_merged_ with it. It returns a new value which is the result of the _merge_
-operation. All values are specified in byte arrays. For e.g., here is a merge
-function (`add`) which appends a `[]byte` value to an existing `[]byte` value.
-
-```Go
-// Merge function to append one byte slice to another
-func add(originalValue, newValue []byte) []byte {
- return append(originalValue, newValue...)
-}
-```
-
-This function can then be passed to the `DB.GetMergeOperator()` method, along
-with a key, and a duration value. The duration specifies how often the merge
-function is run on values that have been added using the `MergeOperator.Add()`
-method.
-
-`MergeOperator.Get()` method can be used to retrieve the cumulative value of the key
-associated with the merge operation.
-
-```Go
-key := []byte("merge")
-
-m := db.GetMergeOperator(key, add, 200*time.Millisecond)
-defer m.Stop()
-
-m.Add([]byte("A"))
-m.Add([]byte("B"))
-m.Add([]byte("C"))
-
-res, _ := m.Get() // res should have value ABC encoded
-```
-
-Example: Merge operator which increments a counter
-
-```Go
-func uint64ToBytes(i uint64) []byte {
- var buf [8]byte
- binary.BigEndian.PutUint64(buf[:], i)
- return buf[:]
-}
-
-func bytesToUint64(b []byte) uint64 {
- return binary.BigEndian.Uint64(b)
-}
-
-// Merge function to add two uint64 numbers
-func add(existing, new []byte) []byte {
- return uint64ToBytes(bytesToUint64(existing) + bytesToUint64(new))
-}
-```
-It can be used as
-```Go
-key := []byte("merge")
-
-m := db.GetMergeOperator(key, add, 200*time.Millisecond)
-defer m.Stop()
-
-m.Add(uint64ToBytes(1))
-m.Add(uint64ToBytes(2))
-m.Add(uint64ToBytes(3))
-
-res, _ := m.Get() // res should have value 6 encoded
-```
-
-### Setting Time To Live(TTL) and User Metadata on Keys
-Badger allows setting an optional Time to Live (TTL) value on keys. Once the TTL has
-elapsed, the key will no longer be retrievable and will be eligible for garbage
-collection. A TTL can be set as a `time.Duration` value using the `Entry.WithTTL()`
-and `Txn.SetEntry()` API methods.
-
-```go
-err := db.Update(func(txn *badger.Txn) error {
- e := NewEntry([]byte("answer"), []byte("42")).WithTTL(time.Hour)
- err := txn.SetEntry(e)
- return err
-})
-```
-
-An optional user metadata value can be set on each key. A user metadata value
-is represented by a single byte. It can be used to set certain bits along
-with the key to aid in interpreting or decoding the key-value pair. User
-metadata can be set using `Entry.WithMeta()` and `Txn.SetEntry()` API methods.
-
-```go
-err := db.Update(func(txn *badger.Txn) error {
- e := NewEntry([]byte("answer"), []byte("42")).WithMeta(byte(1))
- err := txn.SetEntry(e)
- return err
-})
-```
-
-`Entry` APIs can be used to add the user metadata and TTL for same key. This `Entry`
-then can be set using `Txn.SetEntry()`.
-
-```go
-err := db.Update(func(txn *badger.Txn) error {
- e := NewEntry([]byte("answer"), []byte("42")).WithMeta(byte(1)).WithTTL(time.Hour)
- err := txn.SetEntry(e)
- return err
-})
-```
-
-### Iterating over keys
-To iterate over keys, we can use an `Iterator`, which can be obtained using the
-`Txn.NewIterator()` method. Iteration happens in byte-wise lexicographical sorting
-order.
-
-
-```go
-err := db.View(func(txn *badger.Txn) error {
- opts := badger.DefaultIteratorOptions
- opts.PrefetchSize = 10
- it := txn.NewIterator(opts)
- defer it.Close()
- for it.Rewind(); it.Valid(); it.Next() {
- item := it.Item()
- k := item.Key()
- err := item.Value(func(v []byte) error {
- fmt.Printf("key=%s, value=%s\n", k, v)
- return nil
- })
- if err != nil {
- return err
- }
- }
- return nil
-})
-```
-
-The iterator allows you to move to a specific point in the list of keys and move
-forward or backward through the keys one at a time.
-
-By default, Badger prefetches the values of the next 100 items. You can adjust
-that with the `IteratorOptions.PrefetchSize` field. However, setting it to
-a value higher than GOMAXPROCS (which we recommend to be 128 or higher)
-shouldn’t give any additional benefits. You can also turn off the fetching of
-values altogether. See section below on key-only iteration.
-
-#### Prefix scans
-To iterate over a key prefix, you can combine `Seek()` and `ValidForPrefix()`:
-
-```go
-db.View(func(txn *badger.Txn) error {
- it := txn.NewIterator(badger.DefaultIteratorOptions)
- defer it.Close()
- prefix := []byte("1234")
- for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
- item := it.Item()
- k := item.Key()
- err := item.Value(func(v []byte) error {
- fmt.Printf("key=%s, value=%s\n", k, v)
- return nil
- })
- if err != nil {
- return err
- }
- }
- return nil
-})
-```
-
-#### Key-only iteration
-Badger supports a unique mode of iteration called _key-only_ iteration. It is
-several orders of magnitude faster than regular iteration, because it involves
-access to the LSM-tree only, which is usually resident entirely in RAM. To
-enable key-only iteration, you need to set the `IteratorOptions.PrefetchValues`
-field to `false`. This can also be used to do sparse reads for selected keys
-during an iteration, by calling `item.Value()` only when required.
-
-```go
-err := db.View(func(txn *badger.Txn) error {
- opts := badger.DefaultIteratorOptions
- opts.PrefetchValues = false
- it := txn.NewIterator(opts)
- defer it.Close()
- for it.Rewind(); it.Valid(); it.Next() {
- item := it.Item()
- k := item.Key()
- fmt.Printf("key=%s\n", k)
- }
- return nil
-})
-```
-
-### Stream
-Badger provides a Stream framework, which concurrently iterates over all or a
-portion of the DB, converting data into custom key-values, and streams it out
-serially to be sent over network, written to disk, or even written back to
-Badger. This is a lot faster way to iterate over Badger than using a single
-Iterator. Stream supports Badger in both managed and normal mode.
-
-Stream uses the natural boundaries created by SSTables within the LSM tree, to
-quickly generate key ranges. Each goroutine then picks a range and runs an
-iterator to iterate over it. Each iterator iterates over all versions of values
-and is created from the same transaction, thus working over a snapshot of the
-DB. Every time a new key is encountered, it calls `ChooseKey(item)`, followed
-by `KeyToList(key, itr)`. This allows a user to select or reject that key, and
-if selected, convert the value versions into custom key-values. The goroutine
-batches up 4MB worth of key-values, before sending it over to a channel.
-Another goroutine further batches up data from this channel using *smart
-batching* algorithm and calls `Send` serially.
-
-This framework is designed for high throughput key-value iteration, spreading
-the work of iteration across many goroutines. `DB.Backup` uses this framework to
-provide full and incremental backups quickly. Dgraph is a heavy user of this
-framework. In fact, this framework was developed and used within Dgraph, before
-getting ported over to Badger.
-
-```go
-stream := db.NewStream()
-// db.NewStreamAt(readTs) for managed mode.
-
-// -- Optional settings
-stream.NumGo = 16 // Set number of goroutines to use for iteration.
-stream.Prefix = []byte("some-prefix") // Leave nil for iteration over the whole DB.
-stream.LogPrefix = "Badger.Streaming" // For identifying stream logs. Outputs to Logger.
-
-// ChooseKey is called concurrently for every key. If left nil, assumes true by default.
-stream.ChooseKey = func(item *badger.Item) bool {
- return bytes.HasSuffix(item.Key(), []byte("er"))
-}
-
-// KeyToList is called concurrently for chosen keys. This can be used to convert
-// Badger data into custom key-values. If nil, uses stream.ToList, a default
-// implementation, which picks all valid key-values.
-stream.KeyToList = nil
-
-// -- End of optional settings.
-
-// Send is called serially, while Stream.Orchestrate is running.
-stream.Send = func(list *pb.KVList) error {
- return proto.MarshalText(w, list) // Write to w.
-}
-
-// Run the stream
-if err := stream.Orchestrate(context.Background()); err != nil {
- return err
-}
-// Done.
-```
-
-### Garbage Collection
-Badger values need to be garbage collected, because of two reasons:
-
-* Badger keeps values separately from the LSM tree. This means that the compaction operations
-that clean up the LSM tree do not touch the values at all. Values need to be cleaned up
-separately.
-
-* Concurrent read/write transactions could leave behind multiple values for a single key, because they
-are stored with different versions. These could accumulate, and take up unneeded space beyond the
-time these older versions are needed.
-
-Badger relies on the client to perform garbage collection at a time of their choosing. It provides
-the following method, which can be invoked at an appropriate time:
-
-* `DB.RunValueLogGC()`: This method is designed to do garbage collection while
- Badger is online. Along with randomly picking a file, it uses statistics generated by the
- LSM-tree compactions to pick files that are likely to lead to maximum space
- reclamation. It is recommended to be called during periods of low activity in
- your system, or periodically. One call would only result in removal of at max
- one log file. As an optimization, you could also immediately re-run it whenever
- it returns nil error (indicating a successful value log GC), as shown below.
-
- ```go
- ticker := time.NewTicker(5 * time.Minute)
- defer ticker.Stop()
- for range ticker.C {
- again:
- err := db.RunValueLogGC(0.7)
- if err == nil {
- goto again
- }
- }
- ```
-
-* `DB.PurgeOlderVersions()`: This method is **DEPRECATED** since v1.5.0. Now, Badger's LSM tree automatically discards older/invalid versions of keys.
-
-**Note: The RunValueLogGC method would not garbage collect the latest value log.**
-
-### Database backup
-There are two public API methods `DB.Backup()` and `DB.Load()` which can be
-used to do online backups and restores. Badger v0.9 provides a CLI tool
-`badger`, which can do offline backup/restore. Make sure you have `$GOPATH/bin`
-in your PATH to use this tool.
-
-The command below will create a version-agnostic backup of the database, to a
-file `badger.bak` in the current working directory
-
-```
-badger backup --dir
-```
-
-To restore `badger.bak` in the current working directory to a new database:
-
-```
-badger restore --dir
-```
-
-See `badger --help` for more details.
-
-If you have a Badger database that was created using v0.8 (or below), you can
-use the `badger_backup` tool provided in v0.8.1, and then restore it using the
-command above to upgrade your database to work with the latest version.
-
-```
-badger_backup --dir --backup-file badger.bak
-```
-
-We recommend all users to use the `Backup` and `Restore` APIs and tools. However,
-Badger is also rsync-friendly because all files are immutable, barring the
-latest value log which is append-only. So, rsync can be used as rudimentary way
-to perform a backup. In the following script, we repeat rsync to ensure that the
-LSM tree remains consistent with the MANIFEST file while doing a full backup.
-
-```
-#!/bin/bash
-set -o history
-set -o histexpand
-# Makes a complete copy of a Badger database directory.
-# Repeat rsync if the MANIFEST and SSTables are updated.
-rsync -avz --delete db/ dst
-while !! | grep -q "(MANIFEST\|\.sst)$"; do :; done
-```
-
-### Memory usage
-Badger's memory usage can be managed by tweaking several options available in
-the `Options` struct that is passed in when opening the database using
-`DB.Open`.
-
-- `Options.ValueLogLoadingMode` can be set to `options.FileIO` (instead of the
- default `options.MemoryMap`) to avoid memory-mapping log files. This can be
- useful in environments with low RAM.
-- Number of memtables (`Options.NumMemtables`)
- - If you modify `Options.NumMemtables`, also adjust `Options.NumLevelZeroTables` and
- `Options.NumLevelZeroTablesStall` accordingly.
-- Number of concurrent compactions (`Options.NumCompactors`)
-- Mode in which LSM tree is loaded (`Options.TableLoadingMode`)
-- Size of table (`Options.MaxTableSize`)
-- Size of value log file (`Options.ValueLogFileSize`)
-
-If you want to decrease the memory usage of Badger instance, tweak these
-options (ideally one at a time) until you achieve the desired
-memory usage.
-
-### Statistics
-Badger records metrics using the [expvar] package, which is included in the Go
-standard library. All the metrics are documented in [y/metrics.go][metrics]
-file.
-
-`expvar` package adds a handler in to the default HTTP server (which has to be
-started explicitly), and serves up the metrics at the `/debug/vars` endpoint.
-These metrics can then be collected by a system like [Prometheus], to get
-better visibility into what Badger is doing.
-
-[expvar]: https://golang.org/pkg/expvar/
-[metrics]: https://github.com/dgraph-io/badger/blob/master/y/metrics.go
-[Prometheus]: https://prometheus.io/
-
-## Resources
-
-### Blog Posts
-1. [Introducing Badger: A fast key-value store written natively in
-Go](https://open.dgraph.io/post/badger/)
-2. [Make Badger crash resilient with ALICE](https://blog.dgraph.io/post/alice/)
-3. [Badger vs LMDB vs BoltDB: Benchmarking key-value databases in Go](https://blog.dgraph.io/post/badger-lmdb-boltdb/)
-4. [Concurrent ACID Transactions in Badger](https://blog.dgraph.io/post/badger-txn/)
-
-## Design
-Badger was written with these design goals in mind:
-
-- Write a key-value database in pure Go.
-- Use latest research to build the fastest KV database for data sets spanning terabytes.
-- Optimize for SSDs.
-
-Badger’s design is based on a paper titled _[WiscKey: Separating Keys from
-Values in SSD-conscious Storage][wisckey]_.
-
-[wisckey]: https://www.usenix.org/system/files/conference/fast16/fast16-papers-lu.pdf
-
-### Comparisons
-| Feature | Badger | RocksDB | BoltDB |
-| ------- | ------ | ------- | ------ |
-| Design | LSM tree with value log | LSM tree only | B+ tree |
-| High Read throughput | Yes | No | Yes |
-| High Write throughput | Yes | Yes | No |
-| Designed for SSDs | Yes (with latest research 1 ) | Not specifically 2 | No |
-| Embeddable | Yes | Yes | Yes |
-| Sorted KV access | Yes | Yes | Yes |
-| Pure Go (no Cgo) | Yes | No | Yes |
-| Transactions | Yes, ACID, concurrent with SSI3 | Yes (but non-ACID) | Yes, ACID |
-| Snapshots | Yes | Yes | Yes |
-| TTL support | Yes | Yes | No |
-| 3D access (key-value-version) | Yes4 | No | No |
-
-1 The [WISCKEY paper][wisckey] (on which Badger is based) saw big
-wins with separating values from keys, significantly reducing the write
-amplification compared to a typical LSM tree.
-
-2 RocksDB is an SSD optimized version of LevelDB, which was designed specifically for rotating disks.
-As such RocksDB's design isn't aimed at SSDs.
-
-3 SSI: Serializable Snapshot Isolation. For more details, see the blog post [Concurrent ACID Transactions in Badger](https://blog.dgraph.io/post/badger-txn/)
-
-4 Badger provides direct access to value versions via its Iterator API.
-Users can also specify how many versions to keep per key via Options.
-
-### Benchmarks
-We have run comprehensive benchmarks against RocksDB, Bolt and LMDB. The
-benchmarking code, and the detailed logs for the benchmarks can be found in the
-[badger-bench] repo. More explanation, including graphs can be found the blog posts (linked
-above).
-
-[badger-bench]: https://github.com/dgraph-io/badger-bench
-
-## Other Projects Using Badger
-Below is a list of known projects that use Badger:
-
-* [0-stor](https://github.com/zero-os/0-stor) - Single device object store.
-* [Dgraph](https://github.com/dgraph-io/dgraph) - Distributed graph database.
-* [Dispatch Protocol](https://github.com/dispatchlabs/disgo) - Blockchain protocol for distributed application data analytics.
-* [Sandglass](https://github.com/celrenheit/sandglass) - distributed, horizontally scalable, persistent, time sorted message queue.
-* [Usenet Express](https://usenetexpress.com/) - Serving over 300TB of data with Badger.
-* [go-ipfs](https://github.com/ipfs/go-ipfs) - Go client for the InterPlanetary File System (IPFS), a new hypermedia distribution protocol.
-* [gorush](https://github.com/appleboy/gorush) - A push notification server written in Go.
-* [emitter](https://github.com/emitter-io/emitter) - Scalable, low latency, distributed pub/sub broker with message storage, uses MQTT, gossip and badger.
-* [GarageMQ](https://github.com/valinurovam/garagemq) - AMQP server written in Go.
-* [RedixDB](https://alash3al.github.io/redix/) - A real-time persistent key-value store with the same redis protocol.
-* [BBVA](https://github.com/BBVA/raft-badger) - Raft backend implementation using BadgerDB for Hashicorp raft.
-* [Riot](https://github.com/go-ego/riot) - An open-source, distributed search engine.
-* [Fantom](https://github.com/Fantom-foundation/go-lachesis) - aBFT Consensus platform for distributed applications.
-* [decred](https://github.com/decred/dcrdata) - An open, progressive, and self-funding cryptocurrency with a system of community-based governance integrated into its blockchain.
-* [OpenNetSys](https://github.com/opennetsys/c3-go) - Create useful dApps in any software language.
-* [HoneyTrap](https://github.com/honeytrap/honeytrap) - An extensible and opensource system for running, monitoring and managing honeypots.
-* [Insolar](https://github.com/insolar/insolar) - Enterprise-ready blockchain platform.
-* [IoTeX](https://github.com/iotexproject/iotex-core) - The next generation of the decentralized network for IoT powered by scalability- and privacy-centric blockchains.
-* [go-sessions](https://github.com/kataras/go-sessions) - The sessions manager for Go net/http and fasthttp.
-* [Babble](https://github.com/mosaicnetworks/babble) - BFT Consensus platform for distributed applications.
-* [Tormenta](https://github.com/jpincas/tormenta) - Embedded object-persistence layer / simple JSON database for Go projects.
-* [BadgerHold](https://github.com/timshannon/badgerhold) - An embeddable NoSQL store for querying Go types built on Badger
-* [Goblero](https://github.com/didil/goblero) - Pure Go embedded persistent job queue backed by BadgerDB
-* [Surfline](https://www.surfline.com) - Serving global wave and weather forecast data with Badger.
-* [Cete](https://github.com/mosuka/cete) - Simple and highly available distributed key-value store built on Badger. Makes it easy bringing up a cluster of Badger with Raft consensus algorithm by hashicorp/raft.
-* [Volument](https://volument.com/) - A new take on website analytics backed by Badger.
-
-If you are using Badger in a project please send a pull request to add it to the list.
-
-## Frequently Asked Questions
-- **My writes are getting stuck. Why?**
-
-**Update: With the new `Value(func(v []byte))` API, this deadlock can no longer
-happen.**
-
-The following is true for users on Badger v1.x.
-
-This can happen if a long running iteration with `Prefetch` is set to false, but
-a `Item::Value` call is made internally in the loop. That causes Badger to
-acquire read locks over the value log files to avoid value log GC removing the
-file from underneath. As a side effect, this also blocks a new value log GC
-file from being created, when the value log file boundary is hit.
-
-Please see Github issues [#293](https://github.com/dgraph-io/badger/issues/293)
-and [#315](https://github.com/dgraph-io/badger/issues/315).
-
-There are multiple workarounds during iteration:
-
-1. Use `Item::ValueCopy` instead of `Item::Value` when retrieving value.
-1. Set `Prefetch` to true. Badger would then copy over the value and release the
- file lock immediately.
-1. When `Prefetch` is false, don't call `Item::Value` and do a pure key-only
- iteration. This might be useful if you just want to delete a lot of keys.
-1. Do the writes in a separate transaction after the reads.
-
-- **My writes are really slow. Why?**
-
-Are you creating a new transaction for every single key update, and waiting for
-it to `Commit` fully before creating a new one? This will lead to very low
-throughput.
-
-We have created `WriteBatch` API which provides a way to batch up
-many updates into a single transaction and `Commit` that transaction using
-callbacks to avoid blocking. This amortizes the cost of a transaction really
-well, and provides the most efficient way to do bulk writes.
-
-```go
-wb := db.NewWriteBatch()
-defer wb.Cancel()
-
-for i := 0; i < N; i++ {
- err := wb.Set(key(i), value(i), 0) // Will create txns as needed.
- handle(err)
-}
-handle(wb.Flush()) // Wait for all txns to finish.
-```
-
-Note that `WriteBatch` API does not allow any reads. For read-modify-write
-workloads, you should be using the `Transaction` API.
-
-- **I don't see any disk write. Why?**
-
-If you're using Badger with `SyncWrites=false`, then your writes might not be written to value log
-and won't get synced to disk immediately. Writes to LSM tree are done inmemory first, before they
-get compacted to disk. The compaction would only happen once `MaxTableSize` has been reached. So, if
-you're doing a few writes and then checking, you might not see anything on disk. Once you `Close`
-the database, you'll see these writes on disk.
-
-- **Reverse iteration doesn't give me the right results.**
-
-Just like forward iteration goes to the first key which is equal or greater than the SEEK key, reverse iteration goes to the first key which is equal or lesser than the SEEK key. Therefore, SEEK key would not be part of the results. You can typically add a `0xff` byte as a suffix to the SEEK key to include it in the results. See the following issues: [#436](https://github.com/dgraph-io/badger/issues/436) and [#347](https://github.com/dgraph-io/badger/issues/347).
-
-- **Which instances should I use for Badger?**
-
-We recommend using instances which provide local SSD storage, without any limit
-on the maximum IOPS. In AWS, these are storage optimized instances like i3. They
-provide local SSDs which clock 100K IOPS over 4KB blocks easily.
-
-- **I'm getting a closed channel error. Why?**
-
-```
-panic: close of closed channel
-panic: send on closed channel
-```
-
-If you're seeing panics like above, this would be because you're operating on a closed DB. This can happen, if you call `Close()` before sending a write, or multiple times. You should ensure that you only call `Close()` once, and all your read/write operations finish before closing.
-
-- **Are there any Go specific settings that I should use?**
-
-We *highly* recommend setting a high number for GOMAXPROCS, which allows Go to
-observe the full IOPS throughput provided by modern SSDs. In Dgraph, we have set
-it to 128. For more details, [see this
-thread](https://groups.google.com/d/topic/golang-nuts/jPb_h3TvlKE/discussion).
-
-- **Are there any linux specific settings that I should use?**
-
-We recommend setting max file descriptors to a high number depending upon the expected size of you data.
-
-## Contact
-- Please use [discuss.dgraph.io](https://discuss.dgraph.io) for questions, feature requests and discussions.
-- Please use [Github issue tracker](https://github.com/dgraph-io/badger/issues) for filing bugs or feature requests.
-- Join [![Slack Status](http://slack.dgraph.io/badge.svg)](http://slack.dgraph.io).
-- Follow us on Twitter [@dgraphlabs](https://twitter.com/dgraphlabs).
-
diff --git a/vendor/github.com/dgraph-io/badger/VERSIONING.md b/vendor/github.com/dgraph-io/badger/VERSIONING.md
deleted file mode 100644
index a890a36f..00000000
--- a/vendor/github.com/dgraph-io/badger/VERSIONING.md
+++ /dev/null
@@ -1,47 +0,0 @@
-# Serialization Versioning: Semantic Versioning for databases
-
-Semantic Versioning, commonly known as SemVer, is a great idea that has been very widely adopted as
-a way to decide how to name software versions. The whole concept is very well summarized on
-semver.org with the following lines:
-
-> Given a version number MAJOR.MINOR.PATCH, increment the:
->
-> 1. MAJOR version when you make incompatible API changes,
-> 2. MINOR version when you add functionality in a backwards-compatible manner, and
-> 3. PATCH version when you make backwards-compatible bug fixes.
->
-> Additional labels for pre-release and build metadata are available as extensions to the
-> MAJOR.MINOR.PATCH format.
-
-Unfortunately, API changes are not the most important changes for libraries that serialize data for
-later consumption. For these libraries, such as BadgerDB, changes to the API are much easier to
-handle than change to the data format used to store data on disk.
-
-## Serialization Version specification
-
-Serialization Versioning, like Semantic Versioning, uses 3 numbers and also calls them
-MAJOR.MINOR.PATCH, but the semantics of the numbers are slightly modified:
-
-Given a version number MAJOR.MINOR.PATCH, increment the:
-
-- MAJOR version when you make changes that require a transformation of the dataset before it can be
-used again.
-- MINOR version when old datasets are still readable but the API might have changed in
-backwards-compatible or incompatible ways.
-- PATCH version when you make backwards-compatible bug fixes.
-
-Additional labels for pre-release and build metadata are available as extensions to the
-MAJOR.MINOR.PATCH format.
-
-Following this naming strategy, migration from v1.x to v2.x requires a migration strategy for your
-existing dataset, and as such has to be carefully planned. Migrations in between different minor
-versions (e.g. v1.5.x and v1.6.x) might break your build, as the API *might* have changed, but once
-your code compiles there's no need for any data migration. Lastly, changes in between two different
-patch versions should never break your build or dataset.
-
-For more background on our decision to adopt Serialization Versioning, read the blog post
-[Semantic Versioning, Go Modules, and Databases][blog] and the original proposal on
-[this comment on Dgraph's Discuss forum][discuss].
-
-[blog]: https://blog.dgraph.io/post/serialization-versioning/
-[discuss]: https://discuss.dgraph.io/t/go-modules-on-badger-and-dgraph/4662/7
\ No newline at end of file
diff --git a/vendor/github.com/dgraph-io/badger/appveyor.yml b/vendor/github.com/dgraph-io/badger/appveyor.yml
deleted file mode 100644
index afa54ca0..00000000
--- a/vendor/github.com/dgraph-io/badger/appveyor.yml
+++ /dev/null
@@ -1,49 +0,0 @@
-# version format
-version: "{build}"
-
-# Operating system (build VM template)
-os: Windows Server 2012 R2
-
-# Platform.
-platform: x64
-
-clone_folder: c:\gopath\src\github.com\dgraph-io\badger
-
-# Environment variables
-environment:
- GOVERSION: 1.8.3
- GOPATH: c:\gopath
- GO111MODULE: on
-
-# scripts that run after cloning repository
-install:
- - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
- - go version
- - go env
- - python --version
-
-# To run your custom scripts instead of automatic MSBuild
-build_script:
- # We need to disable firewall - https://github.com/appveyor/ci/issues/1579#issuecomment-309830648
- - ps: Disable-NetFirewallRule -DisplayName 'File and Printer Sharing (SMB-Out)'
- - cd c:\gopath\src\github.com\dgraph-io\badger
- - git branch
- - go get -t ./...
-
-# To run your custom scripts instead of automatic tests
-test_script:
- # Unit tests
- - ps: Add-AppveyorTest "Unit Tests" -Outcome Running
- - go test -v github.com/dgraph-io/badger/...
- - go test -v -vlog_mmap=false github.com/dgraph-io/badger/...
- - ps: Update-AppveyorTest "Unit Tests" -Outcome Passed
-
-notifications:
- - provider: Email
- to:
- - pawan@dgraph.io
- on_build_failure: true
- on_build_status_changed: true
-# to disable deployment
-deploy: off
-
diff --git a/vendor/github.com/dgraph-io/badger/backup.go b/vendor/github.com/dgraph-io/badger/backup.go
deleted file mode 100644
index 2569b310..00000000
--- a/vendor/github.com/dgraph-io/badger/backup.go
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "bufio"
- "bytes"
- "context"
- "encoding/binary"
- "io"
-
- "github.com/dgraph-io/badger/pb"
- "github.com/dgraph-io/badger/y"
-)
-
-// Backup is a wrapper function over Stream.Backup to generate full and incremental backups of the
-// DB. For more control over how many goroutines are used to generate the backup, or if you wish to
-// backup only a certain range of keys, use Stream.Backup directly.
-func (db *DB) Backup(w io.Writer, since uint64) (uint64, error) {
- stream := db.NewStream()
- stream.LogPrefix = "DB.Backup"
- return stream.Backup(w, since)
-}
-
-// Backup dumps a protobuf-encoded list of all entries in the database into the
-// given writer, that are newer than the specified version. It returns a
-// timestamp indicating when the entries were dumped which can be passed into a
-// later invocation to generate an incremental dump, of entries that have been
-// added/modified since the last invocation of Stream.Backup().
-//
-// This can be used to backup the data in a database at a given point in time.
-func (stream *Stream) Backup(w io.Writer, since uint64) (uint64, error) {
- stream.KeyToList = func(key []byte, itr *Iterator) (*pb.KVList, error) {
- list := &pb.KVList{}
- for ; itr.Valid(); itr.Next() {
- item := itr.Item()
- if !bytes.Equal(item.Key(), key) {
- return list, nil
- }
- if item.Version() < since {
- // Ignore versions less than given timestamp, or skip older
- // versions of the given key.
- return list, nil
- }
-
- var valCopy []byte
- if !item.IsDeletedOrExpired() {
- // No need to copy value, if item is deleted or expired.
- var err error
- valCopy, err = item.ValueCopy(nil)
- if err != nil {
- stream.db.opt.Errorf("Key [%x, %d]. Error while fetching value [%v]\n",
- item.Key(), item.Version(), err)
- return nil, err
- }
- }
-
- // clear txn bits
- meta := item.meta &^ (bitTxn | bitFinTxn)
- kv := &pb.KV{
- Key: item.KeyCopy(nil),
- Value: valCopy,
- UserMeta: []byte{item.UserMeta()},
- Version: item.Version(),
- ExpiresAt: item.ExpiresAt(),
- Meta: []byte{meta},
- }
- list.Kv = append(list.Kv, kv)
-
- switch {
- case item.DiscardEarlierVersions():
- // If we need to discard earlier versions of this item, add a delete
- // marker just below the current version.
- list.Kv = append(list.Kv, &pb.KV{
- Key: item.KeyCopy(nil),
- Version: item.Version() - 1,
- Meta: []byte{bitDelete},
- })
- return list, nil
-
- case item.IsDeletedOrExpired():
- return list, nil
- }
- }
- return list, nil
- }
-
- var maxVersion uint64
- stream.Send = func(list *pb.KVList) error {
- for _, kv := range list.Kv {
- if maxVersion < kv.Version {
- maxVersion = kv.Version
- }
- }
- return writeTo(list, w)
- }
-
- if err := stream.Orchestrate(context.Background()); err != nil {
- return 0, err
- }
- return maxVersion, nil
-}
-
-func writeTo(list *pb.KVList, w io.Writer) error {
- if err := binary.Write(w, binary.LittleEndian, uint64(list.Size())); err != nil {
- return err
- }
- buf, err := list.Marshal()
- if err != nil {
- return err
- }
- _, err = w.Write(buf)
- return err
-}
-
-// KVLoader is used to write KVList objects in to badger. It can be used to restore a backup.
-type KVLoader struct {
- db *DB
- throttle *y.Throttle
- entries []*Entry
-}
-
-// NewKVLoader returns a new instance of KVLoader.
-func (db *DB) NewKVLoader(maxPendingWrites int) *KVLoader {
- return &KVLoader{
- db: db,
- throttle: y.NewThrottle(maxPendingWrites),
- }
-}
-
-// Set writes the key-value pair to the database.
-func (l *KVLoader) Set(kv *pb.KV) error {
- var userMeta, meta byte
- if len(kv.UserMeta) > 0 {
- userMeta = kv.UserMeta[0]
- }
- if len(kv.Meta) > 0 {
- meta = kv.Meta[0]
- }
-
- l.entries = append(l.entries, &Entry{
- Key: y.KeyWithTs(kv.Key, kv.Version),
- Value: kv.Value,
- UserMeta: userMeta,
- ExpiresAt: kv.ExpiresAt,
- meta: meta,
- })
- if len(l.entries) >= 1000 {
- return l.send()
- }
- return nil
-}
-
-func (l *KVLoader) send() error {
- if err := l.throttle.Do(); err != nil {
- return err
- }
- if err := l.db.batchSetAsync(l.entries, func(err error) {
- l.throttle.Done(err)
- }); err != nil {
- return err
- }
-
- l.entries = make([]*Entry, 0, 1000)
- return nil
-}
-
-// Finish is meant to be called after all the key-value pairs have been loaded.
-func (l *KVLoader) Finish() error {
- if len(l.entries) > 0 {
- if err := l.send(); err != nil {
- return err
- }
- }
- return l.throttle.Finish()
-}
-
-// Load reads a protobuf-encoded list of all entries from a reader and writes
-// them to the database. This can be used to restore the database from a backup
-// made by calling DB.Backup(). If more complex logic is needed to restore a badger
-// backup, the KVLoader interface should be used instead.
-//
-// DB.Load() should be called on a database that is not running any other
-// concurrent transactions while it is running.
-func (db *DB) Load(r io.Reader, maxPendingWrites int) error {
- br := bufio.NewReaderSize(r, 16<<10)
- unmarshalBuf := make([]byte, 1<<10)
-
- ldr := db.NewKVLoader(maxPendingWrites)
- for {
- var sz uint64
- err := binary.Read(br, binary.LittleEndian, &sz)
- if err == io.EOF {
- break
- } else if err != nil {
- return err
- }
-
- if cap(unmarshalBuf) < int(sz) {
- unmarshalBuf = make([]byte, sz)
- }
-
- if _, err = io.ReadFull(br, unmarshalBuf[:sz]); err != nil {
- return err
- }
-
- list := &pb.KVList{}
- if err := list.Unmarshal(unmarshalBuf[:sz]); err != nil {
- return err
- }
-
- for _, kv := range list.Kv {
- if err := ldr.Set(kv); err != nil {
- return err
- }
-
- // Update nextTxnTs, memtable stores this
- // timestamp in badger head when flushed.
- if kv.Version >= db.orc.nextTxnTs {
- db.orc.nextTxnTs = kv.Version + 1
- }
- }
- }
-
- if err := ldr.Finish(); err != nil {
- return err
- }
- db.orc.txnMark.Done(db.orc.nextTxnTs - 1)
- return nil
-}
diff --git a/vendor/github.com/dgraph-io/badger/batch.go b/vendor/github.com/dgraph-io/badger/batch.go
deleted file mode 100644
index c94e0fed..00000000
--- a/vendor/github.com/dgraph-io/badger/batch.go
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Copyright 2018 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "sync"
-
- "github.com/dgraph-io/badger/y"
-)
-
-// WriteBatch holds the necessary info to perform batched writes.
-type WriteBatch struct {
- sync.Mutex
- txn *Txn
- db *DB
- throttle *y.Throttle
- err error
-}
-
-// NewWriteBatch creates a new WriteBatch. This provides a way to conveniently do a lot of writes,
-// batching them up as tightly as possible in a single transaction and using callbacks to avoid
-// waiting for them to commit, thus achieving good performance. This API hides away the logic of
-// creating and committing transactions. Due to the nature of SSI guaratees provided by Badger,
-// blind writes can never encounter transaction conflicts (ErrConflict).
-func (db *DB) NewWriteBatch() *WriteBatch {
- return &WriteBatch{
- db: db,
- txn: db.newTransaction(true, true),
- throttle: y.NewThrottle(16),
- }
-}
-
-// SetMaxPendingTxns sets a limit on maximum number of pending transactions while writing batches.
-// This function should be called before using WriteBatch. Default value of MaxPendingTxns is
-// 16 to minimise memory usage.
-func (wb *WriteBatch) SetMaxPendingTxns(max int) {
- wb.throttle = y.NewThrottle(max)
-}
-
-// Cancel function must be called if there's a chance that Flush might not get
-// called. If neither Flush or Cancel is called, the transaction oracle would
-// never get a chance to clear out the row commit timestamp map, thus causing an
-// unbounded memory consumption. Typically, you can call Cancel as a defer
-// statement right after NewWriteBatch is called.
-//
-// Note that any committed writes would still go through despite calling Cancel.
-func (wb *WriteBatch) Cancel() {
- if err := wb.throttle.Finish(); err != nil {
- wb.db.opt.Errorf("WatchBatch.Cancel error while finishing: %v", err)
- }
- wb.txn.Discard()
-}
-
-func (wb *WriteBatch) callback(err error) {
- // sync.WaitGroup is thread-safe, so it doesn't need to be run inside wb.Lock.
- defer wb.throttle.Done(err)
- if err == nil {
- return
- }
-
- wb.Lock()
- defer wb.Unlock()
- if wb.err != nil {
- return
- }
- wb.err = err
-}
-
-// SetEntry is the equivalent of Txn.SetEntry.
-func (wb *WriteBatch) SetEntry(e *Entry) error {
- wb.Lock()
- defer wb.Unlock()
-
- if err := wb.txn.SetEntry(e); err != ErrTxnTooBig {
- return err
- }
- // Txn has reached it's zenith. Commit now.
- if cerr := wb.commit(); cerr != nil {
- return cerr
- }
- // This time the error must not be ErrTxnTooBig, otherwise, we make the
- // error permanent.
- if err := wb.txn.SetEntry(e); err != nil {
- wb.err = err
- return err
- }
- return nil
-}
-
-// Set is equivalent of Txn.Set().
-func (wb *WriteBatch) Set(k, v []byte) error {
- e := &Entry{Key: k, Value: v}
- return wb.SetEntry(e)
-}
-
-// Delete is equivalent of Txn.Delete.
-func (wb *WriteBatch) Delete(k []byte) error {
- wb.Lock()
- defer wb.Unlock()
-
- if err := wb.txn.Delete(k); err != ErrTxnTooBig {
- return err
- }
- if err := wb.commit(); err != nil {
- return err
- }
- if err := wb.txn.Delete(k); err != nil {
- wb.err = err
- return err
- }
- return nil
-}
-
-// Caller to commit must hold a write lock.
-func (wb *WriteBatch) commit() error {
- if wb.err != nil {
- return wb.err
- }
- if err := wb.throttle.Do(); err != nil {
- return err
- }
- wb.txn.CommitWith(wb.callback)
- wb.txn = wb.db.newTransaction(true, true)
- wb.txn.readTs = 0 // We're not reading anything.
- return wb.err
-}
-
-// Flush must be called at the end to ensure that any pending writes get committed to Badger. Flush
-// returns any error stored by WriteBatch.
-func (wb *WriteBatch) Flush() error {
- wb.Lock()
- _ = wb.commit()
- wb.txn.Discard()
- wb.Unlock()
-
- if err := wb.throttle.Finish(); err != nil {
- return err
- }
-
- return wb.err
-}
-
-// Error returns any errors encountered so far. No commits would be run once an error is detected.
-func (wb *WriteBatch) Error() error {
- wb.Lock()
- defer wb.Unlock()
- return wb.err
-}
diff --git a/vendor/github.com/dgraph-io/badger/compaction.go b/vendor/github.com/dgraph-io/badger/compaction.go
deleted file mode 100644
index 931d5666..00000000
--- a/vendor/github.com/dgraph-io/badger/compaction.go
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "bytes"
- "fmt"
- "log"
- "math"
- "sync"
-
- "golang.org/x/net/trace"
-
- "github.com/dgraph-io/badger/table"
- "github.com/dgraph-io/badger/y"
-)
-
-type keyRange struct {
- left []byte
- right []byte
- inf bool
-}
-
-var infRange = keyRange{inf: true}
-
-func (r keyRange) String() string {
- return fmt.Sprintf("[left=%x, right=%x, inf=%v]", r.left, r.right, r.inf)
-}
-
-func (r keyRange) equals(dst keyRange) bool {
- return bytes.Equal(r.left, dst.left) &&
- bytes.Equal(r.right, dst.right) &&
- r.inf == dst.inf
-}
-
-func (r keyRange) overlapsWith(dst keyRange) bool {
- if r.inf || dst.inf {
- return true
- }
-
- // If my left is greater than dst right, we have no overlap.
- if y.CompareKeys(r.left, dst.right) > 0 {
- return false
- }
- // If my right is less than dst left, we have no overlap.
- if y.CompareKeys(r.right, dst.left) < 0 {
- return false
- }
- // We have overlap.
- return true
-}
-
-func getKeyRange(tables []*table.Table) keyRange {
- if len(tables) == 0 {
- return keyRange{}
- }
- smallest := tables[0].Smallest()
- biggest := tables[0].Biggest()
- for i := 1; i < len(tables); i++ {
- if y.CompareKeys(tables[i].Smallest(), smallest) < 0 {
- smallest = tables[i].Smallest()
- }
- if y.CompareKeys(tables[i].Biggest(), biggest) > 0 {
- biggest = tables[i].Biggest()
- }
- }
- return keyRange{
- left: y.KeyWithTs(y.ParseKey(smallest), math.MaxUint64),
- right: y.KeyWithTs(y.ParseKey(biggest), 0),
- }
-}
-
-type levelCompactStatus struct {
- ranges []keyRange
- delSize int64
-}
-
-func (lcs *levelCompactStatus) debug() string {
- var b bytes.Buffer
- for _, r := range lcs.ranges {
- b.WriteString(r.String())
- }
- return b.String()
-}
-
-func (lcs *levelCompactStatus) overlapsWith(dst keyRange) bool {
- for _, r := range lcs.ranges {
- if r.overlapsWith(dst) {
- return true
- }
- }
- return false
-}
-
-func (lcs *levelCompactStatus) remove(dst keyRange) bool {
- final := lcs.ranges[:0]
- var found bool
- for _, r := range lcs.ranges {
- if !r.equals(dst) {
- final = append(final, r)
- } else {
- found = true
- }
- }
- lcs.ranges = final
- return found
-}
-
-type compactStatus struct {
- sync.RWMutex
- levels []*levelCompactStatus
-}
-
-func (cs *compactStatus) toLog(tr trace.Trace) {
- cs.RLock()
- defer cs.RUnlock()
-
- tr.LazyPrintf("Compaction status:")
- for i, l := range cs.levels {
- if l.debug() == "" {
- continue
- }
- tr.LazyPrintf("[%d] %s", i, l.debug())
- }
-}
-
-func (cs *compactStatus) overlapsWith(level int, this keyRange) bool {
- cs.RLock()
- defer cs.RUnlock()
-
- thisLevel := cs.levels[level]
- return thisLevel.overlapsWith(this)
-}
-
-func (cs *compactStatus) delSize(l int) int64 {
- cs.RLock()
- defer cs.RUnlock()
- return cs.levels[l].delSize
-}
-
-type thisAndNextLevelRLocked struct{}
-
-// compareAndAdd will check whether we can run this compactDef. That it doesn't overlap with any
-// other running compaction. If it can be run, it would store this run in the compactStatus state.
-func (cs *compactStatus) compareAndAdd(_ thisAndNextLevelRLocked, cd compactDef) bool {
- cs.Lock()
- defer cs.Unlock()
-
- level := cd.thisLevel.level
-
- y.AssertTruef(level < len(cs.levels)-1, "Got level %d. Max levels: %d", level, len(cs.levels))
- thisLevel := cs.levels[level]
- nextLevel := cs.levels[level+1]
-
- if thisLevel.overlapsWith(cd.thisRange) {
- return false
- }
- if nextLevel.overlapsWith(cd.nextRange) {
- return false
- }
- // Check whether this level really needs compaction or not. Otherwise, we'll end up
- // running parallel compactions for the same level.
- // Update: We should not be checking size here. Compaction priority already did the size checks.
- // Here we should just be executing the wish of others.
-
- thisLevel.ranges = append(thisLevel.ranges, cd.thisRange)
- nextLevel.ranges = append(nextLevel.ranges, cd.nextRange)
- thisLevel.delSize += cd.thisSize
- return true
-}
-
-func (cs *compactStatus) delete(cd compactDef) {
- cs.Lock()
- defer cs.Unlock()
-
- level := cd.thisLevel.level
- y.AssertTruef(level < len(cs.levels)-1, "Got level %d. Max levels: %d", level, len(cs.levels))
-
- thisLevel := cs.levels[level]
- nextLevel := cs.levels[level+1]
-
- thisLevel.delSize -= cd.thisSize
- found := thisLevel.remove(cd.thisRange)
- found = nextLevel.remove(cd.nextRange) && found
-
- if !found {
- this := cd.thisRange
- next := cd.nextRange
- fmt.Printf("Looking for: [%q, %q, %v] in this level.\n", this.left, this.right, this.inf)
- fmt.Printf("This Level:\n%s\n", thisLevel.debug())
- fmt.Println()
- fmt.Printf("Looking for: [%q, %q, %v] in next level.\n", next.left, next.right, next.inf)
- fmt.Printf("Next Level:\n%s\n", nextLevel.debug())
- log.Fatal("keyRange not found")
- }
-}
diff --git a/vendor/github.com/dgraph-io/badger/db.go b/vendor/github.com/dgraph-io/badger/db.go
deleted file mode 100644
index 21bb22d6..00000000
--- a/vendor/github.com/dgraph-io/badger/db.go
+++ /dev/null
@@ -1,1468 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "bytes"
- "context"
- "encoding/binary"
- "encoding/hex"
- "expvar"
- "io"
- "math"
- "os"
- "path/filepath"
- "sort"
- "strconv"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/dgraph-io/badger/options"
- "github.com/dgraph-io/badger/pb"
- "github.com/dgraph-io/badger/skl"
- "github.com/dgraph-io/badger/table"
- "github.com/dgraph-io/badger/y"
- humanize "github.com/dustin/go-humanize"
- "github.com/pkg/errors"
- "golang.org/x/net/trace"
-)
-
-var (
- badgerPrefix = []byte("!badger!") // Prefix for internal keys used by badger.
- head = []byte("!badger!head") // For storing value offset for replay.
- txnKey = []byte("!badger!txn") // For indicating end of entries in txn.
- badgerMove = []byte("!badger!move") // For key-value pairs which got moved during GC.
- lfDiscardStatsKey = []byte("!badger!discard") // For storing lfDiscardStats
-)
-
-type closers struct {
- updateSize *y.Closer
- compactors *y.Closer
- memtable *y.Closer
- writes *y.Closer
- valueGC *y.Closer
- pub *y.Closer
-}
-
-type callback func(kv *pb.KVList)
-
-// DB provides the various functions required to interact with Badger.
-// DB is thread-safe.
-type DB struct {
- sync.RWMutex // Guards list of inmemory tables, not individual reads and writes.
-
- dirLockGuard *directoryLockGuard
- // nil if Dir and ValueDir are the same
- valueDirGuard *directoryLockGuard
-
- closers closers
- elog trace.EventLog
- mt *skl.Skiplist // Our latest (actively written) in-memory table
- imm []*skl.Skiplist // Add here only AFTER pushing to flushChan.
- opt Options
- manifest *manifestFile
- lc *levelsController
- vlog valueLog
- vhead valuePointer // less than or equal to a pointer to the last vlog value put into mt
- writeCh chan *request
- flushChan chan flushTask // For flushing memtables.
- closeOnce sync.Once // For closing DB only once.
-
- // Number of log rotates since the last memtable flush. We will access this field via atomic
- // functions. Since we are not going to use any 64bit atomic functions, there is no need for
- // 64 bit alignment of this struct(see #311).
- logRotates int32
-
- blockWrites int32
-
- orc *oracle
-
- pub *publisher
-}
-
-const (
- kvWriteChCapacity = 1000
-)
-
-func (db *DB) replayFunction() func(Entry, valuePointer) error {
- type txnEntry struct {
- nk []byte
- v y.ValueStruct
- }
-
- var txn []txnEntry
- var lastCommit uint64
-
- toLSM := func(nk []byte, vs y.ValueStruct) {
- for err := db.ensureRoomForWrite(); err != nil; err = db.ensureRoomForWrite() {
- db.elog.Printf("Replay: Making room for writes")
- time.Sleep(10 * time.Millisecond)
- }
- db.mt.Put(nk, vs)
- }
-
- first := true
- return func(e Entry, vp valuePointer) error { // Function for replaying.
- if first {
- db.elog.Printf("First key=%q\n", e.Key)
- }
- first = false
-
- if db.orc.nextTxnTs < y.ParseTs(e.Key) {
- db.orc.nextTxnTs = y.ParseTs(e.Key)
- }
-
- nk := make([]byte, len(e.Key))
- copy(nk, e.Key)
- var nv []byte
- meta := e.meta
- if db.shouldWriteValueToLSM(e) {
- nv = make([]byte, len(e.Value))
- copy(nv, e.Value)
- } else {
- nv = make([]byte, vptrSize)
- vp.Encode(nv)
- meta = meta | bitValuePointer
- }
-
- v := y.ValueStruct{
- Value: nv,
- Meta: meta,
- UserMeta: e.UserMeta,
- ExpiresAt: e.ExpiresAt,
- }
-
- if e.meta&bitFinTxn > 0 {
- txnTs, err := strconv.ParseUint(string(e.Value), 10, 64)
- if err != nil {
- return errors.Wrapf(err, "Unable to parse txn fin: %q", e.Value)
- }
- y.AssertTrue(lastCommit == txnTs)
- y.AssertTrue(len(txn) > 0)
- // Got the end of txn. Now we can store them.
- for _, t := range txn {
- toLSM(t.nk, t.v)
- }
- txn = txn[:0]
- lastCommit = 0
-
- } else if e.meta&bitTxn > 0 {
- txnTs := y.ParseTs(nk)
- if lastCommit == 0 {
- lastCommit = txnTs
- }
- if lastCommit != txnTs {
- db.opt.Warningf("Found an incomplete txn at timestamp %d. Discarding it.\n",
- lastCommit)
- txn = txn[:0]
- lastCommit = txnTs
- }
- te := txnEntry{nk: nk, v: v}
- txn = append(txn, te)
-
- } else {
- // This entry is from a rewrite.
- toLSM(nk, v)
-
- // We shouldn't get this entry in the middle of a transaction.
- y.AssertTrue(lastCommit == 0)
- y.AssertTrue(len(txn) == 0)
- }
- return nil
- }
-}
-
-// Open returns a new DB object.
-func Open(opt Options) (db *DB, err error) {
- opt.maxBatchSize = (15 * opt.MaxTableSize) / 100
- opt.maxBatchCount = opt.maxBatchSize / int64(skl.MaxNodeSize)
-
- if opt.ValueThreshold > ValueThresholdLimit {
- return nil, ErrValueThreshold
- }
-
- if opt.ReadOnly {
- // Can't truncate if the DB is read only.
- opt.Truncate = false
- // Do not perform compaction in read only mode.
- opt.CompactL0OnClose = false
- }
-
- for _, path := range []string{opt.Dir, opt.ValueDir} {
- dirExists, err := exists(path)
- if err != nil {
- return nil, y.Wrapf(err, "Invalid Dir: %q", path)
- }
- if !dirExists {
- if opt.ReadOnly {
- return nil, errors.Errorf("Cannot find directory %q for read-only open", path)
- }
- // Try to create the directory
- err = os.Mkdir(path, 0700)
- if err != nil {
- return nil, y.Wrapf(err, "Error Creating Dir: %q", path)
- }
- }
- }
- absDir, err := filepath.Abs(opt.Dir)
- if err != nil {
- return nil, err
- }
- absValueDir, err := filepath.Abs(opt.ValueDir)
- if err != nil {
- return nil, err
- }
- var dirLockGuard, valueDirLockGuard *directoryLockGuard
- dirLockGuard, err = acquireDirectoryLock(opt.Dir, lockFile, opt.ReadOnly)
- if err != nil {
- return nil, err
- }
- defer func() {
- if dirLockGuard != nil {
- _ = dirLockGuard.release()
- }
- }()
- if absValueDir != absDir {
- valueDirLockGuard, err = acquireDirectoryLock(opt.ValueDir, lockFile, opt.ReadOnly)
- if err != nil {
- return nil, err
- }
- defer func() {
- if valueDirLockGuard != nil {
- _ = valueDirLockGuard.release()
- }
- }()
- }
- if !(opt.ValueLogFileSize <= 2<<30 && opt.ValueLogFileSize >= 1<<20) {
- return nil, ErrValueLogSize
- }
- if !(opt.ValueLogLoadingMode == options.FileIO ||
- opt.ValueLogLoadingMode == options.MemoryMap) {
- return nil, ErrInvalidLoadingMode
- }
- manifestFile, manifest, err := openOrCreateManifestFile(opt.Dir, opt.ReadOnly)
- if err != nil {
- return nil, err
- }
- defer func() {
- if manifestFile != nil {
- _ = manifestFile.close()
- }
- }()
-
- db = &DB{
- imm: make([]*skl.Skiplist, 0, opt.NumMemtables),
- flushChan: make(chan flushTask, opt.NumMemtables),
- writeCh: make(chan *request, kvWriteChCapacity),
- opt: opt,
- manifest: manifestFile,
- elog: trace.NewEventLog("Badger", "DB"),
- dirLockGuard: dirLockGuard,
- valueDirGuard: valueDirLockGuard,
- orc: newOracle(opt),
- pub: newPublisher(),
- }
-
- // Calculate initial size.
- db.calculateSize()
- db.closers.updateSize = y.NewCloser(1)
- go db.updateSize(db.closers.updateSize)
- db.mt = skl.NewSkiplist(arenaSize(opt))
-
- // newLevelsController potentially loads files in directory.
- if db.lc, err = newLevelsController(db, &manifest); err != nil {
- return nil, err
- }
-
- if !opt.ReadOnly {
- db.closers.compactors = y.NewCloser(1)
- db.lc.startCompact(db.closers.compactors)
-
- db.closers.memtable = y.NewCloser(1)
- go func() {
- _ = db.flushMemtable(db.closers.memtable) // Need levels controller to be up.
- }()
- }
-
- headKey := y.KeyWithTs(head, math.MaxUint64)
- // Need to pass with timestamp, lsm get removes the last 8 bytes and compares key
- vs, err := db.get(headKey)
- if err != nil {
- return nil, errors.Wrap(err, "Retrieving head")
- }
- db.orc.nextTxnTs = vs.Version
- var vptr valuePointer
- if len(vs.Value) > 0 {
- vptr.Decode(vs.Value)
- }
-
- replayCloser := y.NewCloser(1)
- go db.doWrites(replayCloser)
-
- if err = db.vlog.open(db, vptr, db.replayFunction()); err != nil {
- return db, err
- }
- replayCloser.SignalAndWait() // Wait for replay to be applied first.
-
- // Let's advance nextTxnTs to one more than whatever we observed via
- // replaying the logs.
- db.orc.txnMark.Done(db.orc.nextTxnTs)
- // In normal mode, we must update readMark so older versions of keys can be removed during
- // compaction when run in offline mode via the flatten tool.
- db.orc.readMark.Done(db.orc.nextTxnTs)
- db.orc.incrementNextTs()
-
- db.writeCh = make(chan *request, kvWriteChCapacity)
- db.closers.writes = y.NewCloser(1)
- go db.doWrites(db.closers.writes)
-
- db.closers.valueGC = y.NewCloser(1)
- go db.vlog.waitOnGC(db.closers.valueGC)
-
- db.closers.pub = y.NewCloser(1)
- go db.pub.listenForUpdates(db.closers.pub)
-
- valueDirLockGuard = nil
- dirLockGuard = nil
- manifestFile = nil
- return db, nil
-}
-
-// Close closes a DB. It's crucial to call it to ensure all the pending updates make their way to
-// disk. Calling DB.Close() multiple times would still only close the DB once.
-func (db *DB) Close() error {
- var err error
- db.closeOnce.Do(func() {
- err = db.close()
- })
- return err
-}
-
-func (db *DB) close() (err error) {
- db.elog.Printf("Closing database")
-
- if err := db.vlog.flushDiscardStats(); err != nil {
- return errors.Wrap(err, "failed to flush discard stats")
- }
-
- atomic.StoreInt32(&db.blockWrites, 1)
-
- // Stop value GC first.
- db.closers.valueGC.SignalAndWait()
-
- // Stop writes next.
- db.closers.writes.SignalAndWait()
-
- db.closers.pub.SignalAndWait()
-
- // Now close the value log.
- if vlogErr := db.vlog.Close(); vlogErr != nil {
- err = errors.Wrap(vlogErr, "DB.Close")
- }
-
- // Make sure that block writer is done pushing stuff into memtable!
- // Otherwise, you will have a race condition: we are trying to flush memtables
- // and remove them completely, while the block / memtable writer is still
- // trying to push stuff into the memtable. This will also resolve the value
- // offset problem: as we push into memtable, we update value offsets there.
- if !db.mt.Empty() {
- db.elog.Printf("Flushing memtable")
- for {
- pushedFlushTask := func() bool {
- db.Lock()
- defer db.Unlock()
- y.AssertTrue(db.mt != nil)
- select {
- case db.flushChan <- flushTask{mt: db.mt, vptr: db.vhead}:
- db.imm = append(db.imm, db.mt) // Flusher will attempt to remove this from s.imm.
- db.mt = nil // Will segfault if we try writing!
- db.elog.Printf("pushed to flush chan\n")
- return true
- default:
- // If we fail to push, we need to unlock and wait for a short while.
- // The flushing operation needs to update s.imm. Otherwise, we have a deadlock.
- // TODO: Think about how to do this more cleanly, maybe without any locks.
- }
- return false
- }()
- if pushedFlushTask {
- break
- }
- time.Sleep(10 * time.Millisecond)
- }
- }
- db.stopCompactions()
-
- // Force Compact L0
- // We don't need to care about cstatus since no parallel compaction is running.
- if db.opt.CompactL0OnClose {
- err := db.lc.doCompact(compactionPriority{level: 0, score: 1.73})
- switch err {
- case errFillTables:
- // This error only means that there might be enough tables to do a compaction. So, we
- // should not report it to the end user to avoid confusing them.
- case nil:
- db.opt.Infof("Force compaction on level 0 done")
- default:
- db.opt.Warningf("While forcing compaction on level 0: %v", err)
- }
- }
-
- if lcErr := db.lc.close(); err == nil {
- err = errors.Wrap(lcErr, "DB.Close")
- }
- db.elog.Printf("Waiting for closer")
- db.closers.updateSize.SignalAndWait()
- db.orc.Stop()
-
- db.elog.Finish()
-
- if db.dirLockGuard != nil {
- if guardErr := db.dirLockGuard.release(); err == nil {
- err = errors.Wrap(guardErr, "DB.Close")
- }
- }
- if db.valueDirGuard != nil {
- if guardErr := db.valueDirGuard.release(); err == nil {
- err = errors.Wrap(guardErr, "DB.Close")
- }
- }
- if manifestErr := db.manifest.close(); err == nil {
- err = errors.Wrap(manifestErr, "DB.Close")
- }
-
- // Fsync directories to ensure that lock file, and any other removed files whose directory
- // we haven't specifically fsynced, are guaranteed to have their directory entry removal
- // persisted to disk.
- if syncErr := syncDir(db.opt.Dir); err == nil {
- err = errors.Wrap(syncErr, "DB.Close")
- }
- if syncErr := syncDir(db.opt.ValueDir); err == nil {
- err = errors.Wrap(syncErr, "DB.Close")
- }
-
- return err
-}
-
-const (
- lockFile = "LOCK"
-)
-
-// Sync syncs database content to disk. This function provides
-// more control to user to sync data whenever required.
-func (db *DB) Sync() error {
- return db.vlog.sync(math.MaxUint32)
-}
-
-// getMemtables returns the current memtables and get references.
-func (db *DB) getMemTables() ([]*skl.Skiplist, func()) {
- db.RLock()
- defer db.RUnlock()
-
- tables := make([]*skl.Skiplist, len(db.imm)+1)
-
- // Get mutable memtable.
- tables[0] = db.mt
- tables[0].IncrRef()
-
- // Get immutable memtables.
- last := len(db.imm) - 1
- for i := range db.imm {
- tables[i+1] = db.imm[last-i]
- tables[i+1].IncrRef()
- }
- return tables, func() {
- for _, tbl := range tables {
- tbl.DecrRef()
- }
- }
-}
-
-// get returns the value in memtable or disk for given key.
-// Note that value will include meta byte.
-//
-// IMPORTANT: We should never write an entry with an older timestamp for the same key, We need to
-// maintain this invariant to search for the latest value of a key, or else we need to search in all
-// tables and find the max version among them. To maintain this invariant, we also need to ensure
-// that all versions of a key are always present in the same table from level 1, because compaction
-// can push any table down.
-//
-// Update (Sep 22, 2018): To maintain the above invariant, and to allow keys to be moved from one
-// value log to another (while reclaiming space during value log GC), we have logically moved this
-// need to write "old versions after new versions" to the badgerMove keyspace. Thus, for normal
-// gets, we can stop going down the LSM tree once we find any version of the key (note however that
-// we will ALWAYS skip versions with ts greater than the key version). However, if that key has
-// been moved, then for the corresponding movekey, we'll look through all the levels of the tree
-// to ensure that we pick the highest version of the movekey present.
-func (db *DB) get(key []byte) (y.ValueStruct, error) {
- tables, decr := db.getMemTables() // Lock should be released.
- defer decr()
-
- var maxVs *y.ValueStruct
- var version uint64
- if bytes.HasPrefix(key, badgerMove) {
- // If we are checking badgerMove key, we should look into all the
- // levels, so we can pick up the newer versions, which might have been
- // compacted down the tree.
- maxVs = &y.ValueStruct{}
- version = y.ParseTs(key)
- }
-
- y.NumGets.Add(1)
- for i := 0; i < len(tables); i++ {
- vs := tables[i].Get(key)
- y.NumMemtableGets.Add(1)
- if vs.Meta == 0 && vs.Value == nil {
- continue
- }
- // Found a version of the key. For user keyspace, return immediately. For move keyspace,
- // continue iterating, unless we found a version == given key version.
- if maxVs == nil || vs.Version == version {
- return vs, nil
- }
- if maxVs.Version < vs.Version {
- *maxVs = vs
- }
- }
- return db.lc.get(key, maxVs)
-}
-
-func (db *DB) updateHead(ptrs []valuePointer) {
- var ptr valuePointer
- for i := len(ptrs) - 1; i >= 0; i-- {
- p := ptrs[i]
- if !p.IsZero() {
- ptr = p
- break
- }
- }
- if ptr.IsZero() {
- return
- }
-
- db.Lock()
- defer db.Unlock()
- y.AssertTrue(!ptr.Less(db.vhead))
- db.vhead = ptr
-}
-
-var requestPool = sync.Pool{
- New: func() interface{} {
- return new(request)
- },
-}
-
-func (db *DB) shouldWriteValueToLSM(e Entry) bool {
- return len(e.Value) < db.opt.ValueThreshold
-}
-
-func (db *DB) writeToLSM(b *request) error {
- if len(b.Ptrs) != len(b.Entries) {
- return errors.Errorf("Ptrs and Entries don't match: %+v", b)
- }
-
- for i, entry := range b.Entries {
- if entry.meta&bitFinTxn != 0 {
- continue
- }
- if db.shouldWriteValueToLSM(*entry) { // Will include deletion / tombstone case.
- db.mt.Put(entry.Key,
- y.ValueStruct{
- Value: entry.Value,
- Meta: entry.meta,
- UserMeta: entry.UserMeta,
- ExpiresAt: entry.ExpiresAt,
- })
- } else {
- var offsetBuf [vptrSize]byte
- db.mt.Put(entry.Key,
- y.ValueStruct{
- Value: b.Ptrs[i].Encode(offsetBuf[:]),
- Meta: entry.meta | bitValuePointer,
- UserMeta: entry.UserMeta,
- ExpiresAt: entry.ExpiresAt,
- })
- }
- }
- return nil
-}
-
-// writeRequests is called serially by only one goroutine.
-func (db *DB) writeRequests(reqs []*request) error {
- if len(reqs) == 0 {
- return nil
- }
-
- done := func(err error) {
- for _, r := range reqs {
- r.Err = err
- r.Wg.Done()
- }
- }
- db.elog.Printf("writeRequests called. Writing to value log")
-
- err := db.vlog.write(reqs)
- if err != nil {
- done(err)
- return err
- }
-
- db.elog.Printf("Sending updates to subscribers")
- db.pub.sendUpdates(reqs)
- db.elog.Printf("Writing to memtable")
- var count int
- for _, b := range reqs {
- if len(b.Entries) == 0 {
- continue
- }
- count += len(b.Entries)
- var i uint64
- for err = db.ensureRoomForWrite(); err == errNoRoom; err = db.ensureRoomForWrite() {
- i++
- if i%100 == 0 {
- db.elog.Printf("Making room for writes")
- }
- // We need to poll a bit because both hasRoomForWrite and the flusher need access to s.imm.
- // When flushChan is full and you are blocked there, and the flusher is trying to update s.imm,
- // you will get a deadlock.
- time.Sleep(10 * time.Millisecond)
- }
- if err != nil {
- done(err)
- return errors.Wrap(err, "writeRequests")
- }
- if err := db.writeToLSM(b); err != nil {
- done(err)
- return errors.Wrap(err, "writeRequests")
- }
- db.updateHead(b.Ptrs)
- }
- done(nil)
- db.elog.Printf("%d entries written", count)
- return nil
-}
-
-func (db *DB) sendToWriteCh(entries []*Entry) (*request, error) {
- if atomic.LoadInt32(&db.blockWrites) == 1 {
- return nil, ErrBlockedWrites
- }
- var count, size int64
- for _, e := range entries {
- size += int64(e.estimateSize(db.opt.ValueThreshold))
- count++
- }
- if count >= db.opt.maxBatchCount || size >= db.opt.maxBatchSize {
- return nil, ErrTxnTooBig
- }
-
- // We can only service one request because we need each txn to be stored in a contigous section.
- // Txns should not interleave among other txns or rewrites.
- req := requestPool.Get().(*request)
- req.Entries = entries
- req.Wg = sync.WaitGroup{}
- req.Wg.Add(1)
- req.IncrRef() // for db write
- req.IncrRef() // for publisher updates
- db.writeCh <- req // Handled in doWrites.
- y.NumPuts.Add(int64(len(entries)))
-
- return req, nil
-}
-
-func (db *DB) doWrites(lc *y.Closer) {
- defer lc.Done()
- pendingCh := make(chan struct{}, 1)
-
- writeRequests := func(reqs []*request) {
- if err := db.writeRequests(reqs); err != nil {
- db.opt.Errorf("writeRequests: %v", err)
- }
- <-pendingCh
- }
-
- // This variable tracks the number of pending writes.
- reqLen := new(expvar.Int)
- y.PendingWrites.Set(db.opt.Dir, reqLen)
-
- reqs := make([]*request, 0, 10)
- for {
- var r *request
- select {
- case r = <-db.writeCh:
- case <-lc.HasBeenClosed():
- goto closedCase
- }
-
- for {
- reqs = append(reqs, r)
- reqLen.Set(int64(len(reqs)))
-
- if len(reqs) >= 3*kvWriteChCapacity {
- pendingCh <- struct{}{} // blocking.
- goto writeCase
- }
-
- select {
- // Either push to pending, or continue to pick from writeCh.
- case r = <-db.writeCh:
- case pendingCh <- struct{}{}:
- goto writeCase
- case <-lc.HasBeenClosed():
- goto closedCase
- }
- }
-
- closedCase:
- close(db.writeCh)
- for r := range db.writeCh { // Flush the channel.
- reqs = append(reqs, r)
- }
-
- pendingCh <- struct{}{} // Push to pending before doing a write.
- writeRequests(reqs)
- return
-
- writeCase:
- go writeRequests(reqs)
- reqs = make([]*request, 0, 10)
- reqLen.Set(0)
- }
-}
-
-// batchSet applies a list of badger.Entry. If a request level error occurs it
-// will be returned.
-// Check(kv.BatchSet(entries))
-func (db *DB) batchSet(entries []*Entry) error {
- req, err := db.sendToWriteCh(entries)
- if err != nil {
- return err
- }
-
- return req.Wait()
-}
-
-// batchSetAsync is the asynchronous version of batchSet. It accepts a callback
-// function which is called when all the sets are complete. If a request level
-// error occurs, it will be passed back via the callback.
-// err := kv.BatchSetAsync(entries, func(err error)) {
-// Check(err)
-// }
-func (db *DB) batchSetAsync(entries []*Entry, f func(error)) error {
- req, err := db.sendToWriteCh(entries)
- if err != nil {
- return err
- }
- go func() {
- err := req.Wait()
- // Write is complete. Let's call the callback function now.
- f(err)
- }()
- return nil
-}
-
-var errNoRoom = errors.New("No room for write")
-
-// ensureRoomForWrite is always called serially.
-func (db *DB) ensureRoomForWrite() error {
- var err error
- db.Lock()
- defer db.Unlock()
-
- // Here we determine if we need to force flush memtable. Given we rotated log file, it would
- // make sense to force flush a memtable, so the updated value head would have a chance to be
- // pushed to L0. Otherwise, it would not go to L0, until the memtable has been fully filled,
- // which can take a lot longer if the write load has fewer keys and larger values. This force
- // flush, thus avoids the need to read through a lot of log files on a crash and restart.
- // Above approach is quite simple with small drawback. We are calling ensureRoomForWrite before
- // inserting every entry in Memtable. We will get latest db.head after all entries for a request
- // are inserted in Memtable. If we have done >= db.logRotates rotations, then while inserting
- // first entry in Memtable, below condition will be true and we will endup flushing old value of
- // db.head. Hence we are limiting no of value log files to be read to db.logRotates only.
- forceFlush := atomic.LoadInt32(&db.logRotates) >= db.opt.LogRotatesToFlush
-
- if !forceFlush && db.mt.MemSize() < db.opt.MaxTableSize {
- return nil
- }
-
- y.AssertTrue(db.mt != nil) // A nil mt indicates that DB is being closed.
- select {
- case db.flushChan <- flushTask{mt: db.mt, vptr: db.vhead}:
- // After every memtable flush, let's reset the counter.
- atomic.StoreInt32(&db.logRotates, 0)
-
- // Ensure value log is synced to disk so this memtable's contents wouldn't be lost.
- err = db.vlog.sync(db.vhead.Fid)
- if err != nil {
- return err
- }
-
- db.opt.Debugf("Flushing memtable, mt.size=%d size of flushChan: %d\n",
- db.mt.MemSize(), len(db.flushChan))
- // We manage to push this task. Let's modify imm.
- db.imm = append(db.imm, db.mt)
- db.mt = skl.NewSkiplist(arenaSize(db.opt))
- // New memtable is empty. We certainly have room.
- return nil
- default:
- // We need to do this to unlock and allow the flusher to modify imm.
- return errNoRoom
- }
-}
-
-func arenaSize(opt Options) int64 {
- return opt.MaxTableSize + opt.maxBatchSize + opt.maxBatchCount*int64(skl.MaxNodeSize)
-}
-
-// WriteLevel0Table flushes memtable.
-func writeLevel0Table(ft flushTask, f io.Writer) error {
- iter := ft.mt.NewIterator()
- defer iter.Close()
- b := table.NewTableBuilder()
- defer b.Close()
- for iter.SeekToFirst(); iter.Valid(); iter.Next() {
- if len(ft.dropPrefix) > 0 && bytes.HasPrefix(iter.Key(), ft.dropPrefix) {
- continue
- }
- if err := b.Add(iter.Key(), iter.Value()); err != nil {
- return err
- }
- }
- _, err := f.Write(b.Finish())
- return err
-}
-
-type flushTask struct {
- mt *skl.Skiplist
- vptr valuePointer
- dropPrefix []byte
-}
-
-// handleFlushTask must be run serially.
-func (db *DB) handleFlushTask(ft flushTask) error {
- // There can be a scnerio, when empty memtable is flushed. For example, memtable is empty and
- // after writing request to value log, rotation count exceeds db.LogRotatesToFlush.
- if ft.mt.Empty() {
- return nil
- }
-
- // Store badger head even if vptr is zero, need it for readTs
- db.opt.Debugf("Storing value log head: %+v\n", ft.vptr)
- db.elog.Printf("Storing offset: %+v\n", ft.vptr)
- offset := make([]byte, vptrSize)
- ft.vptr.Encode(offset)
-
- // Pick the max commit ts, so in case of crash, our read ts would be higher than all the
- // commits.
- headTs := y.KeyWithTs(head, db.orc.nextTs())
- ft.mt.Put(headTs, y.ValueStruct{Value: offset})
-
- fileID := db.lc.reserveFileID()
- fd, err := y.CreateSyncedFile(table.NewFilename(fileID, db.opt.Dir), true)
- if err != nil {
- return y.Wrap(err)
- }
-
- // Don't block just to sync the directory entry.
- dirSyncCh := make(chan error)
- go func() { dirSyncCh <- syncDir(db.opt.Dir) }()
-
- err = writeLevel0Table(ft, fd)
- dirSyncErr := <-dirSyncCh
-
- if err != nil {
- db.elog.Errorf("ERROR while writing to level 0: %v", err)
- return err
- }
- if dirSyncErr != nil {
- // Do dir sync as best effort. No need to return due to an error there.
- db.elog.Errorf("ERROR while syncing level directory: %v", dirSyncErr)
- }
-
- tbl, err := table.OpenTable(fd, db.opt.TableLoadingMode, nil)
- if err != nil {
- db.elog.Printf("ERROR while opening table: %v", err)
- return err
- }
- // We own a ref on tbl.
- err = db.lc.addLevel0Table(tbl) // This will incrRef (if we don't error, sure)
- _ = tbl.DecrRef() // Releases our ref.
- return err
-}
-
-// flushMemtable must keep running until we send it an empty flushTask. If there
-// are errors during handling the flush task, we'll retry indefinitely.
-func (db *DB) flushMemtable(lc *y.Closer) error {
- defer lc.Done()
-
- for ft := range db.flushChan {
- if ft.mt == nil {
- // We close db.flushChan now, instead of sending a nil ft.mt.
- continue
- }
- for {
- err := db.handleFlushTask(ft)
- if err == nil {
- // Update s.imm. Need a lock.
- db.Lock()
- // This is a single-threaded operation. ft.mt corresponds to the head of
- // db.imm list. Once we flush it, we advance db.imm. The next ft.mt
- // which would arrive here would match db.imm[0], because we acquire a
- // lock over DB when pushing to flushChan.
- // TODO: This logic is dirty AF. Any change and this could easily break.
- y.AssertTrue(ft.mt == db.imm[0])
- db.imm = db.imm[1:]
- ft.mt.DecrRef() // Return memory.
- db.Unlock()
-
- break
- }
- // Encountered error. Retry indefinitely.
- db.opt.Errorf("Failure while flushing memtable to disk: %v. Retrying...\n", err)
- time.Sleep(time.Second)
- }
- }
- return nil
-}
-
-func exists(path string) (bool, error) {
- _, err := os.Stat(path)
- if err == nil {
- return true, nil
- }
- if os.IsNotExist(err) {
- return false, nil
- }
- return true, err
-}
-
-// This function does a filewalk, calculates the size of vlog and sst files and stores it in
-// y.LSMSize and y.VlogSize.
-func (db *DB) calculateSize() {
- newInt := func(val int64) *expvar.Int {
- v := new(expvar.Int)
- v.Add(val)
- return v
- }
-
- totalSize := func(dir string) (int64, int64) {
- var lsmSize, vlogSize int64
- err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
- ext := filepath.Ext(path)
- if ext == ".sst" {
- lsmSize += info.Size()
- } else if ext == ".vlog" {
- vlogSize += info.Size()
- }
- return nil
- })
- if err != nil {
- db.elog.Printf("Got error while calculating total size of directory: %s", dir)
- }
- return lsmSize, vlogSize
- }
-
- lsmSize, vlogSize := totalSize(db.opt.Dir)
- y.LSMSize.Set(db.opt.Dir, newInt(lsmSize))
- // If valueDir is different from dir, we'd have to do another walk.
- if db.opt.ValueDir != db.opt.Dir {
- _, vlogSize = totalSize(db.opt.ValueDir)
- }
- y.VlogSize.Set(db.opt.Dir, newInt(vlogSize))
-}
-
-func (db *DB) updateSize(lc *y.Closer) {
- defer lc.Done()
-
- metricsTicker := time.NewTicker(time.Minute)
- defer metricsTicker.Stop()
-
- for {
- select {
- case <-metricsTicker.C:
- db.calculateSize()
- case <-lc.HasBeenClosed():
- return
- }
- }
-}
-
-// RunValueLogGC triggers a value log garbage collection.
-//
-// It picks value log files to perform GC based on statistics that are collected
-// duing compactions. If no such statistics are available, then log files are
-// picked in random order. The process stops as soon as the first log file is
-// encountered which does not result in garbage collection.
-//
-// When a log file is picked, it is first sampled. If the sample shows that we
-// can discard at least discardRatio space of that file, it would be rewritten.
-//
-// If a call to RunValueLogGC results in no rewrites, then an ErrNoRewrite is
-// thrown indicating that the call resulted in no file rewrites.
-//
-// We recommend setting discardRatio to 0.5, thus indicating that a file be
-// rewritten if half the space can be discarded. This results in a lifetime
-// value log write amplification of 2 (1 from original write + 0.5 rewrite +
-// 0.25 + 0.125 + ... = 2). Setting it to higher value would result in fewer
-// space reclaims, while setting it to a lower value would result in more space
-// reclaims at the cost of increased activity on the LSM tree. discardRatio
-// must be in the range (0.0, 1.0), both endpoints excluded, otherwise an
-// ErrInvalidRequest is returned.
-//
-// Only one GC is allowed at a time. If another value log GC is running, or DB
-// has been closed, this would return an ErrRejected.
-//
-// Note: Every time GC is run, it would produce a spike of activity on the LSM
-// tree.
-func (db *DB) RunValueLogGC(discardRatio float64) error {
- if discardRatio >= 1.0 || discardRatio <= 0.0 {
- return ErrInvalidRequest
- }
-
- // Find head on disk
- headKey := y.KeyWithTs(head, math.MaxUint64)
- // Need to pass with timestamp, lsm get removes the last 8 bytes and compares key
- val, err := db.lc.get(headKey, nil)
- if err != nil {
- return errors.Wrap(err, "Retrieving head from on-disk LSM")
- }
-
- var head valuePointer
- if len(val.Value) > 0 {
- head.Decode(val.Value)
- }
-
- // Pick a log file and run GC
- return db.vlog.runGC(discardRatio, head)
-}
-
-// Size returns the size of lsm and value log files in bytes. It can be used to decide how often to
-// call RunValueLogGC.
-func (db *DB) Size() (lsm, vlog int64) {
- if y.LSMSize.Get(db.opt.Dir) == nil {
- lsm, vlog = 0, 0
- return
- }
- lsm = y.LSMSize.Get(db.opt.Dir).(*expvar.Int).Value()
- vlog = y.VlogSize.Get(db.opt.Dir).(*expvar.Int).Value()
- return
-}
-
-// Sequence represents a Badger sequence.
-type Sequence struct {
- sync.Mutex
- db *DB
- key []byte
- next uint64
- leased uint64
- bandwidth uint64
-}
-
-// Next would return the next integer in the sequence, updating the lease by running a transaction
-// if needed.
-func (seq *Sequence) Next() (uint64, error) {
- seq.Lock()
- defer seq.Unlock()
- if seq.next >= seq.leased {
- if err := seq.updateLease(); err != nil {
- return 0, err
- }
- }
- val := seq.next
- seq.next++
- return val, nil
-}
-
-// Release the leased sequence to avoid wasted integers. This should be done right
-// before closing the associated DB. However it is valid to use the sequence after
-// it was released, causing a new lease with full bandwidth.
-func (seq *Sequence) Release() error {
- seq.Lock()
- defer seq.Unlock()
- err := seq.db.Update(func(txn *Txn) error {
- var buf [8]byte
- binary.BigEndian.PutUint64(buf[:], seq.next)
- return txn.SetEntry(NewEntry(seq.key, buf[:]))
- })
- if err != nil {
- return err
- }
- seq.leased = seq.next
- return nil
-}
-
-func (seq *Sequence) updateLease() error {
- return seq.db.Update(func(txn *Txn) error {
- item, err := txn.Get(seq.key)
- if err == ErrKeyNotFound {
- seq.next = 0
- } else if err != nil {
- return err
- } else {
- var num uint64
- if err := item.Value(func(v []byte) error {
- num = binary.BigEndian.Uint64(v)
- return nil
- }); err != nil {
- return err
- }
- seq.next = num
- }
-
- lease := seq.next + seq.bandwidth
- var buf [8]byte
- binary.BigEndian.PutUint64(buf[:], lease)
- if err = txn.SetEntry(NewEntry(seq.key, buf[:])); err != nil {
- return err
- }
- seq.leased = lease
- return nil
- })
-}
-
-// GetSequence would initiate a new sequence object, generating it from the stored lease, if
-// available, in the database. Sequence can be used to get a list of monotonically increasing
-// integers. Multiple sequences can be created by providing different keys. Bandwidth sets the
-// size of the lease, determining how many Next() requests can be served from memory.
-//
-// GetSequence is not supported on ManagedDB. Calling this would result in a panic.
-func (db *DB) GetSequence(key []byte, bandwidth uint64) (*Sequence, error) {
- if db.opt.managedTxns {
- panic("Cannot use GetSequence with managedDB=true.")
- }
-
- switch {
- case len(key) == 0:
- return nil, ErrEmptyKey
- case bandwidth == 0:
- return nil, ErrZeroBandwidth
- }
- seq := &Sequence{
- db: db,
- key: key,
- next: 0,
- leased: 0,
- bandwidth: bandwidth,
- }
- err := seq.updateLease()
- return seq, err
-}
-
-// Tables gets the TableInfo objects from the level controller. If withKeysCount
-// is true, TableInfo objects also contain counts of keys for the tables.
-func (db *DB) Tables(withKeysCount bool) []TableInfo {
- return db.lc.getTableInfo(withKeysCount)
-}
-
-// KeySplits can be used to get rough key ranges to divide up iteration over
-// the DB.
-func (db *DB) KeySplits(prefix []byte) []string {
- var splits []string
- // We just want table ranges here and not keys count.
- for _, ti := range db.Tables(false) {
- // We don't use ti.Left, because that has a tendency to store !badger
- // keys.
- if bytes.HasPrefix(ti.Right, prefix) {
- splits = append(splits, string(ti.Right))
- }
- }
- sort.Strings(splits)
- return splits
-}
-
-// MaxBatchCount returns max possible entries in batch
-func (db *DB) MaxBatchCount() int64 {
- return db.opt.maxBatchCount
-}
-
-// MaxBatchSize returns max possible batch size
-func (db *DB) MaxBatchSize() int64 {
- return db.opt.maxBatchSize
-}
-
-func (db *DB) stopCompactions() {
- // Stop memtable flushes.
- if db.closers.memtable != nil {
- close(db.flushChan)
- db.closers.memtable.SignalAndWait()
- }
- // Stop compactions.
- if db.closers.compactors != nil {
- db.closers.compactors.SignalAndWait()
- }
-}
-
-func (db *DB) startCompactions() {
- // Resume compactions.
- if db.closers.compactors != nil {
- db.closers.compactors = y.NewCloser(1)
- db.lc.startCompact(db.closers.compactors)
- }
- if db.closers.memtable != nil {
- db.flushChan = make(chan flushTask, db.opt.NumMemtables)
- db.closers.memtable = y.NewCloser(1)
- go func() {
- _ = db.flushMemtable(db.closers.memtable)
- }()
- }
-}
-
-// Flatten can be used to force compactions on the LSM tree so all the tables fall on the same
-// level. This ensures that all the versions of keys are colocated and not split across multiple
-// levels, which is necessary after a restore from backup. During Flatten, live compactions are
-// stopped. Ideally, no writes are going on during Flatten. Otherwise, it would create competition
-// between flattening the tree and new tables being created at level zero.
-func (db *DB) Flatten(workers int) error {
- db.stopCompactions()
- defer db.startCompactions()
-
- compactAway := func(cp compactionPriority) error {
- db.opt.Infof("Attempting to compact with %+v\n", cp)
- errCh := make(chan error, 1)
- for i := 0; i < workers; i++ {
- go func() {
- errCh <- db.lc.doCompact(cp)
- }()
- }
- var success int
- var rerr error
- for i := 0; i < workers; i++ {
- err := <-errCh
- if err != nil {
- rerr = err
- db.opt.Warningf("While running doCompact with %+v. Error: %v\n", cp, err)
- } else {
- success++
- }
- }
- if success == 0 {
- return rerr
- }
- // We could do at least one successful compaction. So, we'll consider this a success.
- db.opt.Infof("%d compactor(s) succeeded. One or more tables from level %d compacted.\n",
- success, cp.level)
- return nil
- }
-
- hbytes := func(sz int64) string {
- return humanize.Bytes(uint64(sz))
- }
-
- for {
- db.opt.Infof("\n")
- var levels []int
- for i, l := range db.lc.levels {
- sz := l.getTotalSize()
- db.opt.Infof("Level: %d. %8s Size. %8s Max.\n",
- i, hbytes(l.getTotalSize()), hbytes(l.maxTotalSize))
- if sz > 0 {
- levels = append(levels, i)
- }
- }
- if len(levels) <= 1 {
- prios := db.lc.pickCompactLevels()
- if len(prios) == 0 || prios[0].score <= 1.0 {
- db.opt.Infof("All tables consolidated into one level. Flattening done.\n")
- return nil
- }
- if err := compactAway(prios[0]); err != nil {
- return err
- }
- continue
- }
- // Create an artificial compaction priority, to ensure that we compact the level.
- cp := compactionPriority{level: levels[0], score: 1.71}
- if err := compactAway(cp); err != nil {
- return err
- }
- }
-}
-
-func (db *DB) prepareToDrop() func() {
- if db.opt.ReadOnly {
- panic("Attempting to drop data in read-only mode.")
- }
- // Stop accepting new writes.
- atomic.StoreInt32(&db.blockWrites, 1)
-
- // Make all pending writes finish. The following will also close writeCh.
- db.closers.writes.SignalAndWait()
- db.opt.Infof("Writes flushed. Stopping compactions now...")
-
- // Stop all compactions.
- db.stopCompactions()
- return func() {
- db.opt.Infof("Resuming writes")
- db.startCompactions()
-
- db.writeCh = make(chan *request, kvWriteChCapacity)
- db.closers.writes = y.NewCloser(1)
- go db.doWrites(db.closers.writes)
-
- // Resume writes.
- atomic.StoreInt32(&db.blockWrites, 0)
- }
-}
-
-// DropAll would drop all the data stored in Badger. It does this in the following way.
-// - Stop accepting new writes.
-// - Pause memtable flushes and compactions.
-// - Pick all tables from all levels, create a changeset to delete all these
-// tables and apply it to manifest.
-// - Pick all log files from value log, and delete all of them. Restart value log files from zero.
-// - Resume memtable flushes and compactions.
-//
-// NOTE: DropAll is resilient to concurrent writes, but not to reads. It is up to the user to not do
-// any reads while DropAll is going on, otherwise they may result in panics. Ideally, both reads and
-// writes are paused before running DropAll, and resumed after it is finished.
-func (db *DB) DropAll() error {
- f, err := db.dropAll()
- if err != nil {
- return err
- }
- if f == nil {
- panic("both error and returned function cannot be nil in DropAll")
- }
- f()
- return nil
-}
-
-func (db *DB) dropAll() (func(), error) {
- db.opt.Infof("DropAll called. Blocking writes...")
- f := db.prepareToDrop()
-
- // Block all foreign interactions with memory tables.
- db.Lock()
- defer db.Unlock()
-
- // Remove inmemory tables. Calling DecrRef for safety. Not sure if they're absolutely needed.
- db.mt.DecrRef()
- for _, mt := range db.imm {
- mt.DecrRef()
- }
- db.imm = db.imm[:0]
- db.mt = skl.NewSkiplist(arenaSize(db.opt)) // Set it up for future writes.
-
- num, err := db.lc.dropTree()
- if err != nil {
- return nil, err
- }
- db.opt.Infof("Deleted %d SSTables. Now deleting value logs...\n", num)
-
- num, err = db.vlog.dropAll()
- if err != nil {
- return nil, err
- }
- db.vhead = valuePointer{} // Zero it out.
- db.lc.nextFileID = 1
- db.opt.Infof("Deleted %d value log files. DropAll done.\n", num)
- return f, nil
-}
-
-// DropPrefix would drop all the keys with the provided prefix. It does this in the following way:
-// - Stop accepting new writes.
-// - Stop memtable flushes and compactions.
-// - Flush out all memtables, skipping over keys with the given prefix, Kp.
-// - Write out the value log header to memtables when flushing, so we don't accidentally bring Kp
-// back after a restart.
-// - Compact L0->L1, skipping over Kp.
-// - Compact rest of the levels, Li->Li, picking tables which have Kp.
-// - Resume memtable flushes, compactions and writes.
-func (db *DB) DropPrefix(prefix []byte) error {
- db.opt.Infof("DropPrefix called on %s. Blocking writes...", hex.Dump(prefix))
- f := db.prepareToDrop()
- defer f()
-
- // Block all foreign interactions with memory tables.
- db.Lock()
- defer db.Unlock()
-
- db.imm = append(db.imm, db.mt)
- for _, memtable := range db.imm {
- if memtable.Empty() {
- memtable.DecrRef()
- continue
- }
- task := flushTask{
- mt: memtable,
- // Ensure that the head of value log gets persisted to disk.
- vptr: db.vhead,
- dropPrefix: prefix,
- }
- db.opt.Debugf("Flushing memtable")
- if err := db.handleFlushTask(task); err != nil {
- db.opt.Errorf("While trying to flush memtable: %v", err)
- return err
- }
- memtable.DecrRef()
- }
- db.imm = db.imm[:0]
- db.mt = skl.NewSkiplist(arenaSize(db.opt))
-
- // Drop prefixes from the levels.
- if err := db.lc.dropPrefix(prefix); err != nil {
- return err
- }
- db.opt.Infof("DropPrefix done")
- return nil
-}
-
-// Subscribe can be used watch key changes for the given key prefix.
-func (db *DB) Subscribe(ctx context.Context, cb callback, prefix []byte, prefixes ...[]byte) error {
- if cb == nil {
- return ErrNilCallback
- }
- prefixes = append(prefixes, prefix)
- c := y.NewCloser(1)
- recvCh, id := db.pub.newSubscriber(c, prefixes...)
- slurp := func(batch *pb.KVList) {
- defer func() {
- if len(batch.GetKv()) > 0 {
- cb(batch)
- }
- }()
- for {
- select {
- case kvs := <-recvCh:
- batch.Kv = append(batch.Kv, kvs.Kv...)
- default:
- return
- }
- }
- }
- for {
- select {
- case <-c.HasBeenClosed():
- slurp(new(pb.KVList))
- // Drain if any pending updates.
- c.Done()
- // No need to delete here. Closer will be called only while
- // closing DB. Subscriber will be deleted by cleanSubscribers.
- return nil
- case <-ctx.Done():
- c.Done()
- db.pub.deleteSubscriber(id)
- // Delete the subscriber to avoid further updates.
- return ctx.Err()
- case batch := <-recvCh:
- slurp(batch)
- }
- }
-}
diff --git a/vendor/github.com/dgraph-io/badger/dir_unix.go b/vendor/github.com/dgraph-io/badger/dir_unix.go
deleted file mode 100644
index d56e6e82..00000000
--- a/vendor/github.com/dgraph-io/badger/dir_unix.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// +build !windows
-
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
-
- "github.com/dgraph-io/badger/y"
- "github.com/pkg/errors"
- "golang.org/x/sys/unix"
-)
-
-// directoryLockGuard holds a lock on a directory and a pid file inside. The pid file isn't part
-// of the locking mechanism, it's just advisory.
-type directoryLockGuard struct {
- // File handle on the directory, which we've flocked.
- f *os.File
- // The absolute path to our pid file.
- path string
- // Was this a shared lock for a read-only database?
- readOnly bool
-}
-
-// acquireDirectoryLock gets a lock on the directory (using flock). If
-// this is not read-only, it will also write our pid to
-// dirPath/pidFileName for convenience.
-func acquireDirectoryLock(dirPath string, pidFileName string, readOnly bool) (
- *directoryLockGuard, error) {
- // Convert to absolute path so that Release still works even if we do an unbalanced
- // chdir in the meantime.
- absPidFilePath, err := filepath.Abs(filepath.Join(dirPath, pidFileName))
- if err != nil {
- return nil, errors.Wrap(err, "cannot get absolute path for pid lock file")
- }
- f, err := os.Open(dirPath)
- if err != nil {
- return nil, errors.Wrapf(err, "cannot open directory %q", dirPath)
- }
- opts := unix.LOCK_EX | unix.LOCK_NB
- if readOnly {
- opts = unix.LOCK_SH | unix.LOCK_NB
- }
-
- err = unix.Flock(int(f.Fd()), opts)
- if err != nil {
- f.Close()
- return nil, errors.Wrapf(err,
- "Cannot acquire directory lock on %q. Another process is using this Badger database.",
- dirPath)
- }
-
- if !readOnly {
- // Yes, we happily overwrite a pre-existing pid file. We're the
- // only read-write badger process using this directory.
- err = ioutil.WriteFile(absPidFilePath, []byte(fmt.Sprintf("%d\n", os.Getpid())), 0666)
- if err != nil {
- f.Close()
- return nil, errors.Wrapf(err,
- "Cannot write pid file %q", absPidFilePath)
- }
- }
- return &directoryLockGuard{f, absPidFilePath, readOnly}, nil
-}
-
-// Release deletes the pid file and releases our lock on the directory.
-func (guard *directoryLockGuard) release() error {
- var err error
- if !guard.readOnly {
- // It's important that we remove the pid file first.
- err = os.Remove(guard.path)
- }
-
- if closeErr := guard.f.Close(); err == nil {
- err = closeErr
- }
- guard.path = ""
- guard.f = nil
-
- return err
-}
-
-// openDir opens a directory for syncing.
-func openDir(path string) (*os.File, error) { return os.Open(path) }
-
-// When you create or delete a file, you have to ensure the directory entry for the file is synced
-// in order to guarantee the file is visible (if the system crashes). (See the man page for fsync,
-// or see https://github.com/coreos/etcd/issues/6368 for an example.)
-func syncDir(dir string) error {
- f, err := openDir(dir)
- if err != nil {
- return errors.Wrapf(err, "While opening directory: %s.", dir)
- }
- err = y.FileSync(f)
- closeErr := f.Close()
- if err != nil {
- return errors.Wrapf(err, "While syncing directory: %s.", dir)
- }
- return errors.Wrapf(closeErr, "While closing directory: %s.", dir)
-}
diff --git a/vendor/github.com/dgraph-io/badger/dir_windows.go b/vendor/github.com/dgraph-io/badger/dir_windows.go
deleted file mode 100644
index 60f982e2..00000000
--- a/vendor/github.com/dgraph-io/badger/dir_windows.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// +build windows
-
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-// OpenDir opens a directory in windows with write access for syncing.
-import (
- "os"
- "path/filepath"
- "syscall"
-
- "github.com/pkg/errors"
-)
-
-// FILE_ATTRIBUTE_TEMPORARY - A file that is being used for temporary storage.
-// FILE_FLAG_DELETE_ON_CLOSE - The file is to be deleted immediately after all of its handles are
-// closed, which includes the specified handle and any other open or duplicated handles.
-// See: https://docs.microsoft.com/en-us/windows/desktop/FileIO/file-attribute-constants
-// NOTE: Added here to avoid importing golang.org/x/sys/windows
-const (
- FILE_ATTRIBUTE_TEMPORARY = 0x00000100
- FILE_FLAG_DELETE_ON_CLOSE = 0x04000000
-)
-
-func openDir(path string) (*os.File, error) {
- fd, err := openDirWin(path)
- if err != nil {
- return nil, err
- }
- return os.NewFile(uintptr(fd), path), nil
-}
-
-func openDirWin(path string) (fd syscall.Handle, err error) {
- if len(path) == 0 {
- return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
- }
- pathp, err := syscall.UTF16PtrFromString(path)
- if err != nil {
- return syscall.InvalidHandle, err
- }
- access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE)
- sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE)
- createmode := uint32(syscall.OPEN_EXISTING)
- fl := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS)
- return syscall.CreateFile(pathp, access, sharemode, nil, createmode, fl, 0)
-}
-
-// DirectoryLockGuard holds a lock on the directory.
-type directoryLockGuard struct {
- h syscall.Handle
- path string
-}
-
-// AcquireDirectoryLock acquires exclusive access to a directory.
-func acquireDirectoryLock(dirPath string, pidFileName string, readOnly bool) (*directoryLockGuard, error) {
- if readOnly {
- return nil, ErrWindowsNotSupported
- }
-
- // Convert to absolute path so that Release still works even if we do an unbalanced
- // chdir in the meantime.
- absLockFilePath, err := filepath.Abs(filepath.Join(dirPath, pidFileName))
- if err != nil {
- return nil, errors.Wrap(err, "Cannot get absolute path for pid lock file")
- }
-
- // This call creates a file handler in memory that only one process can use at a time. When
- // that process ends, the file is deleted by the system.
- // FILE_ATTRIBUTE_TEMPORARY is used to tell Windows to try to create the handle in memory.
- // FILE_FLAG_DELETE_ON_CLOSE is not specified in syscall_windows.go but tells Windows to delete
- // the file when all processes holding the handler are closed.
- // XXX: this works but it's a bit klunky. i'd prefer to use LockFileEx but it needs unsafe pkg.
- h, err := syscall.CreateFile(
- syscall.StringToUTF16Ptr(absLockFilePath), 0, 0, nil,
- syscall.OPEN_ALWAYS,
- uint32(FILE_ATTRIBUTE_TEMPORARY|FILE_FLAG_DELETE_ON_CLOSE),
- 0)
- if err != nil {
- return nil, errors.Wrapf(err,
- "Cannot create lock file %q. Another process is using this Badger database",
- absLockFilePath)
- }
-
- return &directoryLockGuard{h: h, path: absLockFilePath}, nil
-}
-
-// Release removes the directory lock.
-func (g *directoryLockGuard) release() error {
- g.path = ""
- return syscall.CloseHandle(g.h)
-}
-
-// Windows doesn't support syncing directories to the file system. See
-// https://github.com/dgraph-io/badger/issues/699#issuecomment-504133587 for more details.
-func syncDir(dir string) error { return nil }
diff --git a/vendor/github.com/dgraph-io/badger/doc.go b/vendor/github.com/dgraph-io/badger/doc.go
deleted file mode 100644
index 83dc9a28..00000000
--- a/vendor/github.com/dgraph-io/badger/doc.go
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
-Package badger implements an embeddable, simple and fast key-value database,
-written in pure Go. It is designed to be highly performant for both reads and
-writes simultaneously. Badger uses Multi-Version Concurrency Control (MVCC), and
-supports transactions. It runs transactions concurrently, with serializable
-snapshot isolation guarantees.
-
-Badger uses an LSM tree along with a value log to separate keys from values,
-hence reducing both write amplification and the size of the LSM tree. This
-allows LSM tree to be served entirely from RAM, while the values are served
-from SSD.
-
-
-Usage
-
-Badger has the following main types: DB, Txn, Item and Iterator. DB contains
-keys that are associated with values. It must be opened with the appropriate
-options before it can be accessed.
-
-All operations happen inside a Txn. Txn represents a transaction, which can
-be read-only or read-write. Read-only transactions can read values for a
-given key (which are returned inside an Item), or iterate over a set of
-key-value pairs using an Iterator (which are returned as Item type values as
-well). Read-write transactions can also update and delete keys from the DB.
-
-See the examples for more usage details.
-*/
-package badger
diff --git a/vendor/github.com/dgraph-io/badger/errors.go b/vendor/github.com/dgraph-io/badger/errors.go
deleted file mode 100644
index 8d2df683..00000000
--- a/vendor/github.com/dgraph-io/badger/errors.go
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "math"
-
- "github.com/pkg/errors"
-)
-
-const (
- // ValueThresholdLimit is the maximum permissible value of opt.ValueThreshold.
- ValueThresholdLimit = math.MaxUint16 - 16 + 1
-)
-
-var (
- // ErrValueLogSize is returned when opt.ValueLogFileSize option is not within the valid
- // range.
- ErrValueLogSize = errors.New("Invalid ValueLogFileSize, must be between 1MB and 2GB")
-
- // ErrValueThreshold is returned when ValueThreshold is set to a value close to or greater than
- // uint16.
- ErrValueThreshold = errors.Errorf(
- "Invalid ValueThreshold, must be less than %d", ValueThresholdLimit)
-
- // ErrKeyNotFound is returned when key isn't found on a txn.Get.
- ErrKeyNotFound = errors.New("Key not found")
-
- // ErrTxnTooBig is returned if too many writes are fit into a single transaction.
- ErrTxnTooBig = errors.New("Txn is too big to fit into one request")
-
- // ErrConflict is returned when a transaction conflicts with another transaction. This can
- // happen if the read rows had been updated concurrently by another transaction.
- ErrConflict = errors.New("Transaction Conflict. Please retry")
-
- // ErrReadOnlyTxn is returned if an update function is called on a read-only transaction.
- ErrReadOnlyTxn = errors.New("No sets or deletes are allowed in a read-only transaction")
-
- // ErrDiscardedTxn is returned if a previously discarded transaction is re-used.
- ErrDiscardedTxn = errors.New("This transaction has been discarded. Create a new one")
-
- // ErrEmptyKey is returned if an empty key is passed on an update function.
- ErrEmptyKey = errors.New("Key cannot be empty")
-
- // ErrInvalidKey is returned if the key has a special !badger! prefix,
- // reserved for internal usage.
- ErrInvalidKey = errors.New("Key is using a reserved !badger! prefix")
-
- // ErrRetry is returned when a log file containing the value is not found.
- // This usually indicates that it may have been garbage collected, and the
- // operation needs to be retried.
- ErrRetry = errors.New("Unable to find log file. Please retry")
-
- // ErrThresholdZero is returned if threshold is set to zero, and value log GC is called.
- // In such a case, GC can't be run.
- ErrThresholdZero = errors.New(
- "Value log GC can't run because threshold is set to zero")
-
- // ErrNoRewrite is returned if a call for value log GC doesn't result in a log file rewrite.
- ErrNoRewrite = errors.New(
- "Value log GC attempt didn't result in any cleanup")
-
- // ErrRejected is returned if a value log GC is called either while another GC is running, or
- // after DB::Close has been called.
- ErrRejected = errors.New("Value log GC request rejected")
-
- // ErrInvalidRequest is returned if the user request is invalid.
- ErrInvalidRequest = errors.New("Invalid request")
-
- // ErrManagedTxn is returned if the user tries to use an API which isn't
- // allowed due to external management of transactions, when using ManagedDB.
- ErrManagedTxn = errors.New(
- "Invalid API request. Not allowed to perform this action using ManagedDB")
-
- // ErrInvalidDump if a data dump made previously cannot be loaded into the database.
- ErrInvalidDump = errors.New("Data dump cannot be read")
-
- // ErrZeroBandwidth is returned if the user passes in zero bandwidth for sequence.
- ErrZeroBandwidth = errors.New("Bandwidth must be greater than zero")
-
- // ErrInvalidLoadingMode is returned when opt.ValueLogLoadingMode option is not
- // within the valid range
- ErrInvalidLoadingMode = errors.New("Invalid ValueLogLoadingMode, must be FileIO or MemoryMap")
-
- // ErrReplayNeeded is returned when opt.ReadOnly is set but the
- // database requires a value log replay.
- ErrReplayNeeded = errors.New("Database was not properly closed, cannot open read-only")
-
- // ErrWindowsNotSupported is returned when opt.ReadOnly is used on Windows
- ErrWindowsNotSupported = errors.New("Read-only mode is not supported on Windows")
-
- // ErrTruncateNeeded is returned when the value log gets corrupt, and requires truncation of
- // corrupt data to allow Badger to run properly.
- ErrTruncateNeeded = errors.New(
- "Value log truncate required to run DB. This might result in data loss")
-
- // ErrBlockedWrites is returned if the user called DropAll. During the process of dropping all
- // data from Badger, we stop accepting new writes, by returning this error.
- ErrBlockedWrites = errors.New("Writes are blocked, possibly due to DropAll or Close")
-
- // ErrNilCallback is returned when subscriber's callback is nil.
- ErrNilCallback = errors.New("Callback cannot be nil")
-)
diff --git a/vendor/github.com/dgraph-io/badger/histogram.go b/vendor/github.com/dgraph-io/badger/histogram.go
deleted file mode 100644
index d8c94bb7..00000000
--- a/vendor/github.com/dgraph-io/badger/histogram.go
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "fmt"
- "math"
-)
-
-// PrintHistogram builds and displays the key-value size histogram.
-// When keyPrefix is set, only the keys that have prefix "keyPrefix" are
-// considered for creating the histogram
-func (db *DB) PrintHistogram(keyPrefix []byte) {
- if db == nil {
- fmt.Println("\nCannot build histogram: DB is nil.")
- return
- }
- histogram := db.buildHistogram(keyPrefix)
- fmt.Printf("Histogram of key sizes (in bytes)\n")
- histogram.keySizeHistogram.printHistogram()
- fmt.Printf("Histogram of value sizes (in bytes)\n")
- histogram.valueSizeHistogram.printHistogram()
-}
-
-// histogramData stores information about a histogram
-type histogramData struct {
- bins []int64
- countPerBin []int64
- totalCount int64
- min int64
- max int64
- sum int64
-}
-
-// sizeHistogram contains keySize histogram and valueSize histogram
-type sizeHistogram struct {
- keySizeHistogram, valueSizeHistogram histogramData
-}
-
-// newSizeHistogram returns a new instance of keyValueSizeHistogram with
-// properly initialized fields.
-func newSizeHistogram() *sizeHistogram {
- // TODO(ibrahim): find appropriate bin size.
- keyBins := createHistogramBins(1, 16)
- valueBins := createHistogramBins(1, 30)
- return &sizeHistogram{
- keySizeHistogram: histogramData{
- bins: keyBins,
- countPerBin: make([]int64, len(keyBins)+1),
- max: math.MinInt64,
- min: math.MaxInt64,
- sum: 0,
- },
- valueSizeHistogram: histogramData{
- bins: valueBins,
- countPerBin: make([]int64, len(valueBins)+1),
- max: math.MinInt64,
- min: math.MaxInt64,
- sum: 0,
- },
- }
-}
-
-// createHistogramBins creates bins for an histogram. The bin sizes are powers
-// of two of the form [2^min_exponent, ..., 2^max_exponent].
-func createHistogramBins(minExponent, maxExponent uint32) []int64 {
- var bins []int64
- for i := minExponent; i <= maxExponent; i++ {
- bins = append(bins, int64(1)<<i)
- }
- return bins
-}
-
-// Update changes the Min and Max fields if value is less than or greater
-// than the current Min/Max value.
-func (histogram *histogramData) Update(value int64) {
- if value > histogram.max {
- histogram.max = value
- }
- if value < histogram.min {
- histogram.min = value
- }
-
- histogram.sum += value
- histogram.totalCount++
-
- for index := 0; index <= len(histogram.bins); index++ {
- // Allocate value in the last buckets if we reached the end of the Bounds array.
- if index == len(histogram.bins) {
- histogram.countPerBin[index]++
- break
- }
-
- // Check if the value should be added to the "index" bin
- if value < int64(histogram.bins[index]) {
- histogram.countPerBin[index]++
- break
- }
- }
-}
-
-// buildHistogram builds the key-value size histogram.
-// When keyPrefix is set, only the keys that have prefix "keyPrefix" are
-// considered for creating the histogram
-func (db *DB) buildHistogram(keyPrefix []byte) *sizeHistogram {
- txn := db.NewTransaction(false)
- defer txn.Discard()
-
- itr := txn.NewIterator(DefaultIteratorOptions)
- defer itr.Close()
-
- badgerHistogram := newSizeHistogram()
-
- // Collect key and value sizes.
- for itr.Seek(keyPrefix); itr.ValidForPrefix(keyPrefix); itr.Next() {
- item := itr.Item()
- badgerHistogram.keySizeHistogram.Update(item.KeySize())
- badgerHistogram.valueSizeHistogram.Update(item.ValueSize())
- }
- return badgerHistogram
-}
-
-// printHistogram prints the histogram data in a human-readable format.
-func (histogram histogramData) printHistogram() {
- fmt.Printf("Total count: %d\n", histogram.totalCount)
- fmt.Printf("Min value: %d\n", histogram.min)
- fmt.Printf("Max value: %d\n", histogram.max)
- fmt.Printf("Mean: %.2f\n", float64(histogram.sum)/float64(histogram.totalCount))
- fmt.Printf("%24s %9s\n", "Range", "Count")
-
- numBins := len(histogram.bins)
- for index, count := range histogram.countPerBin {
- if count == 0 {
- continue
- }
-
- // The last bin represents the bin that contains the range from
- // the last bin up to infinity so it's processed differently than the
- // other bins.
- if index == len(histogram.countPerBin)-1 {
- lowerBound := int(histogram.bins[numBins-1])
- fmt.Printf("[%10d, %10s) %9d\n", lowerBound, "infinity", count)
- continue
- }
-
- upperBound := int(histogram.bins[index])
- lowerBound := 0
- if index > 0 {
- lowerBound = int(histogram.bins[index-1])
- }
-
- fmt.Printf("[%10d, %10d) %9d\n", lowerBound, upperBound, count)
- }
- fmt.Println()
-}
diff --git a/vendor/github.com/dgraph-io/badger/iterator.go b/vendor/github.com/dgraph-io/badger/iterator.go
deleted file mode 100644
index f4af4058..00000000
--- a/vendor/github.com/dgraph-io/badger/iterator.go
+++ /dev/null
@@ -1,684 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "bytes"
- "fmt"
- "hash/crc32"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/dgraph-io/badger/options"
- "github.com/dgraph-io/badger/table"
-
- "github.com/dgraph-io/badger/y"
-)
-
-type prefetchStatus uint8
-
-const (
- prefetched prefetchStatus = iota + 1
-)
-
-// Item is returned during iteration. Both the Key() and Value() output is only valid until
-// iterator.Next() is called.
-type Item struct {
- status prefetchStatus
- err error
- wg sync.WaitGroup
- db *DB
- key []byte
- vptr []byte
- meta byte // We need to store meta to know about bitValuePointer.
- userMeta byte
- expiresAt uint64
- val []byte
- slice *y.Slice // Used only during prefetching.
- next *Item
- version uint64
- txn *Txn
-}
-
-// String returns a string representation of Item
-func (item *Item) String() string {
- return fmt.Sprintf("key=%q, version=%d, meta=%x", item.Key(), item.Version(), item.meta)
-}
-
-// Key returns the key.
-//
-// Key is only valid as long as item is valid, or transaction is valid. If you need to use it
-// outside its validity, please use KeyCopy.
-func (item *Item) Key() []byte {
- return item.key
-}
-
-// KeyCopy returns a copy of the key of the item, writing it to dst slice.
-// If nil is passed, or capacity of dst isn't sufficient, a new slice would be allocated and
-// returned.
-func (item *Item) KeyCopy(dst []byte) []byte {
- return y.SafeCopy(dst, item.key)
-}
-
-// Version returns the commit timestamp of the item.
-func (item *Item) Version() uint64 {
- return item.version
-}
-
-// Value retrieves the value of the item from the value log.
-//
-// This method must be called within a transaction. Calling it outside a
-// transaction is considered undefined behavior. If an iterator is being used,
-// then Item.Value() is defined in the current iteration only, because items are
-// reused.
-//
-// If you need to use a value outside a transaction, please use Item.ValueCopy
-// instead, or copy it yourself. Value might change once discard or commit is called.
-// Use ValueCopy if you want to do a Set after Get.
-func (item *Item) Value(fn func(val []byte) error) error {
- item.wg.Wait()
- if item.status == prefetched {
- if item.err == nil && fn != nil {
- if err := fn(item.val); err != nil {
- return err
- }
- }
- return item.err
- }
- buf, cb, err := item.yieldItemValue()
- defer runCallback(cb)
- if err != nil {
- return err
- }
- if fn != nil {
- return fn(buf)
- }
- return nil
-}
-
-// ValueCopy returns a copy of the value of the item from the value log, writing it to dst slice.
-// If nil is passed, or capacity of dst isn't sufficient, a new slice would be allocated and
-// returned. Tip: It might make sense to reuse the returned slice as dst argument for the next call.
-//
-// This function is useful in long running iterate/update transactions to avoid a write deadlock.
-// See Github issue: https://github.com/dgraph-io/badger/issues/315
-func (item *Item) ValueCopy(dst []byte) ([]byte, error) {
- item.wg.Wait()
- if item.status == prefetched {
- return y.SafeCopy(dst, item.val), item.err
- }
- buf, cb, err := item.yieldItemValue()
- defer runCallback(cb)
- return y.SafeCopy(dst, buf), err
-}
-
-func (item *Item) hasValue() bool {
- if item.meta == 0 && item.vptr == nil {
- // key not found
- return false
- }
- return true
-}
-
-// IsDeletedOrExpired returns true if item contains deleted or expired value.
-func (item *Item) IsDeletedOrExpired() bool {
- return isDeletedOrExpired(item.meta, item.expiresAt)
-}
-
-// DiscardEarlierVersions returns whether the item was created with the
-// option to discard earlier versions of a key when multiple are available.
-func (item *Item) DiscardEarlierVersions() bool {
- return item.meta&bitDiscardEarlierVersions > 0
-}
-
-func (item *Item) yieldItemValue() ([]byte, func(), error) {
- key := item.Key() // No need to copy.
- for {
- if !item.hasValue() {
- return nil, nil, nil
- }
-
- if item.slice == nil {
- item.slice = new(y.Slice)
- }
-
- if (item.meta & bitValuePointer) == 0 {
- val := item.slice.Resize(len(item.vptr))
- copy(val, item.vptr)
- return val, nil, nil
- }
-
- var vp valuePointer
- vp.Decode(item.vptr)
- result, cb, err := item.db.vlog.Read(vp, item.slice)
- if err != ErrRetry {
- return result, cb, err
- }
- if bytes.HasPrefix(key, badgerMove) {
- // err == ErrRetry
- // Error is retry even after checking the move keyspace. So, let's
- // just assume that value is not present.
- return nil, cb, nil
- }
-
- // The value pointer is pointing to a deleted value log. Look for the
- // move key and read that instead.
- runCallback(cb)
- // Do not put badgerMove on the left in append. It seems to cause some sort of manipulation.
- keyTs := y.KeyWithTs(item.Key(), item.Version())
- key = make([]byte, len(badgerMove)+len(keyTs))
- n := copy(key, badgerMove)
- copy(key[n:], keyTs)
- // Note that we can't set item.key to move key, because that would
- // change the key user sees before and after this call. Also, this move
- // logic is internal logic and should not impact the external behavior
- // of the retrieval.
- vs, err := item.db.get(key)
- if err != nil {
- return nil, nil, err
- }
- if vs.Version != item.Version() {
- return nil, nil, nil
- }
- // Bug fix: Always copy the vs.Value into vptr here. Otherwise, when item is reused this
- // slice gets overwritten.
- item.vptr = y.SafeCopy(item.vptr, vs.Value)
- item.meta &^= bitValuePointer // Clear the value pointer bit.
- if vs.Meta&bitValuePointer > 0 {
- item.meta |= bitValuePointer // This meta would only be about value pointer.
- }
- }
-}
-
-func runCallback(cb func()) {
- if cb != nil {
- cb()
- }
-}
-
-func (item *Item) prefetchValue() {
- val, cb, err := item.yieldItemValue()
- defer runCallback(cb)
-
- item.err = err
- item.status = prefetched
- if val == nil {
- return
- }
- if item.db.opt.ValueLogLoadingMode == options.MemoryMap {
- buf := item.slice.Resize(len(val))
- copy(buf, val)
- item.val = buf
- } else {
- item.val = val
- }
-}
-
-// EstimatedSize returns the approximate size of the key-value pair.
-//
-// This can be called while iterating through a store to quickly estimate the
-// size of a range of key-value pairs (without fetching the corresponding
-// values).
-func (item *Item) EstimatedSize() int64 {
- if !item.hasValue() {
- return 0
- }
- if (item.meta & bitValuePointer) == 0 {
- return int64(len(item.key) + len(item.vptr))
- }
- var vp valuePointer
- vp.Decode(item.vptr)
- return int64(vp.Len) // includes key length.
-}
-
-// KeySize returns the size of the key.
-// Exact size of the key is key + 8 bytes of timestamp
-func (item *Item) KeySize() int64 {
- return int64(len(item.key))
-}
-
-// ValueSize returns the exact size of the value.
-//
-// This can be called to quickly estimate the size of a value without fetching
-// it.
-func (item *Item) ValueSize() int64 {
- if !item.hasValue() {
- return 0
- }
- if (item.meta & bitValuePointer) == 0 {
- return int64(len(item.vptr))
- }
- var vp valuePointer
- vp.Decode(item.vptr)
-
- klen := int64(len(item.key) + 8) // 8 bytes for timestamp.
- return int64(vp.Len) - klen - headerBufSize - crc32.Size
-}
-
-// UserMeta returns the userMeta set by the user. Typically, this byte, optionally set by the user
-// is used to interpret the value.
-func (item *Item) UserMeta() byte {
- return item.userMeta
-}
-
-// ExpiresAt returns a Unix time value indicating when the item will be
-// considered expired. 0 indicates that the item will never expire.
-func (item *Item) ExpiresAt() uint64 {
- return item.expiresAt
-}
-
-// TODO: Switch this to use linked list container in Go.
-type list struct {
- head *Item
- tail *Item
-}
-
-func (l *list) push(i *Item) {
- i.next = nil
- if l.tail == nil {
- l.head = i
- l.tail = i
- return
- }
- l.tail.next = i
- l.tail = i
-}
-
-func (l *list) pop() *Item {
- if l.head == nil {
- return nil
- }
- i := l.head
- if l.head == l.tail {
- l.tail = nil
- l.head = nil
- } else {
- l.head = i.next
- }
- i.next = nil
- return i
-}
-
-// IteratorOptions is used to set options when iterating over Badger key-value
-// stores.
-//
-// This package provides DefaultIteratorOptions which contains options that
-// should work for most applications. Consider using that as a starting point
-// before customizing it for your own needs.
-type IteratorOptions struct {
- // Indicates whether we should prefetch values during iteration and store them.
- PrefetchValues bool
- // How many KV pairs to prefetch while iterating. Valid only if PrefetchValues is true.
- PrefetchSize int
- Reverse bool // Direction of iteration. False is forward, true is backward.
- AllVersions bool // Fetch all valid versions of the same key.
-
- // The following option is used to narrow down the SSTables that iterator picks up. If
- // Prefix is specified, only tables which could have this prefix are picked based on their range
- // of keys.
- Prefix []byte // Only iterate over this given prefix.
- prefixIsKey bool // If set, use the prefix for bloom filter lookup.
-
- InternalAccess bool // Used to allow internal access to badger keys.
-}
-
-func (opt *IteratorOptions) pickTable(t table.TableInterface) bool {
- if len(opt.Prefix) == 0 {
- return true
- }
- trim := func(key []byte) []byte {
- if len(key) > len(opt.Prefix) {
- return key[:len(opt.Prefix)]
- }
- return key
- }
- if bytes.Compare(trim(t.Smallest()), opt.Prefix) > 0 {
- return false
- }
- if bytes.Compare(trim(t.Biggest()), opt.Prefix) < 0 {
- return false
- }
- // Bloom filter lookup would only work if opt.Prefix does NOT have the read
- // timestamp as part of the key.
- if opt.prefixIsKey && t.DoesNotHave(opt.Prefix) {
- return false
- }
- return true
-}
-
-// DefaultIteratorOptions contains default options when iterating over Badger key-value stores.
-var DefaultIteratorOptions = IteratorOptions{
- PrefetchValues: true,
- PrefetchSize: 100,
- Reverse: false,
- AllVersions: false,
-}
-
-// Iterator helps iterating over the KV pairs in a lexicographically sorted order.
-type Iterator struct {
- iitr *y.MergeIterator
- txn *Txn
- readTs uint64
-
- opt IteratorOptions
- item *Item
- data list
- waste list
-
- lastKey []byte // Used to skip over multiple versions of the same key.
-
- closed bool
-}
-
-// NewIterator returns a new iterator. Depending upon the options, either only keys, or both
-// key-value pairs would be fetched. The keys are returned in lexicographically sorted order.
-// Using prefetch is recommended if you're doing a long running iteration, for performance.
-//
-// Multiple Iterators:
-// For a read-only txn, multiple iterators can be running simultaneously. However, for a read-write
-// txn, only one can be running at one time to avoid race conditions, because Txn is thread-unsafe.
-func (txn *Txn) NewIterator(opt IteratorOptions) *Iterator {
- if txn.discarded {
- panic("Transaction has already been discarded")
- }
- // Do not change the order of the next if. We must track the number of running iterators.
- if atomic.AddInt32(&txn.numIterators, 1) > 1 && txn.update {
- atomic.AddInt32(&txn.numIterators, -1)
- panic("Only one iterator can be active at one time, for a RW txn.")
- }
-
- // TODO: If Prefix is set, only pick those memtables which have keys with
- // the prefix.
- tables, decr := txn.db.getMemTables()
- defer decr()
- txn.db.vlog.incrIteratorCount()
- var iters []y.Iterator
- if itr := txn.newPendingWritesIterator(opt.Reverse); itr != nil {
- iters = append(iters, itr)
- }
- for i := 0; i < len(tables); i++ {
- iters = append(iters, tables[i].NewUniIterator(opt.Reverse))
- }
- iters = txn.db.lc.appendIterators(iters, &opt) // This will increment references.
- res := &Iterator{
- txn: txn,
- iitr: y.NewMergeIterator(iters, opt.Reverse),
- opt: opt,
- readTs: txn.readTs,
- }
- return res
-}
-
-// NewKeyIterator is just like NewIterator, but allows the user to iterate over all versions of a
-// single key. Internally, it sets the Prefix option in provided opt, and uses that prefix to
-// additionally run bloom filter lookups before picking tables from the LSM tree.
-func (txn *Txn) NewKeyIterator(key []byte, opt IteratorOptions) *Iterator {
- if len(opt.Prefix) > 0 {
- panic("opt.Prefix should be nil for NewKeyIterator.")
- }
- opt.Prefix = key // This key must be without the timestamp.
- opt.prefixIsKey = true
- return txn.NewIterator(opt)
-}
-
-func (it *Iterator) newItem() *Item {
- item := it.waste.pop()
- if item == nil {
- item = &Item{slice: new(y.Slice), db: it.txn.db, txn: it.txn}
- }
- return item
-}
-
-// Item returns pointer to the current key-value pair.
-// This item is only valid until it.Next() gets called.
-func (it *Iterator) Item() *Item {
- tx := it.txn
- tx.addReadKey(it.item.Key())
- return it.item
-}
-
-// Valid returns false when iteration is done.
-func (it *Iterator) Valid() bool {
- if it.item == nil {
- return false
- }
- return bytes.HasPrefix(it.item.key, it.opt.Prefix)
-}
-
-// ValidForPrefix returns false when iteration is done
-// or when the current key is not prefixed by the specified prefix.
-func (it *Iterator) ValidForPrefix(prefix []byte) bool {
- return it.Valid() && bytes.HasPrefix(it.item.key, prefix)
-}
-
-// Close would close the iterator. It is important to call this when you're done with iteration.
-func (it *Iterator) Close() {
- if it.closed {
- return
- }
- it.closed = true
-
- it.iitr.Close()
- // It is important to wait for the fill goroutines to finish. Otherwise, we might leave zombie
- // goroutines behind, which are waiting to acquire file read locks after DB has been closed.
- waitFor := func(l list) {
- item := l.pop()
- for item != nil {
- item.wg.Wait()
- item = l.pop()
- }
- }
- waitFor(it.waste)
- waitFor(it.data)
-
- // TODO: We could handle this error.
- _ = it.txn.db.vlog.decrIteratorCount()
- atomic.AddInt32(&it.txn.numIterators, -1)
-}
-
-// Next would advance the iterator by one. Always check it.Valid() after a Next()
-// to ensure you have access to a valid it.Item().
-func (it *Iterator) Next() {
- // Reuse current item
- it.item.wg.Wait() // Just cleaner to wait before pushing to avoid doing ref counting.
- it.waste.push(it.item)
-
- // Set next item to current
- it.item = it.data.pop()
-
- for it.iitr.Valid() {
- if it.parseItem() {
- // parseItem calls one extra next.
- // This is used to deal with the complexity of reverse iteration.
- break
- }
- }
-}
-
-func isDeletedOrExpired(meta byte, expiresAt uint64) bool {
- if meta&bitDelete > 0 {
- return true
- }
- if expiresAt == 0 {
- return false
- }
- return expiresAt <= uint64(time.Now().Unix())
-}
-
-// parseItem is a complex function because it needs to handle both forward and reverse iteration
-// implementation. We store keys such that their versions are sorted in descending order. This makes
-// forward iteration efficient, but revese iteration complicated. This tradeoff is better because
-// forward iteration is more common than reverse.
-//
-// This function advances the iterator.
-func (it *Iterator) parseItem() bool {
- mi := it.iitr
- key := mi.Key()
-
- setItem := func(item *Item) {
- if it.item == nil {
- it.item = item
- } else {
- it.data.push(item)
- }
- }
-
- // Skip badger keys.
- if !it.opt.InternalAccess && bytes.HasPrefix(key, badgerPrefix) {
- mi.Next()
- return false
- }
-
- // Skip any versions which are beyond the readTs.
- version := y.ParseTs(key)
- if version > it.readTs {
- mi.Next()
- return false
- }
-
- if it.opt.AllVersions {
- // Return deleted or expired values also, otherwise user can't figure out
- // whether the key was deleted.
- item := it.newItem()
- it.fill(item)
- setItem(item)
- mi.Next()
- return true
- }
-
- // If iterating in forward direction, then just checking the last key against current key would
- // be sufficient.
- if !it.opt.Reverse {
- if y.SameKey(it.lastKey, key) {
- mi.Next()
- return false
- }
- // Only track in forward direction.
- // We should update lastKey as soon as we find a different key in our snapshot.
- // Consider keys: a 5, b 7 (del), b 5. When iterating, lastKey = a.
- // Then we see b 7, which is deleted. If we don't store lastKey = b, we'll then return b 5,
- // which is wrong. Therefore, update lastKey here.
- it.lastKey = y.SafeCopy(it.lastKey, mi.Key())
- }
-
-FILL:
- // If deleted, advance and return.
- vs := mi.Value()
- if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) {
- mi.Next()
- return false
- }
-
- item := it.newItem()
- it.fill(item)
- // fill item based on current cursor position. All Next calls have returned, so reaching here
- // means no Next was called.
-
- mi.Next() // Advance but no fill item yet.
- if !it.opt.Reverse || !mi.Valid() { // Forward direction, or invalid.
- setItem(item)
- return true
- }
-
- // Reverse direction.
- nextTs := y.ParseTs(mi.Key())
- mik := y.ParseKey(mi.Key())
- if nextTs <= it.readTs && bytes.Equal(mik, item.key) {
- // This is a valid potential candidate.
- goto FILL
- }
- // Ignore the next candidate. Return the current one.
- setItem(item)
- return true
-}
-
-func (it *Iterator) fill(item *Item) {
- vs := it.iitr.Value()
- item.meta = vs.Meta
- item.userMeta = vs.UserMeta
- item.expiresAt = vs.ExpiresAt
-
- item.version = y.ParseTs(it.iitr.Key())
- item.key = y.SafeCopy(item.key, y.ParseKey(it.iitr.Key()))
-
- item.vptr = y.SafeCopy(item.vptr, vs.Value)
- item.val = nil
- if it.opt.PrefetchValues {
- item.wg.Add(1)
- go func() {
- // FIXME we are not handling errors here.
- item.prefetchValue()
- item.wg.Done()
- }()
- }
-}
-
-func (it *Iterator) prefetch() {
- prefetchSize := 2
- if it.opt.PrefetchValues && it.opt.PrefetchSize > 1 {
- prefetchSize = it.opt.PrefetchSize
- }
-
- i := it.iitr
- var count int
- it.item = nil
- for i.Valid() {
- if !it.parseItem() {
- continue
- }
- count++
- if count == prefetchSize {
- break
- }
- }
-}
-
-// Seek would seek to the provided key if present. If absent, it would seek to the next
-// smallest key greater than the provided key if iterating in the forward direction.
-// Behavior would be reversed if iterating backwards.
-func (it *Iterator) Seek(key []byte) {
- for i := it.data.pop(); i != nil; i = it.data.pop() {
- i.wg.Wait()
- it.waste.push(i)
- }
-
- it.lastKey = it.lastKey[:0]
- if len(key) == 0 {
- key = it.opt.Prefix
- }
- if len(key) == 0 {
- it.iitr.Rewind()
- it.prefetch()
- return
- }
-
- if !it.opt.Reverse {
- key = y.KeyWithTs(key, it.txn.readTs)
- } else {
- key = y.KeyWithTs(key, 0)
- }
- it.iitr.Seek(key)
- it.prefetch()
-}
-
-// Rewind would rewind the iterator cursor all the way to zero-th position, which would be the
-// smallest key if iterating forward, and largest if iterating backward. It does not keep track of
-// whether the cursor started with a Seek().
-func (it *Iterator) Rewind() {
- it.Seek(nil)
-}
diff --git a/vendor/github.com/dgraph-io/badger/level_handler.go b/vendor/github.com/dgraph-io/badger/level_handler.go
deleted file mode 100644
index 147967fb..00000000
--- a/vendor/github.com/dgraph-io/badger/level_handler.go
+++ /dev/null
@@ -1,299 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "fmt"
- "sort"
- "sync"
-
- "github.com/dgraph-io/badger/table"
- "github.com/dgraph-io/badger/y"
- "github.com/pkg/errors"
-)
-
-type levelHandler struct {
- // Guards tables, totalSize.
- sync.RWMutex
-
- // For level >= 1, tables are sorted by key ranges, which do not overlap.
- // For level 0, tables are sorted by time.
- // For level 0, newest table are at the back. Compact the oldest one first, which is at the front.
- tables []*table.Table
- totalSize int64
-
- // The following are initialized once and const.
- level int
- strLevel string
- maxTotalSize int64
- db *DB
-}
-
-func (s *levelHandler) getTotalSize() int64 {
- s.RLock()
- defer s.RUnlock()
- return s.totalSize
-}
-
-// initTables replaces s.tables with given tables. This is done during loading.
-func (s *levelHandler) initTables(tables []*table.Table) {
- s.Lock()
- defer s.Unlock()
-
- s.tables = tables
- s.totalSize = 0
- for _, t := range tables {
- s.totalSize += t.Size()
- }
-
- if s.level == 0 {
- // Key range will overlap. Just sort by fileID in ascending order
- // because newer tables are at the end of level 0.
- sort.Slice(s.tables, func(i, j int) bool {
- return s.tables[i].ID() < s.tables[j].ID()
- })
- } else {
- // Sort tables by keys.
- sort.Slice(s.tables, func(i, j int) bool {
- return y.CompareKeys(s.tables[i].Smallest(), s.tables[j].Smallest()) < 0
- })
- }
-}
-
-// deleteTables remove tables idx0, ..., idx1-1.
-func (s *levelHandler) deleteTables(toDel []*table.Table) error {
- s.Lock() // s.Unlock() below
-
- toDelMap := make(map[uint64]struct{})
- for _, t := range toDel {
- toDelMap[t.ID()] = struct{}{}
- }
-
- // Make a copy as iterators might be keeping a slice of tables.
- var newTables []*table.Table
- for _, t := range s.tables {
- _, found := toDelMap[t.ID()]
- if !found {
- newTables = append(newTables, t)
- continue
- }
- s.totalSize -= t.Size()
- }
- s.tables = newTables
-
- s.Unlock() // Unlock s _before_ we DecrRef our tables, which can be slow.
-
- return decrRefs(toDel)
-}
-
-// replaceTables will replace tables[left:right] with newTables. Note this EXCLUDES tables[right].
-// You must call decr() to delete the old tables _after_ writing the update to the manifest.
-func (s *levelHandler) replaceTables(toDel, toAdd []*table.Table) error {
- // Need to re-search the range of tables in this level to be replaced as other goroutines might
- // be changing it as well. (They can't touch our tables, but if they add/remove other tables,
- // the indices get shifted around.)
- s.Lock() // We s.Unlock() below.
-
- toDelMap := make(map[uint64]struct{})
- for _, t := range toDel {
- toDelMap[t.ID()] = struct{}{}
- }
- var newTables []*table.Table
- for _, t := range s.tables {
- _, found := toDelMap[t.ID()]
- if !found {
- newTables = append(newTables, t)
- continue
- }
- s.totalSize -= t.Size()
- }
-
- // Increase totalSize first.
- for _, t := range toAdd {
- s.totalSize += t.Size()
- t.IncrRef()
- newTables = append(newTables, t)
- }
-
- // Assign tables.
- s.tables = newTables
- sort.Slice(s.tables, func(i, j int) bool {
- return y.CompareKeys(s.tables[i].Smallest(), s.tables[j].Smallest()) < 0
- })
- s.Unlock() // s.Unlock before we DecrRef tables -- that can be slow.
- return decrRefs(toDel)
-}
-
-func decrRefs(tables []*table.Table) error {
- for _, table := range tables {
- if err := table.DecrRef(); err != nil {
- return err
- }
- }
- return nil
-}
-
-func newLevelHandler(db *DB, level int) *levelHandler {
- return &levelHandler{
- level: level,
- strLevel: fmt.Sprintf("l%d", level),
- db: db,
- }
-}
-
-// tryAddLevel0Table returns true if ok and no stalling.
-func (s *levelHandler) tryAddLevel0Table(t *table.Table) bool {
- y.AssertTrue(s.level == 0)
- // Need lock as we may be deleting the first table during a level 0 compaction.
- s.Lock()
- defer s.Unlock()
- if len(s.tables) >= s.db.opt.NumLevelZeroTablesStall {
- return false
- }
-
- s.tables = append(s.tables, t)
- t.IncrRef()
- s.totalSize += t.Size()
-
- return true
-}
-
-func (s *levelHandler) numTables() int {
- s.RLock()
- defer s.RUnlock()
- return len(s.tables)
-}
-
-func (s *levelHandler) close() error {
- s.RLock()
- defer s.RUnlock()
- var err error
- for _, t := range s.tables {
- if closeErr := t.Close(); closeErr != nil && err == nil {
- err = closeErr
- }
- }
- return errors.Wrap(err, "levelHandler.close")
-}
-
-// getTableForKey acquires a read-lock to access s.tables. It returns a list of tableHandlers.
-func (s *levelHandler) getTableForKey(key []byte) ([]*table.Table, func() error) {
- s.RLock()
- defer s.RUnlock()
-
- if s.level == 0 {
- // For level 0, we need to check every table. Remember to make a copy as s.tables may change
- // once we exit this function, and we don't want to lock s.tables while seeking in tables.
- // CAUTION: Reverse the tables.
- out := make([]*table.Table, 0, len(s.tables))
- for i := len(s.tables) - 1; i >= 0; i-- {
- out = append(out, s.tables[i])
- s.tables[i].IncrRef()
- }
- return out, func() error {
- for _, t := range out {
- if err := t.DecrRef(); err != nil {
- return err
- }
- }
- return nil
- }
- }
- // For level >= 1, we can do a binary search as key range does not overlap.
- idx := sort.Search(len(s.tables), func(i int) bool {
- return y.CompareKeys(s.tables[i].Biggest(), key) >= 0
- })
- if idx >= len(s.tables) {
- // Given key is strictly > than every element we have.
- return nil, func() error { return nil }
- }
- tbl := s.tables[idx]
- tbl.IncrRef()
- return []*table.Table{tbl}, tbl.DecrRef
-}
-
-// get returns value for a given key or the key after that. If not found, return nil.
-func (s *levelHandler) get(key []byte) (y.ValueStruct, error) {
- tables, decr := s.getTableForKey(key)
- keyNoTs := y.ParseKey(key)
-
- var maxVs y.ValueStruct
- for _, th := range tables {
- if th.DoesNotHave(keyNoTs) {
- y.NumLSMBloomHits.Add(s.strLevel, 1)
- continue
- }
-
- it := th.NewIterator(false)
- defer it.Close()
-
- y.NumLSMGets.Add(s.strLevel, 1)
- it.Seek(key)
- if !it.Valid() {
- continue
- }
- if y.SameKey(key, it.Key()) {
- if version := y.ParseTs(it.Key()); maxVs.Version < version {
- maxVs = it.Value()
- maxVs.Version = version
- }
- }
- }
- return maxVs, decr()
-}
-
-// appendIterators appends iterators to an array of iterators, for merging.
-// Note: This obtains references for the table handlers. Remember to close these iterators.
-func (s *levelHandler) appendIterators(iters []y.Iterator, opt *IteratorOptions) []y.Iterator {
- s.RLock()
- defer s.RUnlock()
-
- tables := make([]*table.Table, 0, len(s.tables))
- for _, t := range s.tables {
- if opt.pickTable(t) {
- tables = append(tables, t)
- }
- }
- if len(tables) == 0 {
- return iters
- }
-
- if s.level == 0 {
- // Remember to add in reverse order!
- // The newer table at the end of s.tables should be added first as it takes precedence.
- return appendIteratorsReversed(iters, tables, opt.Reverse)
- }
- return append(iters, table.NewConcatIterator(tables, opt.Reverse))
-}
-
-type levelHandlerRLocked struct{}
-
-// overlappingTables returns the tables that intersect with key range. Returns a half-interval.
-// This function should already have acquired a read lock, and this is so important the caller must
-// pass an empty parameter declaring such.
-func (s *levelHandler) overlappingTables(_ levelHandlerRLocked, kr keyRange) (int, int) {
- if len(kr.left) == 0 || len(kr.right) == 0 {
- return 0, 0
- }
- left := sort.Search(len(s.tables), func(i int) bool {
- return y.CompareKeys(kr.left, s.tables[i].Biggest()) <= 0
- })
- right := sort.Search(len(s.tables), func(i int) bool {
- return y.CompareKeys(kr.right, s.tables[i].Smallest()) < 0
- })
- return left, right
-}
diff --git a/vendor/github.com/dgraph-io/badger/levels.go b/vendor/github.com/dgraph-io/badger/levels.go
deleted file mode 100644
index a4efd662..00000000
--- a/vendor/github.com/dgraph-io/badger/levels.go
+++ /dev/null
@@ -1,989 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "bytes"
- "fmt"
- "math"
- "math/rand"
- "os"
- "sort"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "golang.org/x/net/trace"
-
- "github.com/dgraph-io/badger/pb"
- "github.com/dgraph-io/badger/table"
- "github.com/dgraph-io/badger/y"
- "github.com/pkg/errors"
-)
-
-type levelsController struct {
- nextFileID uint64 // Atomic
- elog trace.EventLog
-
- // The following are initialized once and const.
- levels []*levelHandler
- kv *DB
-
- cstatus compactStatus
-}
-
-var (
- // This is for getting timings between stalls.
- lastUnstalled time.Time
-)
-
-// revertToManifest checks that all necessary table files exist and removes all table files not
-// referenced by the manifest. idMap is a set of table file id's that were read from the directory
-// listing.
-func revertToManifest(kv *DB, mf *Manifest, idMap map[uint64]struct{}) error {
- // 1. Check all files in manifest exist.
- for id := range mf.Tables {
- if _, ok := idMap[id]; !ok {
- return fmt.Errorf("file does not exist for table %d", id)
- }
- }
-
- // 2. Delete files that shouldn't exist.
- for id := range idMap {
- if _, ok := mf.Tables[id]; !ok {
- kv.elog.Printf("Table file %d not referenced in MANIFEST\n", id)
- filename := table.NewFilename(id, kv.opt.Dir)
- if err := os.Remove(filename); err != nil {
- return y.Wrapf(err, "While removing table %d", id)
- }
- }
- }
-
- return nil
-}
-
-func newLevelsController(db *DB, mf *Manifest) (*levelsController, error) {
- y.AssertTrue(db.opt.NumLevelZeroTablesStall > db.opt.NumLevelZeroTables)
- s := &levelsController{
- kv: db,
- elog: db.elog,
- levels: make([]*levelHandler, db.opt.MaxLevels),
- }
- s.cstatus.levels = make([]*levelCompactStatus, db.opt.MaxLevels)
-
- for i := 0; i < db.opt.MaxLevels; i++ {
- s.levels[i] = newLevelHandler(db, i)
- if i == 0 {
- // Do nothing.
- } else if i == 1 {
- // Level 1 probably shouldn't be too much bigger than level 0.
- s.levels[i].maxTotalSize = db.opt.LevelOneSize
- } else {
- s.levels[i].maxTotalSize = s.levels[i-1].maxTotalSize * int64(db.opt.LevelSizeMultiplier)
- }
- s.cstatus.levels[i] = new(levelCompactStatus)
- }
-
- // Compare manifest against directory, check for existent/non-existent files, and remove.
- if err := revertToManifest(db, mf, getIDMap(db.opt.Dir)); err != nil {
- return nil, err
- }
-
- // Some files may be deleted. Let's reload.
- var flags uint32 = y.Sync
- if db.opt.ReadOnly {
- flags |= y.ReadOnly
- }
-
- var mu sync.Mutex
- tables := make([][]*table.Table, db.opt.MaxLevels)
- var maxFileID uint64
-
- // We found that using 3 goroutines allows disk throughput to be utilized to its max.
- // Disk utilization is the main thing we should focus on, while trying to read the data. That's
- // the one factor that remains constant between HDD and SSD.
- throttle := y.NewThrottle(3)
-
- start := time.Now()
- var numOpened int32
- tick := time.NewTicker(3 * time.Second)
- defer tick.Stop()
-
- for fileID, tf := range mf.Tables {
- fname := table.NewFilename(fileID, db.opt.Dir)
- select {
- case <-tick.C:
- db.opt.Infof("%d tables out of %d opened in %s\n", atomic.LoadInt32(&numOpened),
- len(mf.Tables), time.Since(start).Round(time.Millisecond))
- default:
- }
- if err := throttle.Do(); err != nil {
- closeAllTables(tables)
- return nil, err
- }
- if fileID > maxFileID {
- maxFileID = fileID
- }
- go func(fname string, tf TableManifest) {
- var rerr error
- defer func() {
- throttle.Done(rerr)
- atomic.AddInt32(&numOpened, 1)
- }()
- fd, err := y.OpenExistingFile(fname, flags)
- if err != nil {
- rerr = errors.Wrapf(err, "Opening file: %q", fname)
- return
- }
-
- t, err := table.OpenTable(fd, db.opt.TableLoadingMode, tf.Checksum)
- if err != nil {
- if strings.HasPrefix(err.Error(), "CHECKSUM_MISMATCH:") {
- db.opt.Errorf(err.Error())
- db.opt.Errorf("Ignoring table %s", fd.Name())
- // Do not set rerr. We will continue without this table.
- } else {
- rerr = errors.Wrapf(err, "Opening table: %q", fname)
- }
- return
- }
-
- mu.Lock()
- tables[tf.Level] = append(tables[tf.Level], t)
- mu.Unlock()
- }(fname, tf)
- }
- if err := throttle.Finish(); err != nil {
- closeAllTables(tables)
- return nil, err
- }
- db.opt.Infof("All %d tables opened in %s\n", atomic.LoadInt32(&numOpened),
- time.Since(start).Round(time.Millisecond))
- s.nextFileID = maxFileID + 1
- for i, tbls := range tables {
- s.levels[i].initTables(tbls)
- }
-
- // Make sure key ranges do not overlap etc.
- if err := s.validate(); err != nil {
- _ = s.cleanupLevels()
- return nil, errors.Wrap(err, "Level validation")
- }
-
- // Sync directory (because we have at least removed some files, or previously created the
- // manifest file).
- if err := syncDir(db.opt.Dir); err != nil {
- _ = s.close()
- return nil, err
- }
-
- return s, nil
-}
-
-// Closes the tables, for cleanup in newLevelsController. (We Close() instead of using DecrRef()
-// because that would delete the underlying files.) We ignore errors, which is OK because tables
-// are read-only.
-func closeAllTables(tables [][]*table.Table) {
- for _, tableSlice := range tables {
- for _, table := range tableSlice {
- _ = table.Close()
- }
- }
-}
-
-func (s *levelsController) cleanupLevels() error {
- var firstErr error
- for _, l := range s.levels {
- if err := l.close(); err != nil && firstErr == nil {
- firstErr = err
- }
- }
- return firstErr
-}
-
-// dropTree picks all tables from all levels, creates a manifest changeset,
-// applies it, and then decrements the refs of these tables, which would result
-// in their deletion.
-func (s *levelsController) dropTree() (int, error) {
- // First pick all tables, so we can create a manifest changelog.
- var all []*table.Table
- for _, l := range s.levels {
- l.RLock()
- all = append(all, l.tables...)
- l.RUnlock()
- }
- if len(all) == 0 {
- return 0, nil
- }
-
- // Generate the manifest changes.
- changes := []*pb.ManifestChange{}
- for _, table := range all {
- changes = append(changes, newDeleteChange(table.ID()))
- }
- changeSet := pb.ManifestChangeSet{Changes: changes}
- if err := s.kv.manifest.addChanges(changeSet.Changes); err != nil {
- return 0, err
- }
-
- // Now that manifest has been successfully written, we can delete the tables.
- for _, l := range s.levels {
- l.Lock()
- l.totalSize = 0
- l.tables = l.tables[:0]
- l.Unlock()
- }
- for _, table := range all {
- if err := table.DecrRef(); err != nil {
- return 0, err
- }
- }
- return len(all), nil
-}
-
-// dropPrefix runs a L0->L1 compaction, and then runs same level compaction on the rest of the
-// levels. For L0->L1 compaction, it runs compactions normally, but skips over all the keys with the
-// provided prefix. For Li->Li compactions, it picks up the tables which would have the prefix. The
-// tables who only have keys with this prefix are quickly dropped. The ones which have other keys
-// are run through MergeIterator and compacted to create new tables. All the mechanisms of
-// compactions apply, i.e. level sizes and MANIFEST are updated as in the normal flow.
-func (s *levelsController) dropPrefix(prefix []byte) error {
- opt := s.kv.opt
- for _, l := range s.levels {
- l.RLock()
- if l.level == 0 {
- size := len(l.tables)
- l.RUnlock()
-
- if size > 0 {
- cp := compactionPriority{
- level: 0,
- score: 1.74,
- // A unique number greater than 1.0 does two things. Helps identify this
- // function in logs, and forces a compaction.
- dropPrefix: prefix,
- }
- if err := s.doCompact(cp); err != nil {
- opt.Warningf("While compacting level 0: %v", err)
- return nil
- }
- }
- continue
- }
-
- var tables []*table.Table
- for _, table := range l.tables {
- var absent bool
- switch {
- case bytes.HasPrefix(table.Smallest(), prefix):
- case bytes.HasPrefix(table.Biggest(), prefix):
- case bytes.Compare(prefix, table.Smallest()) > 0 &&
- bytes.Compare(prefix, table.Biggest()) < 0:
- default:
- absent = true
- }
- if !absent {
- tables = append(tables, table)
- }
- }
- l.RUnlock()
- if len(tables) == 0 {
- continue
- }
-
- cd := compactDef{
- elog: trace.New(fmt.Sprintf("Badger.L%d", l.level), "Compact"),
- thisLevel: l,
- nextLevel: l,
- top: []*table.Table{},
- bot: tables,
- dropPrefix: prefix,
- }
- if err := s.runCompactDef(l.level, cd); err != nil {
- opt.Warningf("While running compact def: %+v. Error: %v", cd, err)
- return err
- }
- }
- return nil
-}
-
-func (s *levelsController) startCompact(lc *y.Closer) {
- n := s.kv.opt.NumCompactors
- lc.AddRunning(n - 1)
- for i := 0; i < n; i++ {
- go s.runWorker(lc)
- }
-}
-
-func (s *levelsController) runWorker(lc *y.Closer) {
- defer lc.Done()
-
- randomDelay := time.NewTimer(time.Duration(rand.Int31n(1000)) * time.Millisecond)
- select {
- case <-randomDelay.C:
- case <-lc.HasBeenClosed():
- randomDelay.Stop()
- return
- }
-
- ticker := time.NewTicker(time.Second)
- defer ticker.Stop()
-
- for {
- select {
- // Can add a done channel or other stuff.
- case <-ticker.C:
- prios := s.pickCompactLevels()
- for _, p := range prios {
- if err := s.doCompact(p); err == nil {
- break
- } else if err == errFillTables {
- // pass
- } else {
- s.kv.opt.Warningf("While running doCompact: %v\n", err)
- }
- }
- case <-lc.HasBeenClosed():
- return
- }
- }
-}
-
-// Returns true if level zero may be compacted, without accounting for compactions that already
-// might be happening.
-func (s *levelsController) isLevel0Compactable() bool {
- return s.levels[0].numTables() >= s.kv.opt.NumLevelZeroTables
-}
-
-// Returns true if the non-zero level may be compacted. delSize provides the size of the tables
-// which are currently being compacted so that we treat them as already having started being
-// compacted (because they have been, yet their size is already counted in getTotalSize).
-func (l *levelHandler) isCompactable(delSize int64) bool {
- return l.getTotalSize()-delSize >= l.maxTotalSize
-}
-
-type compactionPriority struct {
- level int
- score float64
- dropPrefix []byte
-}
-
-// pickCompactLevel determines which level to compact.
-// Based on: https://github.com/facebook/rocksdb/wiki/Leveled-Compaction
-func (s *levelsController) pickCompactLevels() (prios []compactionPriority) {
- // This function must use identical criteria for guaranteeing compaction's progress that
- // addLevel0Table uses.
-
- // cstatus is checked to see if level 0's tables are already being compacted
- if !s.cstatus.overlapsWith(0, infRange) && s.isLevel0Compactable() {
- pri := compactionPriority{
- level: 0,
- score: float64(s.levels[0].numTables()) / float64(s.kv.opt.NumLevelZeroTables),
- }
- prios = append(prios, pri)
- }
-
- for i, l := range s.levels[1:] {
- // Don't consider those tables that are already being compacted right now.
- delSize := s.cstatus.delSize(i + 1)
-
- if l.isCompactable(delSize) {
- pri := compactionPriority{
- level: i + 1,
- score: float64(l.getTotalSize()-delSize) / float64(l.maxTotalSize),
- }
- prios = append(prios, pri)
- }
- }
- sort.Slice(prios, func(i, j int) bool {
- return prios[i].score > prios[j].score
- })
- return prios
-}
-
-// compactBuildTables merge topTables and botTables to form a list of new tables.
-func (s *levelsController) compactBuildTables(
- lev int, cd compactDef) ([]*table.Table, func() error, error) {
- topTables := cd.top
- botTables := cd.bot
-
- var hasOverlap bool
- {
- kr := getKeyRange(cd.top)
- for i, lh := range s.levels {
- if i <= lev { // Skip upper levels.
- continue
- }
- lh.RLock()
- left, right := lh.overlappingTables(levelHandlerRLocked{}, kr)
- lh.RUnlock()
- if right-left > 0 {
- hasOverlap = true
- break
- }
- }
- }
-
- // Try to collect stats so that we can inform value log about GC. That would help us find which
- // value log file should be GCed.
- discardStats := make(map[uint32]int64)
- updateStats := func(vs y.ValueStruct) {
- if vs.Meta&bitValuePointer > 0 {
- var vp valuePointer
- vp.Decode(vs.Value)
- discardStats[vp.Fid] += int64(vp.Len)
- }
- }
-
- // Create iterators across all the tables involved first.
- var iters []y.Iterator
- if lev == 0 {
- iters = appendIteratorsReversed(iters, topTables, false)
- } else if len(topTables) > 0 {
- y.AssertTrue(len(topTables) == 1)
- iters = []y.Iterator{topTables[0].NewIterator(false)}
- }
-
- // Next level has level>=1 and we can use ConcatIterator as key ranges do not overlap.
- var valid []*table.Table
- for _, table := range botTables {
- if len(cd.dropPrefix) > 0 &&
- bytes.HasPrefix(table.Smallest(), cd.dropPrefix) &&
- bytes.HasPrefix(table.Biggest(), cd.dropPrefix) {
- // All the keys in this table have the dropPrefix. So, this table does not need to be
- // in the iterator and can be dropped immediately.
- continue
- }
- valid = append(valid, table)
- }
- iters = append(iters, table.NewConcatIterator(valid, false))
- it := y.NewMergeIterator(iters, false)
- defer it.Close() // Important to close the iterator to do ref counting.
-
- it.Rewind()
-
- // Pick a discard ts, so we can discard versions below this ts. We should
- // never discard any versions starting from above this timestamp, because
- // that would affect the snapshot view guarantee provided by transactions.
- discardTs := s.kv.orc.discardAtOrBelow()
-
- // Start generating new tables.
- type newTableResult struct {
- table *table.Table
- err error
- }
- resultCh := make(chan newTableResult)
- var numBuilds, numVersions int
- var lastKey, skipKey []byte
- for it.Valid() {
- timeStart := time.Now()
- builder := table.NewTableBuilder()
- var numKeys, numSkips uint64
- for ; it.Valid(); it.Next() {
- // See if we need to skip the prefix.
- if len(cd.dropPrefix) > 0 && bytes.HasPrefix(it.Key(), cd.dropPrefix) {
- numSkips++
- updateStats(it.Value())
- continue
- }
-
- // See if we need to skip this key.
- if len(skipKey) > 0 {
- if y.SameKey(it.Key(), skipKey) {
- numSkips++
- updateStats(it.Value())
- continue
- } else {
- skipKey = skipKey[:0]
- }
- }
-
- if !y.SameKey(it.Key(), lastKey) {
- if builder.ReachedCapacity(s.kv.opt.MaxTableSize) {
- // Only break if we are on a different key, and have reached capacity. We want
- // to ensure that all versions of the key are stored in the same sstable, and
- // not divided across multiple tables at the same level.
- break
- }
- lastKey = y.SafeCopy(lastKey, it.Key())
- numVersions = 0
- }
-
- vs := it.Value()
- version := y.ParseTs(it.Key())
- // Do not discard entries inserted by merge operator. These entries will be
- // discarded once they're merged
- if version <= discardTs && vs.Meta&bitMergeEntry == 0 {
- // Keep track of the number of versions encountered for this key. Only consider the
- // versions which are below the minReadTs, otherwise, we might end up discarding the
- // only valid version for a running transaction.
- numVersions++
- lastValidVersion := vs.Meta&bitDiscardEarlierVersions > 0
- if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) ||
- numVersions > s.kv.opt.NumVersionsToKeep ||
- lastValidVersion {
- // If this version of the key is deleted or expired, skip all the rest of the
- // versions. Ensure that we're only removing versions below readTs.
- skipKey = y.SafeCopy(skipKey, it.Key())
-
- if lastValidVersion {
- // Add this key. We have set skipKey, so the following key versions
- // would be skipped.
- } else if hasOverlap {
- // If this key range has overlap with lower levels, then keep the deletion
- // marker with the latest version, discarding the rest. We have set skipKey,
- // so the following key versions would be skipped.
- } else {
- // If no overlap, we can skip all the versions, by continuing here.
- numSkips++
- updateStats(vs)
- continue // Skip adding this key.
- }
- }
- }
- numKeys++
- y.Check(builder.Add(it.Key(), it.Value()))
- }
- // It was true that it.Valid() at least once in the loop above, which means we
- // called Add() at least once, and builder is not Empty().
- s.kv.opt.Debugf("LOG Compact. Added %d keys. Skipped %d keys. Iteration took: %v",
- numKeys, numSkips, time.Since(timeStart))
- if !builder.Empty() {
- numBuilds++
- fileID := s.reserveFileID()
- go func(builder *table.Builder) {
- defer builder.Close()
-
- fd, err := y.CreateSyncedFile(table.NewFilename(fileID, s.kv.opt.Dir), true)
- if err != nil {
- resultCh <- newTableResult{nil, errors.Wrapf(err, "While opening new table: %d", fileID)}
- return
- }
-
- if _, err := fd.Write(builder.Finish()); err != nil {
- resultCh <- newTableResult{nil, errors.Wrapf(err, "Unable to write to file: %d", fileID)}
- return
- }
-
- tbl, err := table.OpenTable(fd, s.kv.opt.TableLoadingMode, nil)
- // decrRef is added below.
- resultCh <- newTableResult{tbl, errors.Wrapf(err, "Unable to open table: %q", fd.Name())}
- }(builder)
- }
- }
-
- newTables := make([]*table.Table, 0, 20)
- // Wait for all table builders to finish.
- var firstErr error
- for x := 0; x < numBuilds; x++ {
- res := <-resultCh
- newTables = append(newTables, res.table)
- if firstErr == nil {
- firstErr = res.err
- }
- }
-
- if firstErr == nil {
- // Ensure created files' directory entries are visible. We don't mind the extra latency
- // from not doing this ASAP after all file creation has finished because this is a
- // background operation.
- firstErr = syncDir(s.kv.opt.Dir)
- }
-
- if firstErr != nil {
- // An error happened. Delete all the newly created table files (by calling DecrRef
- // -- we're the only holders of a ref).
- for j := 0; j < numBuilds; j++ {
- if newTables[j] != nil {
- _ = newTables[j].DecrRef()
- }
- }
- errorReturn := errors.Wrapf(firstErr, "While running compaction for: %+v", cd)
- return nil, nil, errorReturn
- }
-
- sort.Slice(newTables, func(i, j int) bool {
- return y.CompareKeys(newTables[i].Biggest(), newTables[j].Biggest()) < 0
- })
- if err := s.kv.vlog.updateDiscardStats(discardStats); err != nil {
- return nil, nil, errors.Wrap(err, "failed to update discard stats")
- }
- s.kv.opt.Debugf("Discard stats: %v", discardStats)
- return newTables, func() error { return decrRefs(newTables) }, nil
-}
-
-func buildChangeSet(cd *compactDef, newTables []*table.Table) pb.ManifestChangeSet {
- changes := []*pb.ManifestChange{}
- for _, table := range newTables {
- changes = append(changes,
- newCreateChange(table.ID(), cd.nextLevel.level, table.Checksum))
- }
- for _, table := range cd.top {
- changes = append(changes, newDeleteChange(table.ID()))
- }
- for _, table := range cd.bot {
- changes = append(changes, newDeleteChange(table.ID()))
- }
- return pb.ManifestChangeSet{Changes: changes}
-}
-
-type compactDef struct {
- elog trace.Trace
-
- thisLevel *levelHandler
- nextLevel *levelHandler
-
- top []*table.Table
- bot []*table.Table
-
- thisRange keyRange
- nextRange keyRange
-
- thisSize int64
-
- dropPrefix []byte
-}
-
-func (cd *compactDef) lockLevels() {
- cd.thisLevel.RLock()
- cd.nextLevel.RLock()
-}
-
-func (cd *compactDef) unlockLevels() {
- cd.nextLevel.RUnlock()
- cd.thisLevel.RUnlock()
-}
-
-func (s *levelsController) fillTablesL0(cd *compactDef) bool {
- cd.lockLevels()
- defer cd.unlockLevels()
-
- cd.top = make([]*table.Table, len(cd.thisLevel.tables))
- copy(cd.top, cd.thisLevel.tables)
- if len(cd.top) == 0 {
- return false
- }
- cd.thisRange = infRange
-
- kr := getKeyRange(cd.top)
- left, right := cd.nextLevel.overlappingTables(levelHandlerRLocked{}, kr)
- cd.bot = make([]*table.Table, right-left)
- copy(cd.bot, cd.nextLevel.tables[left:right])
-
- if len(cd.bot) == 0 {
- cd.nextRange = kr
- } else {
- cd.nextRange = getKeyRange(cd.bot)
- }
-
- if !s.cstatus.compareAndAdd(thisAndNextLevelRLocked{}, *cd) {
- return false
- }
-
- return true
-}
-
-func (s *levelsController) fillTables(cd *compactDef) bool {
- cd.lockLevels()
- defer cd.unlockLevels()
-
- tbls := make([]*table.Table, len(cd.thisLevel.tables))
- copy(tbls, cd.thisLevel.tables)
- if len(tbls) == 0 {
- return false
- }
-
- // Find the biggest table, and compact that first.
- // TODO: Try other table picking strategies.
- sort.Slice(tbls, func(i, j int) bool {
- return tbls[i].Size() > tbls[j].Size()
- })
-
- for _, t := range tbls {
- cd.thisSize = t.Size()
- cd.thisRange = keyRange{
- // We pick all the versions of the smallest and the biggest key.
- left: y.KeyWithTs(y.ParseKey(t.Smallest()), math.MaxUint64),
- // Note that version zero would be the rightmost key.
- right: y.KeyWithTs(y.ParseKey(t.Biggest()), 0),
- }
- if s.cstatus.overlapsWith(cd.thisLevel.level, cd.thisRange) {
- continue
- }
- cd.top = []*table.Table{t}
- left, right := cd.nextLevel.overlappingTables(levelHandlerRLocked{}, cd.thisRange)
-
- cd.bot = make([]*table.Table, right-left)
- copy(cd.bot, cd.nextLevel.tables[left:right])
-
- if len(cd.bot) == 0 {
- cd.bot = []*table.Table{}
- cd.nextRange = cd.thisRange
- if !s.cstatus.compareAndAdd(thisAndNextLevelRLocked{}, *cd) {
- continue
- }
- return true
- }
- cd.nextRange = getKeyRange(cd.bot)
-
- if s.cstatus.overlapsWith(cd.nextLevel.level, cd.nextRange) {
- continue
- }
- if !s.cstatus.compareAndAdd(thisAndNextLevelRLocked{}, *cd) {
- continue
- }
- return true
- }
- return false
-}
-
-func (s *levelsController) runCompactDef(l int, cd compactDef) (err error) {
- timeStart := time.Now()
-
- thisLevel := cd.thisLevel
- nextLevel := cd.nextLevel
-
- // Table should never be moved directly between levels, always be rewritten to allow discarding
- // invalid versions.
-
- newTables, decr, err := s.compactBuildTables(l, cd)
- if err != nil {
- return err
- }
- defer func() {
- // Only assign to err, if it's not already nil.
- if decErr := decr(); err == nil {
- err = decErr
- }
- }()
- changeSet := buildChangeSet(&cd, newTables)
-
- // We write to the manifest _before_ we delete files (and after we created files)
- if err := s.kv.manifest.addChanges(changeSet.Changes); err != nil {
- return err
- }
-
- // See comment earlier in this function about the ordering of these ops, and the order in which
- // we access levels when reading.
- if err := nextLevel.replaceTables(cd.bot, newTables); err != nil {
- return err
- }
- if err := thisLevel.deleteTables(cd.top); err != nil {
- return err
- }
-
- // Note: For level 0, while doCompact is running, it is possible that new tables are added.
- // However, the tables are added only to the end, so it is ok to just delete the first table.
-
- s.kv.opt.Infof("LOG Compact %d->%d, del %d tables, add %d tables, took %v\n",
- thisLevel.level, nextLevel.level, len(cd.top)+len(cd.bot),
- len(newTables), time.Since(timeStart))
- return nil
-}
-
-var errFillTables = errors.New("Unable to fill tables")
-
-// doCompact picks some table on level l and compacts it away to the next level.
-func (s *levelsController) doCompact(p compactionPriority) error {
- l := p.level
- y.AssertTrue(l+1 < s.kv.opt.MaxLevels) // Sanity check.
-
- cd := compactDef{
- elog: trace.New(fmt.Sprintf("Badger.L%d", l), "Compact"),
- thisLevel: s.levels[l],
- nextLevel: s.levels[l+1],
- dropPrefix: p.dropPrefix,
- }
- cd.elog.SetMaxEvents(100)
- defer cd.elog.Finish()
-
- s.kv.opt.Infof("Got compaction priority: %+v", p)
-
- // While picking tables to be compacted, both levels' tables are expected to
- // remain unchanged.
- if l == 0 {
- if !s.fillTablesL0(&cd) {
- return errFillTables
- }
-
- } else {
- if !s.fillTables(&cd) {
- return errFillTables
- }
- }
- defer s.cstatus.delete(cd) // Remove the ranges from compaction status.
-
- s.kv.opt.Infof("Running for level: %d\n", cd.thisLevel.level)
- s.cstatus.toLog(cd.elog)
- if err := s.runCompactDef(l, cd); err != nil {
- // This compaction couldn't be done successfully.
- s.kv.opt.Warningf("LOG Compact FAILED with error: %+v: %+v", err, cd)
- return err
- }
-
- s.cstatus.toLog(cd.elog)
- s.kv.opt.Infof("Compaction for level: %d DONE", cd.thisLevel.level)
- return nil
-}
-
-func (s *levelsController) addLevel0Table(t *table.Table) error {
- // We update the manifest _before_ the table becomes part of a levelHandler, because at that
- // point it could get used in some compaction. This ensures the manifest file gets updated in
- // the proper order. (That means this update happens before that of some compaction which
- // deletes the table.)
- err := s.kv.manifest.addChanges([]*pb.ManifestChange{
- newCreateChange(t.ID(), 0, t.Checksum),
- })
- if err != nil {
- return err
- }
-
- for !s.levels[0].tryAddLevel0Table(t) {
- // Stall. Make sure all levels are healthy before we unstall.
- var timeStart time.Time
- {
- s.elog.Printf("STALLED STALLED STALLED: %v\n", time.Since(lastUnstalled))
- s.cstatus.RLock()
- for i := 0; i < s.kv.opt.MaxLevels; i++ {
- s.elog.Printf("level=%d. Status=%s Size=%d\n",
- i, s.cstatus.levels[i].debug(), s.levels[i].getTotalSize())
- }
- s.cstatus.RUnlock()
- timeStart = time.Now()
- }
- // Before we unstall, we need to make sure that level 0 and 1 are healthy. Otherwise, we
- // will very quickly fill up level 0 again and if the compaction strategy favors level 0,
- // then level 1 is going to super full.
- for i := 0; ; i++ {
- // Passing 0 for delSize to compactable means we're treating incomplete compactions as
- // not having finished -- we wait for them to finish. Also, it's crucial this behavior
- // replicates pickCompactLevels' behavior in computing compactability in order to
- // guarantee progress.
- if !s.isLevel0Compactable() && !s.levels[1].isCompactable(0) {
- break
- }
- time.Sleep(10 * time.Millisecond)
- if i%100 == 0 {
- prios := s.pickCompactLevels()
- s.elog.Printf("Waiting to add level 0 table. Compaction priorities: %+v\n", prios)
- i = 0
- }
- }
- {
- s.elog.Printf("UNSTALLED UNSTALLED UNSTALLED: %v\n", time.Since(timeStart))
- lastUnstalled = time.Now()
- }
- }
-
- return nil
-}
-
-func (s *levelsController) close() error {
- err := s.cleanupLevels()
- return errors.Wrap(err, "levelsController.Close")
-}
-
-// get returns the found value if any. If not found, we return nil.
-func (s *levelsController) get(key []byte, maxVs *y.ValueStruct) (y.ValueStruct, error) {
- // It's important that we iterate the levels from 0 on upward. The reason is, if we iterated
- // in opposite order, or in parallel (naively calling all the h.RLock() in some order) we could
- // read level L's tables post-compaction and level L+1's tables pre-compaction. (If we do
- // parallelize this, we will need to call the h.RLock() function by increasing order of level
- // number.)
- version := y.ParseTs(key)
- for _, h := range s.levels {
- vs, err := h.get(key) // Calls h.RLock() and h.RUnlock().
- if err != nil {
- return y.ValueStruct{}, errors.Wrapf(err, "get key: %q", key)
- }
- if vs.Value == nil && vs.Meta == 0 {
- continue
- }
- if maxVs == nil || vs.Version == version {
- return vs, nil
- }
- if maxVs.Version < vs.Version {
- *maxVs = vs
- }
- }
- if maxVs != nil {
- return *maxVs, nil
- }
- return y.ValueStruct{}, nil
-}
-
-func appendIteratorsReversed(out []y.Iterator, th []*table.Table, reversed bool) []y.Iterator {
- for i := len(th) - 1; i >= 0; i-- {
- // This will increment the reference of the table handler.
- out = append(out, th[i].NewIterator(reversed))
- }
- return out
-}
-
-// appendIterators appends iterators to an array of iterators, for merging.
-// Note: This obtains references for the table handlers. Remember to close these iterators.
-func (s *levelsController) appendIterators(
- iters []y.Iterator, opt *IteratorOptions) []y.Iterator {
- // Just like with get, it's important we iterate the levels from 0 on upward, to avoid missing
- // data when there's a compaction.
- for _, level := range s.levels {
- iters = level.appendIterators(iters, opt)
- }
- return iters
-}
-
-// TableInfo represents the information about a table.
-type TableInfo struct {
- ID uint64
- Level int
- Left []byte
- Right []byte
- KeyCount uint64 // Number of keys in the table
-}
-
-func (s *levelsController) getTableInfo(withKeysCount bool) (result []TableInfo) {
- for _, l := range s.levels {
- l.RLock()
- for _, t := range l.tables {
- var count uint64
- if withKeysCount {
- it := t.NewIterator(false)
- for it.Rewind(); it.Valid(); it.Next() {
- count++
- }
- }
-
- info := TableInfo{
- ID: t.ID(),
- Level: l.level,
- Left: t.Smallest(),
- Right: t.Biggest(),
- KeyCount: count,
- }
- result = append(result, info)
- }
- l.RUnlock()
- }
- sort.Slice(result, func(i, j int) bool {
- if result[i].Level != result[j].Level {
- return result[i].Level < result[j].Level
- }
- return result[i].ID < result[j].ID
- })
- return
-}
diff --git a/vendor/github.com/dgraph-io/badger/logger.go b/vendor/github.com/dgraph-io/badger/logger.go
deleted file mode 100644
index 3a9b8a33..00000000
--- a/vendor/github.com/dgraph-io/badger/logger.go
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright 2018 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "log"
- "os"
-)
-
-// Logger is implemented by any logging system that is used for standard logs.
-type Logger interface {
- Errorf(string, ...interface{})
- Warningf(string, ...interface{})
- Infof(string, ...interface{})
- Debugf(string, ...interface{})
-}
-
-// Errorf logs an ERROR log message to the logger specified in opts or to the
-// global logger if no logger is specified in opts.
-func (opt *Options) Errorf(format string, v ...interface{}) {
- if opt.Logger == nil {
- return
- }
- opt.Logger.Errorf(format, v...)
-}
-
-// Infof logs an INFO message to the logger specified in opts.
-func (opt *Options) Infof(format string, v ...interface{}) {
- if opt.Logger == nil {
- return
- }
- opt.Logger.Infof(format, v...)
-}
-
-// Warningf logs a WARNING message to the logger specified in opts.
-func (opt *Options) Warningf(format string, v ...interface{}) {
- if opt.Logger == nil {
- return
- }
- opt.Logger.Warningf(format, v...)
-}
-
-// Debugf logs a DEBUG message to the logger specified in opts.
-func (opt *Options) Debugf(format string, v ...interface{}) {
- if opt.Logger == nil {
- return
- }
- opt.Logger.Debugf(format, v...)
-}
-
-type defaultLog struct {
- *log.Logger
-}
-
-var defaultLogger = &defaultLog{Logger: log.New(os.Stderr, "badger ", log.LstdFlags)}
-
-func (l *defaultLog) Errorf(f string, v ...interface{}) {
- l.Printf("ERROR: "+f, v...)
-}
-
-func (l *defaultLog) Warningf(f string, v ...interface{}) {
- l.Printf("WARNING: "+f, v...)
-}
-
-func (l *defaultLog) Infof(f string, v ...interface{}) {
- l.Printf("INFO: "+f, v...)
-}
-
-func (l *defaultLog) Debugf(f string, v ...interface{}) {
- l.Printf("DEBUG: "+f, v...)
-}
diff --git a/vendor/github.com/dgraph-io/badger/managed_db.go b/vendor/github.com/dgraph-io/badger/managed_db.go
deleted file mode 100644
index 4de226ae..00000000
--- a/vendor/github.com/dgraph-io/badger/managed_db.go
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-// OpenManaged returns a new DB, which allows more control over setting
-// transaction timestamps, aka managed mode.
-//
-// This is only useful for databases built on top of Badger (like Dgraph), and
-// can be ignored by most users.
-func OpenManaged(opts Options) (*DB, error) {
- opts.managedTxns = true
- return Open(opts)
-}
-
-// NewTransactionAt follows the same logic as DB.NewTransaction(), but uses the
-// provided read timestamp.
-//
-// This is only useful for databases built on top of Badger (like Dgraph), and
-// can be ignored by most users.
-func (db *DB) NewTransactionAt(readTs uint64, update bool) *Txn {
- if !db.opt.managedTxns {
- panic("Cannot use NewTransactionAt with managedDB=false. Use NewTransaction instead.")
- }
- txn := db.newTransaction(update, true)
- txn.readTs = readTs
- return txn
-}
-
-// CommitAt commits the transaction, following the same logic as Commit(), but
-// at the given commit timestamp. This will panic if not used with managed transactions.
-//
-// This is only useful for databases built on top of Badger (like Dgraph), and
-// can be ignored by most users.
-func (txn *Txn) CommitAt(commitTs uint64, callback func(error)) error {
- if !txn.db.opt.managedTxns {
- panic("Cannot use CommitAt with managedDB=false. Use Commit instead.")
- }
- txn.commitTs = commitTs
- if callback == nil {
- return txn.Commit()
- }
- txn.CommitWith(callback)
- return nil
-}
-
-// SetDiscardTs sets a timestamp at or below which, any invalid or deleted
-// versions can be discarded from the LSM tree, and thence from the value log to
-// reclaim disk space. Can only be used with managed transactions.
-func (db *DB) SetDiscardTs(ts uint64) {
- if !db.opt.managedTxns {
- panic("Cannot use SetDiscardTs with managedDB=false.")
- }
- db.orc.setDiscardTs(ts)
-}
diff --git a/vendor/github.com/dgraph-io/badger/manifest.go b/vendor/github.com/dgraph-io/badger/manifest.go
deleted file mode 100644
index a5818829..00000000
--- a/vendor/github.com/dgraph-io/badger/manifest.go
+++ /dev/null
@@ -1,440 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "bufio"
- "bytes"
- "encoding/binary"
- "fmt"
- "hash/crc32"
- "io"
- "os"
- "path/filepath"
- "sync"
-
- "github.com/dgraph-io/badger/pb"
- "github.com/dgraph-io/badger/y"
- "github.com/pkg/errors"
-)
-
-// Manifest represents the contents of the MANIFEST file in a Badger store.
-//
-// The MANIFEST file describes the startup state of the db -- all LSM files and what level they're
-// at.
-//
-// It consists of a sequence of ManifestChangeSet objects. Each of these is treated atomically,
-// and contains a sequence of ManifestChange's (file creations/deletions) which we use to
-// reconstruct the manifest at startup.
-type Manifest struct {
- Levels []levelManifest
- Tables map[uint64]TableManifest
-
- // Contains total number of creation and deletion changes in the manifest -- used to compute
- // whether it'd be useful to rewrite the manifest.
- Creations int
- Deletions int
-}
-
-func createManifest() Manifest {
- levels := make([]levelManifest, 0)
- return Manifest{
- Levels: levels,
- Tables: make(map[uint64]TableManifest),
- }
-}
-
-// levelManifest contains information about LSM tree levels
-// in the MANIFEST file.
-type levelManifest struct {
- Tables map[uint64]struct{} // Set of table id's
-}
-
-// TableManifest contains information about a specific level
-// in the LSM tree.
-type TableManifest struct {
- Level uint8
- Checksum []byte
-}
-
-// manifestFile holds the file pointer (and other info) about the manifest file, which is a log
-// file we append to.
-type manifestFile struct {
- fp *os.File
- directory string
- // We make this configurable so that unit tests can hit rewrite() code quickly
- deletionsRewriteThreshold int
-
- // Guards appends, which includes access to the manifest field.
- appendLock sync.Mutex
-
- // Used to track the current state of the manifest, used when rewriting.
- manifest Manifest
-}
-
-const (
- // ManifestFilename is the filename for the manifest file.
- ManifestFilename = "MANIFEST"
- manifestRewriteFilename = "MANIFEST-REWRITE"
- manifestDeletionsRewriteThreshold = 10000
- manifestDeletionsRatio = 10
-)
-
-// asChanges returns a sequence of changes that could be used to recreate the Manifest in its
-// present state.
-func (m *Manifest) asChanges() []*pb.ManifestChange {
- changes := make([]*pb.ManifestChange, 0, len(m.Tables))
- for id, tm := range m.Tables {
- changes = append(changes, newCreateChange(id, int(tm.Level), tm.Checksum))
- }
- return changes
-}
-
-func (m *Manifest) clone() Manifest {
- changeSet := pb.ManifestChangeSet{Changes: m.asChanges()}
- ret := createManifest()
- y.Check(applyChangeSet(&ret, &changeSet))
- return ret
-}
-
-// openOrCreateManifestFile opens a Badger manifest file if it exists, or creates on if
-// one doesn’t.
-func openOrCreateManifestFile(dir string, readOnly bool) (
- ret *manifestFile, result Manifest, err error) {
- return helpOpenOrCreateManifestFile(dir, readOnly, manifestDeletionsRewriteThreshold)
-}
-
-func helpOpenOrCreateManifestFile(dir string, readOnly bool, deletionsThreshold int) (
- ret *manifestFile, result Manifest, err error) {
-
- path := filepath.Join(dir, ManifestFilename)
- var flags uint32
- if readOnly {
- flags |= y.ReadOnly
- }
- fp, err := y.OpenExistingFile(path, flags) // We explicitly sync in addChanges, outside the lock.
- if err != nil {
- if !os.IsNotExist(err) {
- return nil, Manifest{}, err
- }
- if readOnly {
- return nil, Manifest{}, fmt.Errorf("no manifest found, required for read-only db")
- }
- m := createManifest()
- fp, netCreations, err := helpRewrite(dir, &m)
- if err != nil {
- return nil, Manifest{}, err
- }
- y.AssertTrue(netCreations == 0)
- mf := &manifestFile{
- fp: fp,
- directory: dir,
- manifest: m.clone(),
- deletionsRewriteThreshold: deletionsThreshold,
- }
- return mf, m, nil
- }
-
- manifest, truncOffset, err := ReplayManifestFile(fp)
- if err != nil {
- _ = fp.Close()
- return nil, Manifest{}, err
- }
-
- if !readOnly {
- // Truncate file so we don't have a half-written entry at the end.
- if err := fp.Truncate(truncOffset); err != nil {
- _ = fp.Close()
- return nil, Manifest{}, err
- }
- }
- if _, err = fp.Seek(0, io.SeekEnd); err != nil {
- _ = fp.Close()
- return nil, Manifest{}, err
- }
-
- mf := &manifestFile{
- fp: fp,
- directory: dir,
- manifest: manifest.clone(),
- deletionsRewriteThreshold: deletionsThreshold,
- }
- return mf, manifest, nil
-}
-
-func (mf *manifestFile) close() error {
- return mf.fp.Close()
-}
-
-// addChanges writes a batch of changes, atomically, to the file. By "atomically" that means when
-// we replay the MANIFEST file, we'll either replay all the changes or none of them. (The truth of
-// this depends on the filesystem -- some might append garbage data if a system crash happens at
-// the wrong time.)
-func (mf *manifestFile) addChanges(changesParam []*pb.ManifestChange) error {
- changes := pb.ManifestChangeSet{Changes: changesParam}
- buf, err := changes.Marshal()
- if err != nil {
- return err
- }
-
- // Maybe we could use O_APPEND instead (on certain file systems)
- mf.appendLock.Lock()
- if err := applyChangeSet(&mf.manifest, &changes); err != nil {
- mf.appendLock.Unlock()
- return err
- }
- // Rewrite manifest if it'd shrink by 1/10 and it's big enough to care
- if mf.manifest.Deletions > mf.deletionsRewriteThreshold &&
- mf.manifest.Deletions > manifestDeletionsRatio*(mf.manifest.Creations-mf.manifest.Deletions) {
- if err := mf.rewrite(); err != nil {
- mf.appendLock.Unlock()
- return err
- }
- } else {
- var lenCrcBuf [8]byte
- binary.BigEndian.PutUint32(lenCrcBuf[0:4], uint32(len(buf)))
- binary.BigEndian.PutUint32(lenCrcBuf[4:8], crc32.Checksum(buf, y.CastagnoliCrcTable))
- buf = append(lenCrcBuf[:], buf...)
- if _, err := mf.fp.Write(buf); err != nil {
- mf.appendLock.Unlock()
- return err
- }
- }
-
- mf.appendLock.Unlock()
- return y.FileSync(mf.fp)
-}
-
-// Has to be 4 bytes. The value can never change, ever, anyway.
-var magicText = [4]byte{'B', 'd', 'g', 'r'}
-
-// The magic version number.
-const magicVersion = 4
-
-func helpRewrite(dir string, m *Manifest) (*os.File, int, error) {
- rewritePath := filepath.Join(dir, manifestRewriteFilename)
- // We explicitly sync.
- fp, err := y.OpenTruncFile(rewritePath, false)
- if err != nil {
- return nil, 0, err
- }
-
- buf := make([]byte, 8)
- copy(buf[0:4], magicText[:])
- binary.BigEndian.PutUint32(buf[4:8], magicVersion)
-
- netCreations := len(m.Tables)
- changes := m.asChanges()
- set := pb.ManifestChangeSet{Changes: changes}
-
- changeBuf, err := set.Marshal()
- if err != nil {
- fp.Close()
- return nil, 0, err
- }
- var lenCrcBuf [8]byte
- binary.BigEndian.PutUint32(lenCrcBuf[0:4], uint32(len(changeBuf)))
- binary.BigEndian.PutUint32(lenCrcBuf[4:8], crc32.Checksum(changeBuf, y.CastagnoliCrcTable))
- buf = append(buf, lenCrcBuf[:]...)
- buf = append(buf, changeBuf...)
- if _, err := fp.Write(buf); err != nil {
- fp.Close()
- return nil, 0, err
- }
- if err := y.FileSync(fp); err != nil {
- fp.Close()
- return nil, 0, err
- }
-
- // In Windows the files should be closed before doing a Rename.
- if err = fp.Close(); err != nil {
- return nil, 0, err
- }
- manifestPath := filepath.Join(dir, ManifestFilename)
- if err := os.Rename(rewritePath, manifestPath); err != nil {
- return nil, 0, err
- }
- fp, err = y.OpenExistingFile(manifestPath, 0)
- if err != nil {
- return nil, 0, err
- }
- if _, err := fp.Seek(0, io.SeekEnd); err != nil {
- fp.Close()
- return nil, 0, err
- }
- if err := syncDir(dir); err != nil {
- fp.Close()
- return nil, 0, err
- }
-
- return fp, netCreations, nil
-}
-
-// Must be called while appendLock is held.
-func (mf *manifestFile) rewrite() error {
- // In Windows the files should be closed before doing a Rename.
- if err := mf.fp.Close(); err != nil {
- return err
- }
- fp, netCreations, err := helpRewrite(mf.directory, &mf.manifest)
- if err != nil {
- return err
- }
- mf.fp = fp
- mf.manifest.Creations = netCreations
- mf.manifest.Deletions = 0
-
- return nil
-}
-
-type countingReader struct {
- wrapped *bufio.Reader
- count int64
-}
-
-func (r *countingReader) Read(p []byte) (n int, err error) {
- n, err = r.wrapped.Read(p)
- r.count += int64(n)
- return
-}
-
-func (r *countingReader) ReadByte() (b byte, err error) {
- b, err = r.wrapped.ReadByte()
- if err == nil {
- r.count++
- }
- return
-}
-
-var (
- errBadMagic = errors.New("manifest has bad magic")
- errBadChecksum = errors.New("manifest has checksum mismatch")
-)
-
-// ReplayManifestFile reads the manifest file and constructs two manifest objects. (We need one
-// immutable copy and one mutable copy of the manifest. Easiest way is to construct two of them.)
-// Also, returns the last offset after a completely read manifest entry -- the file must be
-// truncated at that point before further appends are made (if there is a partial entry after
-// that). In normal conditions, truncOffset is the file size.
-func ReplayManifestFile(fp *os.File) (ret Manifest, truncOffset int64, err error) {
- r := countingReader{wrapped: bufio.NewReader(fp)}
-
- var magicBuf [8]byte
- if _, err := io.ReadFull(&r, magicBuf[:]); err != nil {
- return Manifest{}, 0, errBadMagic
- }
- if !bytes.Equal(magicBuf[0:4], magicText[:]) {
- return Manifest{}, 0, errBadMagic
- }
- version := binary.BigEndian.Uint32(magicBuf[4:8])
- if version != magicVersion {
- return Manifest{}, 0,
- fmt.Errorf("manifest has unsupported version: %d (we support %d)", version, magicVersion)
- }
-
- build := createManifest()
- var offset int64
- for {
- offset = r.count
- var lenCrcBuf [8]byte
- _, err := io.ReadFull(&r, lenCrcBuf[:])
- if err != nil {
- if err == io.EOF || err == io.ErrUnexpectedEOF {
- break
- }
- return Manifest{}, 0, err
- }
- length := binary.BigEndian.Uint32(lenCrcBuf[0:4])
- var buf = make([]byte, length)
- if _, err := io.ReadFull(&r, buf); err != nil {
- if err == io.EOF || err == io.ErrUnexpectedEOF {
- break
- }
- return Manifest{}, 0, err
- }
- if crc32.Checksum(buf, y.CastagnoliCrcTable) != binary.BigEndian.Uint32(lenCrcBuf[4:8]) {
- return Manifest{}, 0, errBadChecksum
- }
-
- var changeSet pb.ManifestChangeSet
- if err := changeSet.Unmarshal(buf); err != nil {
- return Manifest{}, 0, err
- }
-
- if err := applyChangeSet(&build, &changeSet); err != nil {
- return Manifest{}, 0, err
- }
- }
-
- return build, offset, err
-}
-
-func applyManifestChange(build *Manifest, tc *pb.ManifestChange) error {
- switch tc.Op {
- case pb.ManifestChange_CREATE:
- if _, ok := build.Tables[tc.Id]; ok {
- return fmt.Errorf("MANIFEST invalid, table %d exists", tc.Id)
- }
- build.Tables[tc.Id] = TableManifest{
- Level: uint8(tc.Level),
- Checksum: append([]byte{}, tc.Checksum...),
- }
- for len(build.Levels) <= int(tc.Level) {
- build.Levels = append(build.Levels, levelManifest{make(map[uint64]struct{})})
- }
- build.Levels[tc.Level].Tables[tc.Id] = struct{}{}
- build.Creations++
- case pb.ManifestChange_DELETE:
- tm, ok := build.Tables[tc.Id]
- if !ok {
- return fmt.Errorf("MANIFEST removes non-existing table %d", tc.Id)
- }
- delete(build.Levels[tm.Level].Tables, tc.Id)
- delete(build.Tables, tc.Id)
- build.Deletions++
- default:
- return fmt.Errorf("MANIFEST file has invalid manifestChange op")
- }
- return nil
-}
-
-// This is not a "recoverable" error -- opening the KV store fails because the MANIFEST file is
-// just plain broken.
-func applyChangeSet(build *Manifest, changeSet *pb.ManifestChangeSet) error {
- for _, change := range changeSet.Changes {
- if err := applyManifestChange(build, change); err != nil {
- return err
- }
- }
- return nil
-}
-
-func newCreateChange(id uint64, level int, checksum []byte) *pb.ManifestChange {
- return &pb.ManifestChange{
- Id: id,
- Op: pb.ManifestChange_CREATE,
- Level: uint32(level),
- Checksum: checksum,
- }
-}
-
-func newDeleteChange(id uint64) *pb.ManifestChange {
- return &pb.ManifestChange{
- Id: id,
- Op: pb.ManifestChange_DELETE,
- }
-}
diff --git a/vendor/github.com/dgraph-io/badger/merge.go b/vendor/github.com/dgraph-io/badger/merge.go
deleted file mode 100644
index 02ad4bcd..00000000
--- a/vendor/github.com/dgraph-io/badger/merge.go
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "sync"
- "time"
-
- "github.com/dgraph-io/badger/y"
- "github.com/pkg/errors"
-)
-
-// MergeOperator represents a Badger merge operator.
-type MergeOperator struct {
- sync.RWMutex
- f MergeFunc
- db *DB
- key []byte
- closer *y.Closer
-}
-
-// MergeFunc accepts two byte slices, one representing an existing value, and
-// another representing a new value that needs to be ‘merged’ into it. MergeFunc
-// contains the logic to perform the ‘merge’ and return an updated value.
-// MergeFunc could perform operations like integer addition, list appends etc.
-// Note that the ordering of the operands is maintained.
-type MergeFunc func(existingVal, newVal []byte) []byte
-
-// GetMergeOperator creates a new MergeOperator for a given key and returns a
-// pointer to it. It also fires off a goroutine that performs a compaction using
-// the merge function that runs periodically, as specified by dur.
-func (db *DB) GetMergeOperator(key []byte,
- f MergeFunc, dur time.Duration) *MergeOperator {
- op := &MergeOperator{
- f: f,
- db: db,
- key: key,
- closer: y.NewCloser(1),
- }
-
- go op.runCompactions(dur)
- return op
-}
-
-var errNoMerge = errors.New("No need for merge")
-
-func (op *MergeOperator) iterateAndMerge() (newVal []byte, latest uint64, err error) {
- txn := op.db.NewTransaction(false)
- defer txn.Discard()
- opt := DefaultIteratorOptions
- opt.AllVersions = true
- it := txn.NewKeyIterator(op.key, opt)
- defer it.Close()
-
- var numVersions int
- for it.Rewind(); it.Valid(); it.Next() {
- item := it.Item()
- numVersions++
- if numVersions == 1 {
- // This should be the newVal, considering this is the latest version.
- newVal, err = item.ValueCopy(newVal)
- if err != nil {
- return nil, 0, err
- }
- latest = item.Version()
- } else {
- if err := item.Value(func(oldVal []byte) error {
- // The merge should always be on the newVal considering it has the merge result of
- // the latest version. The value read should be the oldVal.
- newVal = op.f(oldVal, newVal)
- return nil
- }); err != nil {
- return nil, 0, err
- }
- }
- if item.DiscardEarlierVersions() {
- break
- }
- }
- if numVersions == 0 {
- return nil, latest, ErrKeyNotFound
- } else if numVersions == 1 {
- return newVal, latest, errNoMerge
- }
- return newVal, latest, nil
-}
-
-func (op *MergeOperator) compact() error {
- op.Lock()
- defer op.Unlock()
- val, version, err := op.iterateAndMerge()
- if err == ErrKeyNotFound || err == errNoMerge {
- return nil
- } else if err != nil {
- return err
- }
- entries := []*Entry{
- {
- Key: y.KeyWithTs(op.key, version),
- Value: val,
- meta: bitDiscardEarlierVersions,
- },
- }
- // Write value back to the DB. It is important that we do not set the bitMergeEntry bit
- // here. When compaction happens, all the older merged entries will be removed.
- return op.db.batchSetAsync(entries, func(err error) {
- if err != nil {
- op.db.opt.Errorf("failed to insert the result of merge compaction: %s", err)
- }
- })
-}
-
-func (op *MergeOperator) runCompactions(dur time.Duration) {
- ticker := time.NewTicker(dur)
- defer op.closer.Done()
- var stop bool
- for {
- select {
- case <-op.closer.HasBeenClosed():
- stop = true
- case <-ticker.C: // wait for tick
- }
- if err := op.compact(); err != nil {
- op.db.opt.Errorf("failure while running merge operation: %s", err)
- }
- if stop {
- ticker.Stop()
- break
- }
- }
-}
-
-// Add records a value in Badger which will eventually be merged by a background
-// routine into the values that were recorded by previous invocations to Add().
-func (op *MergeOperator) Add(val []byte) error {
- return op.db.Update(func(txn *Txn) error {
- return txn.SetEntry(NewEntry(op.key, val).withMergeBit())
- })
-}
-
-// Get returns the latest value for the merge operator, which is derived by
-// applying the merge function to all the values added so far.
-//
-// If Add has not been called even once, Get will return ErrKeyNotFound.
-func (op *MergeOperator) Get() ([]byte, error) {
- op.RLock()
- defer op.RUnlock()
- var existing []byte
- err := op.db.View(func(txn *Txn) (err error) {
- existing, _, err = op.iterateAndMerge()
- return err
- })
- if err == errNoMerge {
- return existing, nil
- }
- return existing, err
-}
-
-// Stop waits for any pending merge to complete and then stops the background
-// goroutine.
-func (op *MergeOperator) Stop() {
- op.closer.SignalAndWait()
-}
diff --git a/vendor/github.com/dgraph-io/badger/options.go b/vendor/github.com/dgraph-io/badger/options.go
deleted file mode 100644
index b91fdc5e..00000000
--- a/vendor/github.com/dgraph-io/badger/options.go
+++ /dev/null
@@ -1,374 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "github.com/dgraph-io/badger/options"
-)
-
-// Note: If you add a new option X make sure you also add a WithX method on Options.
-
-// Options are params for creating DB object.
-//
-// This package provides DefaultOptions which contains options that should
-// work for most applications. Consider using that as a starting point before
-// customizing it for your own needs.
-//
-// Each option X is documented on the WithX method.
-type Options struct {
- // Required options.
-
- Dir string
- ValueDir string
-
- // Usually modified options.
-
- SyncWrites bool
- TableLoadingMode options.FileLoadingMode
- ValueLogLoadingMode options.FileLoadingMode
- NumVersionsToKeep int
- ReadOnly bool
- Truncate bool
- Logger Logger
-
- // Fine tuning options.
-
- MaxTableSize int64
- LevelSizeMultiplier int
- MaxLevels int
- ValueThreshold int
- NumMemtables int
-
- NumLevelZeroTables int
- NumLevelZeroTablesStall int
-
- LevelOneSize int64
- ValueLogFileSize int64
- ValueLogMaxEntries uint32
-
- NumCompactors int
- CompactL0OnClose bool
- LogRotatesToFlush int32
-
- // Transaction start and commit timestamps are managed by end-user.
- // This is only useful for databases built on top of Badger (like Dgraph).
- // Not recommended for most users.
- managedTxns bool
-
- // 4. Flags for testing purposes
- // ------------------------------
- maxBatchCount int64 // max entries in batch
- maxBatchSize int64 // max batch size in bytes
-
-}
-
-// DefaultOptions sets a list of recommended options for good performance.
-// Feel free to modify these to suit your needs with the WithX methods.
-func DefaultOptions(path string) Options {
- return Options{
- Dir: path,
- ValueDir: path,
- LevelOneSize: 256 << 20,
- LevelSizeMultiplier: 10,
- TableLoadingMode: options.MemoryMap,
- ValueLogLoadingMode: options.MemoryMap,
- // table.MemoryMap to mmap() the tables.
- // table.Nothing to not preload the tables.
- MaxLevels: 7,
- MaxTableSize: 64 << 20,
- NumCompactors: 2, // Compactions can be expensive. Only run 2.
- NumLevelZeroTables: 5,
- NumLevelZeroTablesStall: 10,
- NumMemtables: 5,
- SyncWrites: true,
- NumVersionsToKeep: 1,
- CompactL0OnClose: true,
- // Nothing to read/write value log using standard File I/O
- // MemoryMap to mmap() the value log files
- // (2^30 - 1)*2 when mmapping < 2^31 - 1, max int32.
- // -1 so 2*ValueLogFileSize won't overflow on 32-bit systems.
- ValueLogFileSize: 1<<30 - 1,
-
- ValueLogMaxEntries: 1000000,
- ValueThreshold: 32,
- Truncate: false,
- Logger: defaultLogger,
- LogRotatesToFlush: 2,
- }
-}
-
-// LSMOnlyOptions follows from DefaultOptions, but sets a higher ValueThreshold
-// so values would be colocated with the LSM tree, with value log largely acting
-// as a write-ahead log only. These options would reduce the disk usage of value
-// log, and make Badger act more like a typical LSM tree.
-func LSMOnlyOptions(path string) Options {
- // Max value length which fits in uint16.
- // Let's not set any other options, because they can cause issues with the
- // size of key-value a user can pass to Badger. For e.g., if we set
- // ValueLogFileSize to 64MB, a user can't pass a value more than that.
- // Setting it to ValueLogMaxEntries to 1000, can generate too many files.
- // These options are better configured on a usage basis, than broadly here.
- // The ValueThreshold is the most important setting a user needs to do to
- // achieve a heavier usage of LSM tree.
- // NOTE: If a user does not want to set 64KB as the ValueThreshold because
- // of performance reasons, 1KB would be a good option too, allowing
- // values smaller than 1KB to be colocated with the keys in the LSM tree.
- return DefaultOptions(path).WithValueThreshold(65500)
-}
-
-// WithDir returns a new Options value with Dir set to the given value.
-//
-// Dir is the path of the directory where key data will be stored in.
-// If it doesn't exist, Badger will try to create it for you.
-// This is set automatically to be the path given to `DefaultOptions`.
-func (opt Options) WithDir(val string) Options {
- opt.Dir = val
- return opt
-}
-
-// WithValueDir returns a new Options value with ValueDir set to the given value.
-//
-// ValueDir is the path of the directory where value data will be stored in.
-// If it doesn't exist, Badger will try to create it for you.
-// This is set automatically to be the path given to `DefaultOptions`.
-func (opt Options) WithValueDir(val string) Options {
- opt.ValueDir = val
- return opt
-}
-
-// WithSyncWrites returns a new Options value with SyncWrites set to the given value.
-//
-// When SyncWrites is true all writes are synced to disk. Setting this to false would achieve better
-// performance, but may cause data loss in case of crash.
-//
-// The default value of SyncWrites is true.
-func (opt Options) WithSyncWrites(val bool) Options {
- opt.SyncWrites = val
- return opt
-}
-
-// WithTableLoadingMode returns a new Options value with TableLoadingMode set to the given value.
-//
-// TableLoadingMode indicates which file loading mode should be used for the LSM tree data files.
-//
-// The default value of TableLoadingMode is options.MemoryMap.
-func (opt Options) WithTableLoadingMode(val options.FileLoadingMode) Options {
- opt.TableLoadingMode = val
- return opt
-}
-
-// WithValueLogLoadingMode returns a new Options value with ValueLogLoadingMode set to the given
-// value.
-//
-// ValueLogLoadingMode indicates which file loading mode should be used for the value log data
-// files.
-//
-// The default value of ValueLogLoadingMode is options.MemoryMap.
-func (opt Options) WithValueLogLoadingMode(val options.FileLoadingMode) Options {
- opt.ValueLogLoadingMode = val
- return opt
-}
-
-// WithNumVersionsToKeep returns a new Options value with NumVersionsToKeep set to the given value.
-//
-// NumVersionsToKeep sets how many versions to keep per key at most.
-//
-// The default value of NumVersionsToKeep is 1.
-func (opt Options) WithNumVersionsToKeep(val int) Options {
- opt.NumVersionsToKeep = val
- return opt
-}
-
-// WithReadOnly returns a new Options value with ReadOnly set to the given value.
-//
-// When ReadOnly is true the DB will be opened on read-only mode.
-// Multiple processes can open the same Badger DB.
-// Note: if the DB being opened had crashed before and has vlog data to be replayed,
-// ReadOnly will cause Open to fail with an appropriate message.
-//
-// The default value of ReadOnly is false.
-func (opt Options) WithReadOnly(val bool) Options {
- opt.ReadOnly = val
- return opt
-}
-
-// WithTruncate returns a new Options value with Truncate set to the given value.
-//
-// Truncate indicates whether value log files should be truncated to delete corrupt data, if any.
-// This option is ignored when ReadOnly is true.
-//
-// The default value of Truncate is false.
-func (opt Options) WithTruncate(val bool) Options {
- opt.Truncate = val
- return opt
-}
-
-// WithLogger returns a new Options value with Logger set to the given value.
-//
-// Logger provides a way to configure what logger each value of badger.DB uses.
-//
-// The default value of Logger writes to stderr using the log package from the Go standard library.
-func (opt Options) WithLogger(val Logger) Options {
- opt.Logger = val
- return opt
-}
-
-// WithMaxTableSize returns a new Options value with MaxTableSize set to the given value.
-//
-// MaxTableSize sets the maximum size in bytes for each LSM table or file.
-//
-// The default value of MaxTableSize is 64MB.
-func (opt Options) WithMaxTableSize(val int64) Options {
- opt.MaxTableSize = val
- return opt
-}
-
-// WithLevelSizeMultiplier returns a new Options value with LevelSizeMultiplier set to the given
-// value.
-//
-// LevelSizeMultiplier sets the ratio between the maximum sizes of contiguous levels in the LSM.
-// Once a level grows to be larger than this ratio allowed, the compaction process will be
-// triggered.
-//
-// The default value of LevelSizeMultiplier is 10.
-func (opt Options) WithLevelSizeMultiplier(val int) Options {
- opt.LevelSizeMultiplier = val
- return opt
-}
-
-// WithMaxLevels returns a new Options value with MaxLevels set to the given value.
-//
-// Maximum number of levels of compaction allowed in the LSM.
-//
-// The default value of MaxLevels is 7.
-func (opt Options) WithMaxLevels(val int) Options {
- opt.MaxLevels = val
- return opt
-}
-
-// WithValueThreshold returns a new Options value with ValueThreshold set to the given value.
-//
-// ValueThreshold sets the threshold used to decide whether a value is stored directly in the LSM
-// tree or separatedly in the log value files.
-//
-// The default value of ValueThreshold is 32, but LSMOnlyOptions sets it to 65500.
-func (opt Options) WithValueThreshold(val int) Options {
- opt.ValueThreshold = val
- return opt
-}
-
-// WithNumMemtables returns a new Options value with NumMemtables set to the given value.
-//
-// NumMemtables sets the maximum number of tables to keep in memory before stalling.
-//
-// The default value of NumMemtables is 5.
-func (opt Options) WithNumMemtables(val int) Options {
- opt.NumMemtables = val
- return opt
-}
-
-// WithNumLevelZeroTables returns a new Options value with NumLevelZeroTables set to the given
-// value.
-//
-// NumLevelZeroTables sets the maximum number of Level 0 tables before compaction starts.
-//
-// The default value of NumLevelZeroTables is 5.
-func (opt Options) WithNumLevelZeroTables(val int) Options {
- opt.NumLevelZeroTables = val
- return opt
-}
-
-// WithNumLevelZeroTablesStall returns a new Options value with NumLevelZeroTablesStall set to the
-// given value.
-//
-// NumLevelZeroTablesStall sets the number of Level 0 tables that once reached causes the DB to
-// stall until compaction succeeds.
-//
-// The default value of NumLevelZeroTablesStall is 10.
-func (opt Options) WithNumLevelZeroTablesStall(val int) Options {
- opt.NumLevelZeroTablesStall = val
- return opt
-}
-
-// WithLevelOneSize returns a new Options value with LevelOneSize set to the given value.
-//
-// LevelOneSize sets the maximum total size for Level 1.
-//
-// The default value of LevelOneSize is 20MB.
-func (opt Options) WithLevelOneSize(val int64) Options {
- opt.LevelOneSize = val
- return opt
-}
-
-// WithValueLogFileSize returns a new Options value with ValueLogFileSize set to the given value.
-//
-// ValueLogFileSize sets the maximum size of a single value log file.
-//
-// The default value of ValueLogFileSize is 1GB.
-func (opt Options) WithValueLogFileSize(val int64) Options {
- opt.ValueLogFileSize = val
- return opt
-}
-
-// WithValueLogMaxEntries returns a new Options value with ValueLogMaxEntries set to the given
-// value.
-//
-// ValueLogMaxEntries sets the maximum number of entries a value log file can hold approximately.
-// A actual size limit of a value log file is the minimum of ValueLogFileSize and
-// ValueLogMaxEntries.
-//
-// The default value of ValueLogMaxEntries is one million (1000000).
-func (opt Options) WithValueLogMaxEntries(val uint32) Options {
- opt.ValueLogMaxEntries = val
- return opt
-}
-
-// WithNumCompactors returns a new Options value with NumCompactors set to the given value.
-//
-// NumCompactors sets the number of compaction workers to run concurrently.
-// Setting this to zero stops compactions, which could eventually cause writes to block forever.
-//
-// The default value of NumCompactors is 2.
-func (opt Options) WithNumCompactors(val int) Options {
- opt.NumCompactors = val
- return opt
-}
-
-// WithCompactL0OnClose returns a new Options value with CompactL0OnClose set to the given value.
-//
-// CompactL0OnClose determines whether Level 0 should be compacted before closing the DB.
-// This ensures that both reads and writes are efficient when the DB is opened later.
-//
-// The default value of CompactL0OnClose is true.
-func (opt Options) WithCompactL0OnClose(val bool) Options {
- opt.CompactL0OnClose = val
- return opt
-}
-
-// WithLogRotatesToFlush returns a new Options value with LogRotatesToFlush set to the given value.
-//
-// LogRotatesToFlush sets the number of value log file rotates after which the Memtables are
-// flushed to disk. This is useful in write loads with fewer keys and larger values. This work load
-// would fill up the value logs quickly, while not filling up the Memtables. Thus, on a crash
-// and restart, the value log head could cause the replay of a good number of value log files
-// which can slow things on start.
-//
-// The default value of LogRotatesToFlush is 2.
-func (opt Options) WithLogRotatesToFlush(val int32) Options {
- opt.LogRotatesToFlush = val
- return opt
-}
diff --git a/vendor/github.com/dgraph-io/badger/options/options.go b/vendor/github.com/dgraph-io/badger/options/options.go
deleted file mode 100644
index 06c8b1b7..00000000
--- a/vendor/github.com/dgraph-io/badger/options/options.go
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package options
-
-// FileLoadingMode specifies how data in LSM table files and value log files should
-// be loaded.
-type FileLoadingMode int
-
-const (
- // FileIO indicates that files must be loaded using standard I/O
- FileIO FileLoadingMode = iota
- // LoadToRAM indicates that file must be loaded into RAM
- LoadToRAM
- // MemoryMap indicates that that the file must be memory-mapped
- MemoryMap
-)
diff --git a/vendor/github.com/dgraph-io/badger/pb/gen.sh b/vendor/github.com/dgraph-io/badger/pb/gen.sh
deleted file mode 100644
index 49b44ff4..00000000
--- a/vendor/github.com/dgraph-io/badger/pb/gen.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-
-# You might need to go get -v github.com/gogo/protobuf/...
-
-protos=${GOPATH-$HOME/go}/src/github.com/dgraph-io/badger/pb
-pushd $protos > /dev/null
-protoc --gofast_out=plugins=grpc:. -I=. pb.proto
diff --git a/vendor/github.com/dgraph-io/badger/pb/pb.pb.go b/vendor/github.com/dgraph-io/badger/pb/pb.pb.go
deleted file mode 100644
index f9a2c6ee..00000000
--- a/vendor/github.com/dgraph-io/badger/pb/pb.pb.go
+++ /dev/null
@@ -1,1313 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: pb.proto
-
-package pb
-
-import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- io "io"
- math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type ManifestChange_Operation int32
-
-const (
- ManifestChange_CREATE ManifestChange_Operation = 0
- ManifestChange_DELETE ManifestChange_Operation = 1
-)
-
-var ManifestChange_Operation_name = map[int32]string{
- 0: "CREATE",
- 1: "DELETE",
-}
-
-var ManifestChange_Operation_value = map[string]int32{
- "CREATE": 0,
- "DELETE": 1,
-}
-
-func (x ManifestChange_Operation) String() string {
- return proto.EnumName(ManifestChange_Operation_name, int32(x))
-}
-
-func (ManifestChange_Operation) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_f80abaa17e25ccc8, []int{3, 0}
-}
-
-type KV struct {
- Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
- UserMeta []byte `protobuf:"bytes,3,opt,name=user_meta,json=userMeta,proto3" json:"user_meta,omitempty"`
- Version uint64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"`
- ExpiresAt uint64 `protobuf:"varint,5,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"`
- Meta []byte `protobuf:"bytes,6,opt,name=meta,proto3" json:"meta,omitempty"`
- // Stream id is used to identify which stream the KV came from.
- StreamId uint32 `protobuf:"varint,10,opt,name=stream_id,json=streamId,proto3" json:"stream_id,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *KV) Reset() { *m = KV{} }
-func (m *KV) String() string { return proto.CompactTextString(m) }
-func (*KV) ProtoMessage() {}
-func (*KV) Descriptor() ([]byte, []int) {
- return fileDescriptor_f80abaa17e25ccc8, []int{0}
-}
-func (m *KV) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *KV) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_KV.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalTo(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *KV) XXX_Merge(src proto.Message) {
- xxx_messageInfo_KV.Merge(m, src)
-}
-func (m *KV) XXX_Size() int {
- return m.Size()
-}
-func (m *KV) XXX_DiscardUnknown() {
- xxx_messageInfo_KV.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_KV proto.InternalMessageInfo
-
-func (m *KV) GetKey() []byte {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *KV) GetValue() []byte {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-func (m *KV) GetUserMeta() []byte {
- if m != nil {
- return m.UserMeta
- }
- return nil
-}
-
-func (m *KV) GetVersion() uint64 {
- if m != nil {
- return m.Version
- }
- return 0
-}
-
-func (m *KV) GetExpiresAt() uint64 {
- if m != nil {
- return m.ExpiresAt
- }
- return 0
-}
-
-func (m *KV) GetMeta() []byte {
- if m != nil {
- return m.Meta
- }
- return nil
-}
-
-func (m *KV) GetStreamId() uint32 {
- if m != nil {
- return m.StreamId
- }
- return 0
-}
-
-type KVList struct {
- Kv []*KV `protobuf:"bytes,1,rep,name=kv,proto3" json:"kv,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *KVList) Reset() { *m = KVList{} }
-func (m *KVList) String() string { return proto.CompactTextString(m) }
-func (*KVList) ProtoMessage() {}
-func (*KVList) Descriptor() ([]byte, []int) {
- return fileDescriptor_f80abaa17e25ccc8, []int{1}
-}
-func (m *KVList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *KVList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_KVList.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalTo(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *KVList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_KVList.Merge(m, src)
-}
-func (m *KVList) XXX_Size() int {
- return m.Size()
-}
-func (m *KVList) XXX_DiscardUnknown() {
- xxx_messageInfo_KVList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_KVList proto.InternalMessageInfo
-
-func (m *KVList) GetKv() []*KV {
- if m != nil {
- return m.Kv
- }
- return nil
-}
-
-type ManifestChangeSet struct {
- // A set of changes that are applied atomically.
- Changes []*ManifestChange `protobuf:"bytes,1,rep,name=changes,proto3" json:"changes,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ManifestChangeSet) Reset() { *m = ManifestChangeSet{} }
-func (m *ManifestChangeSet) String() string { return proto.CompactTextString(m) }
-func (*ManifestChangeSet) ProtoMessage() {}
-func (*ManifestChangeSet) Descriptor() ([]byte, []int) {
- return fileDescriptor_f80abaa17e25ccc8, []int{2}
-}
-func (m *ManifestChangeSet) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ManifestChangeSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ManifestChangeSet.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalTo(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ManifestChangeSet) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ManifestChangeSet.Merge(m, src)
-}
-func (m *ManifestChangeSet) XXX_Size() int {
- return m.Size()
-}
-func (m *ManifestChangeSet) XXX_DiscardUnknown() {
- xxx_messageInfo_ManifestChangeSet.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ManifestChangeSet proto.InternalMessageInfo
-
-func (m *ManifestChangeSet) GetChanges() []*ManifestChange {
- if m != nil {
- return m.Changes
- }
- return nil
-}
-
-type ManifestChange struct {
- Id uint64 `protobuf:"varint,1,opt,name=Id,proto3" json:"Id,omitempty"`
- Op ManifestChange_Operation `protobuf:"varint,2,opt,name=Op,proto3,enum=pb.ManifestChange_Operation" json:"Op,omitempty"`
- Level uint32 `protobuf:"varint,3,opt,name=Level,proto3" json:"Level,omitempty"`
- Checksum []byte `protobuf:"bytes,4,opt,name=Checksum,proto3" json:"Checksum,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ManifestChange) Reset() { *m = ManifestChange{} }
-func (m *ManifestChange) String() string { return proto.CompactTextString(m) }
-func (*ManifestChange) ProtoMessage() {}
-func (*ManifestChange) Descriptor() ([]byte, []int) {
- return fileDescriptor_f80abaa17e25ccc8, []int{3}
-}
-func (m *ManifestChange) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ManifestChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ManifestChange.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalTo(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ManifestChange) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ManifestChange.Merge(m, src)
-}
-func (m *ManifestChange) XXX_Size() int {
- return m.Size()
-}
-func (m *ManifestChange) XXX_DiscardUnknown() {
- xxx_messageInfo_ManifestChange.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ManifestChange proto.InternalMessageInfo
-
-func (m *ManifestChange) GetId() uint64 {
- if m != nil {
- return m.Id
- }
- return 0
-}
-
-func (m *ManifestChange) GetOp() ManifestChange_Operation {
- if m != nil {
- return m.Op
- }
- return ManifestChange_CREATE
-}
-
-func (m *ManifestChange) GetLevel() uint32 {
- if m != nil {
- return m.Level
- }
- return 0
-}
-
-func (m *ManifestChange) GetChecksum() []byte {
- if m != nil {
- return m.Checksum
- }
- return nil
-}
-
-func init() {
- proto.RegisterEnum("pb.ManifestChange_Operation", ManifestChange_Operation_name, ManifestChange_Operation_value)
- proto.RegisterType((*KV)(nil), "pb.KV")
- proto.RegisterType((*KVList)(nil), "pb.KVList")
- proto.RegisterType((*ManifestChangeSet)(nil), "pb.ManifestChangeSet")
- proto.RegisterType((*ManifestChange)(nil), "pb.ManifestChange")
-}
-
-func init() { proto.RegisterFile("pb.proto", fileDescriptor_f80abaa17e25ccc8) }
-
-var fileDescriptor_f80abaa17e25ccc8 = []byte{
- // 365 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x91, 0x4f, 0x8a, 0xdb, 0x30,
- 0x14, 0xc6, 0x47, 0x8a, 0xc7, 0xe3, 0xbc, 0xce, 0x04, 0x57, 0x94, 0x22, 0xfa, 0xc7, 0x18, 0x77,
- 0xe3, 0xc5, 0xe0, 0xc5, 0xf4, 0x04, 0x69, 0xea, 0x45, 0x48, 0x42, 0x40, 0x0d, 0xd9, 0x06, 0x39,
- 0x7e, 0x6d, 0x8c, 0x13, 0x5b, 0x58, 0x8a, 0x69, 0x6f, 0xd2, 0x0b, 0xf4, 0x04, 0xbd, 0x44, 0x97,
- 0x3d, 0x42, 0x49, 0x2f, 0x52, 0xac, 0xfc, 0x81, 0xd0, 0xdd, 0xfb, 0xbe, 0xef, 0xbd, 0x4f, 0xf0,
- 0x13, 0x78, 0x2a, 0x4b, 0x54, 0x53, 0x9b, 0x9a, 0x51, 0x95, 0x45, 0x3f, 0x09, 0xd0, 0xc9, 0x92,
- 0xf9, 0xd0, 0x2b, 0xf1, 0x1b, 0x27, 0x21, 0x89, 0xef, 0x45, 0x37, 0xb2, 0x17, 0x70, 0xdb, 0xca,
- 0xed, 0x1e, 0x39, 0xb5, 0xde, 0x51, 0xb0, 0xd7, 0xd0, 0xdf, 0x6b, 0x6c, 0x56, 0x3b, 0x34, 0x92,
- 0xf7, 0x6c, 0xe2, 0x75, 0xc6, 0x0c, 0x8d, 0x64, 0x1c, 0xee, 0x5a, 0x6c, 0x74, 0x51, 0x57, 0xdc,
- 0x09, 0x49, 0xec, 0x88, 0xb3, 0x64, 0x6f, 0x01, 0xf0, 0xab, 0x2a, 0x1a, 0xd4, 0x2b, 0x69, 0xf8,
- 0xad, 0x0d, 0xfb, 0x27, 0x67, 0x68, 0x18, 0x03, 0xc7, 0x16, 0xba, 0xb6, 0xd0, 0xce, 0xdd, 0x4b,
- 0xda, 0x34, 0x28, 0x77, 0xab, 0x22, 0xe7, 0x10, 0x92, 0xf8, 0x41, 0x78, 0x47, 0x63, 0x9c, 0x47,
- 0x21, 0xb8, 0x93, 0xe5, 0xb4, 0xd0, 0x86, 0xbd, 0x04, 0x5a, 0xb6, 0x9c, 0x84, 0xbd, 0xf8, 0xd9,
- 0x93, 0x9b, 0xa8, 0x2c, 0x99, 0x2c, 0x05, 0x2d, 0xdb, 0x68, 0x08, 0xcf, 0x67, 0xb2, 0x2a, 0x3e,
- 0xa3, 0x36, 0xa3, 0x8d, 0xac, 0xbe, 0xe0, 0x27, 0x34, 0xec, 0x11, 0xee, 0xd6, 0x56, 0xe8, 0xd3,
- 0x05, 0xeb, 0x2e, 0xae, 0xf7, 0xc4, 0x79, 0x25, 0xfa, 0x41, 0x60, 0x70, 0x9d, 0xb1, 0x01, 0xd0,
- 0x71, 0x6e, 0x29, 0x39, 0x82, 0x8e, 0x73, 0xf6, 0x08, 0x74, 0xae, 0x2c, 0xa1, 0xc1, 0xd3, 0x9b,
- 0xff, 0xbb, 0x92, 0xb9, 0xc2, 0x46, 0x9a, 0xa2, 0xae, 0x04, 0x9d, 0xab, 0x0e, 0xe9, 0x14, 0x5b,
- 0xdc, 0x5a, 0x70, 0x0f, 0xe2, 0x28, 0xd8, 0x2b, 0xf0, 0x46, 0x1b, 0x5c, 0x97, 0x7a, 0xbf, 0xb3,
- 0xd8, 0xee, 0xc5, 0x45, 0x47, 0xef, 0xa0, 0x7f, 0xa9, 0x60, 0x00, 0xee, 0x48, 0xa4, 0xc3, 0x45,
- 0xea, 0xdf, 0x74, 0xf3, 0xc7, 0x74, 0x9a, 0x2e, 0x52, 0x9f, 0x7c, 0xf0, 0x7f, 0x1d, 0x02, 0xf2,
- 0xfb, 0x10, 0x90, 0x3f, 0x87, 0x80, 0x7c, 0xff, 0x1b, 0xdc, 0x64, 0xae, 0xfd, 0xdf, 0xf7, 0xff,
- 0x02, 0x00, 0x00, 0xff, 0xff, 0xeb, 0x28, 0x5d, 0xcf, 0xeb, 0x01, 0x00, 0x00,
-}
-
-func (m *KV) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *KV) MarshalTo(dAtA []byte) (int, error) {
- var i int
- _ = i
- var l int
- _ = l
- if len(m.Key) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintPb(dAtA, i, uint64(len(m.Key)))
- i += copy(dAtA[i:], m.Key)
- }
- if len(m.Value) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintPb(dAtA, i, uint64(len(m.Value)))
- i += copy(dAtA[i:], m.Value)
- }
- if len(m.UserMeta) > 0 {
- dAtA[i] = 0x1a
- i++
- i = encodeVarintPb(dAtA, i, uint64(len(m.UserMeta)))
- i += copy(dAtA[i:], m.UserMeta)
- }
- if m.Version != 0 {
- dAtA[i] = 0x20
- i++
- i = encodeVarintPb(dAtA, i, uint64(m.Version))
- }
- if m.ExpiresAt != 0 {
- dAtA[i] = 0x28
- i++
- i = encodeVarintPb(dAtA, i, uint64(m.ExpiresAt))
- }
- if len(m.Meta) > 0 {
- dAtA[i] = 0x32
- i++
- i = encodeVarintPb(dAtA, i, uint64(len(m.Meta)))
- i += copy(dAtA[i:], m.Meta)
- }
- if m.StreamId != 0 {
- dAtA[i] = 0x50
- i++
- i = encodeVarintPb(dAtA, i, uint64(m.StreamId))
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
-}
-
-func (m *KVList) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *KVList) MarshalTo(dAtA []byte) (int, error) {
- var i int
- _ = i
- var l int
- _ = l
- if len(m.Kv) > 0 {
- for _, msg := range m.Kv {
- dAtA[i] = 0xa
- i++
- i = encodeVarintPb(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n
- }
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
-}
-
-func (m *ManifestChangeSet) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ManifestChangeSet) MarshalTo(dAtA []byte) (int, error) {
- var i int
- _ = i
- var l int
- _ = l
- if len(m.Changes) > 0 {
- for _, msg := range m.Changes {
- dAtA[i] = 0xa
- i++
- i = encodeVarintPb(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n
- }
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
-}
-
-func (m *ManifestChange) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ManifestChange) MarshalTo(dAtA []byte) (int, error) {
- var i int
- _ = i
- var l int
- _ = l
- if m.Id != 0 {
- dAtA[i] = 0x8
- i++
- i = encodeVarintPb(dAtA, i, uint64(m.Id))
- }
- if m.Op != 0 {
- dAtA[i] = 0x10
- i++
- i = encodeVarintPb(dAtA, i, uint64(m.Op))
- }
- if m.Level != 0 {
- dAtA[i] = 0x18
- i++
- i = encodeVarintPb(dAtA, i, uint64(m.Level))
- }
- if len(m.Checksum) > 0 {
- dAtA[i] = 0x22
- i++
- i = encodeVarintPb(dAtA, i, uint64(len(m.Checksum)))
- i += copy(dAtA[i:], m.Checksum)
- }
- if m.XXX_unrecognized != nil {
- i += copy(dAtA[i:], m.XXX_unrecognized)
- }
- return i, nil
-}
-
-func encodeVarintPb(dAtA []byte, offset int, v uint64) int {
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return offset + 1
-}
-func (m *KV) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Key)
- if l > 0 {
- n += 1 + l + sovPb(uint64(l))
- }
- l = len(m.Value)
- if l > 0 {
- n += 1 + l + sovPb(uint64(l))
- }
- l = len(m.UserMeta)
- if l > 0 {
- n += 1 + l + sovPb(uint64(l))
- }
- if m.Version != 0 {
- n += 1 + sovPb(uint64(m.Version))
- }
- if m.ExpiresAt != 0 {
- n += 1 + sovPb(uint64(m.ExpiresAt))
- }
- l = len(m.Meta)
- if l > 0 {
- n += 1 + l + sovPb(uint64(l))
- }
- if m.StreamId != 0 {
- n += 1 + sovPb(uint64(m.StreamId))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *KVList) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Kv) > 0 {
- for _, e := range m.Kv {
- l = e.Size()
- n += 1 + l + sovPb(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *ManifestChangeSet) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Changes) > 0 {
- for _, e := range m.Changes {
- l = e.Size()
- n += 1 + l + sovPb(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *ManifestChange) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Id != 0 {
- n += 1 + sovPb(uint64(m.Id))
- }
- if m.Op != 0 {
- n += 1 + sovPb(uint64(m.Op))
- }
- if m.Level != 0 {
- n += 1 + sovPb(uint64(m.Level))
- }
- l = len(m.Checksum)
- if l > 0 {
- n += 1 + l + sovPb(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func sovPb(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
-}
-func sozPb(x uint64) (n int) {
- return sovPb(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *KV) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: KV: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: KV: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthPb
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthPb
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
- if m.Key == nil {
- m.Key = []byte{}
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthPb
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthPb
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
- if m.Value == nil {
- m.Value = []byte{}
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field UserMeta", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthPb
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthPb
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.UserMeta = append(m.UserMeta[:0], dAtA[iNdEx:postIndex]...)
- if m.UserMeta == nil {
- m.UserMeta = []byte{}
- }
- iNdEx = postIndex
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
- }
- m.Version = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Version |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ExpiresAt", wireType)
- }
- m.ExpiresAt = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ExpiresAt |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthPb
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthPb
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Meta = append(m.Meta[:0], dAtA[iNdEx:postIndex]...)
- if m.Meta == nil {
- m.Meta = []byte{}
- }
- iNdEx = postIndex
- case 10:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field StreamId", wireType)
- }
- m.StreamId = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.StreamId |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipPb(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthPb
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthPb
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *KVList) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: KVList: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: KVList: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthPb
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthPb
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Kv = append(m.Kv, &KV{})
- if err := m.Kv[len(m.Kv)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipPb(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthPb
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthPb
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ManifestChangeSet) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ManifestChangeSet: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ManifestChangeSet: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Changes", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthPb
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthPb
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Changes = append(m.Changes, &ManifestChange{})
- if err := m.Changes[len(m.Changes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipPb(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthPb
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthPb
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ManifestChange) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ManifestChange: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ManifestChange: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType)
- }
- m.Id = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Id |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType)
- }
- m.Op = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Op |= ManifestChange_Operation(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType)
- }
- m.Level = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Level |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Checksum", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPb
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthPb
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthPb
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Checksum = append(m.Checksum[:0], dAtA[iNdEx:postIndex]...)
- if m.Checksum == nil {
- m.Checksum = []byte{}
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipPb(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthPb
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthPb
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipPb(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowPb
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowPb
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- return iNdEx, nil
- case 1:
- iNdEx += 8
- return iNdEx, nil
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowPb
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthPb
- }
- iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthPb
- }
- return iNdEx, nil
- case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowPb
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipPb(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthPb
- }
- }
- return iNdEx, nil
- case 4:
- return iNdEx, nil
- case 5:
- iNdEx += 4
- return iNdEx, nil
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- }
- panic("unreachable")
-}
-
-var (
- ErrInvalidLengthPb = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowPb = fmt.Errorf("proto: integer overflow")
-)
diff --git a/vendor/github.com/dgraph-io/badger/pb/pb.proto b/vendor/github.com/dgraph-io/badger/pb/pb.proto
deleted file mode 100644
index c6e7f413..00000000
--- a/vendor/github.com/dgraph-io/badger/pb/pb.proto
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (C) 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Use protos/gen.sh to generate .pb.go files.
-syntax = "proto3";
-
-package pb;
-
-message KV {
- bytes key = 1;
- bytes value = 2;
- bytes user_meta = 3;
- uint64 version = 4;
- uint64 expires_at = 5;
- bytes meta = 6;
-
- // Stream id is used to identify which stream the KV came from.
- uint32 stream_id = 10;
-}
-
-message KVList {
- repeated KV kv = 1;
-}
-
-message ManifestChangeSet {
- // A set of changes that are applied atomically.
- repeated ManifestChange changes = 1;
-}
-
-message ManifestChange {
- uint64 Id = 1;
- enum Operation {
- CREATE = 0;
- DELETE = 1;
- }
- Operation Op = 2;
- uint32 Level = 3; // Only used for CREATE
- bytes Checksum = 4; // Only used for CREATE
-}
diff --git a/vendor/github.com/dgraph-io/badger/publisher.go b/vendor/github.com/dgraph-io/badger/publisher.go
deleted file mode 100644
index 24588f5c..00000000
--- a/vendor/github.com/dgraph-io/badger/publisher.go
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "bytes"
- "sync"
-
- "github.com/dgraph-io/badger/pb"
- "github.com/dgraph-io/badger/y"
-)
-
-type subscriber struct {
- prefixes [][]byte
- sendCh chan<- *pb.KVList
- subCloser *y.Closer
-}
-
-type publisher struct {
- sync.Mutex
- pubCh chan requests
- subscribers map[uint64]subscriber
- nextID uint64
-}
-
-func newPublisher() *publisher {
- return &publisher{
- pubCh: make(chan requests, 1000),
- subscribers: make(map[uint64]subscriber),
- nextID: 0,
- }
-}
-
-func (p *publisher) listenForUpdates(c *y.Closer) {
- defer func() {
- p.cleanSubscribers()
- c.Done()
- }()
- slurp := func(batch []*request) {
- for {
- select {
- case reqs := <-p.pubCh:
- batch = append(batch, reqs...)
- default:
- p.publishUpdates(batch)
- return
- }
- }
- }
- for {
- select {
- case <-c.HasBeenClosed():
- return
- case reqs := <-p.pubCh:
- slurp(reqs)
- }
- }
-}
-
-func (p *publisher) publishUpdates(reqs requests) {
- kvs := &pb.KVList{}
- p.Lock()
- defer func() {
- p.Unlock()
- // Release all the request.
- reqs.DecrRef()
- }()
-
- // TODO: Optimize this, so we can figure out key -> subscriber quickly, without iterating over
- // all the prefixes.
- // TODO: Use trie to find subscribers.
- for _, s := range p.subscribers {
- // BUG: This would send out the same entry multiple times on multiple matches for the same
- // subscriber.
- for _, prefix := range s.prefixes {
- for _, req := range reqs {
- for _, e := range req.Entries {
- if bytes.HasPrefix(e.Key, prefix) {
- // TODO: Maybe we can optimize this by creating the KV once and sending it
- // over to multiple subscribers.
- k := y.SafeCopy(nil, e.Key)
- kv := &pb.KV{
- Key: y.ParseKey(k),
- Value: y.SafeCopy(nil, e.Value),
- UserMeta: []byte{e.UserMeta},
- ExpiresAt: e.ExpiresAt,
- Version: y.ParseTs(k),
- }
- kvs.Kv = append(kvs.Kv, kv)
- }
- }
- }
- }
- if len(kvs.GetKv()) > 0 {
- s.sendCh <- kvs
- }
- }
-}
-
-func (p *publisher) newSubscriber(c *y.Closer, prefixes ...[]byte) (<-chan *pb.KVList, uint64) {
- p.Lock()
- defer p.Unlock()
- ch := make(chan *pb.KVList, 1000)
- id := p.nextID
- // Increment next ID.
- p.nextID++
- p.subscribers[id] = subscriber{
- prefixes: prefixes,
- sendCh: ch,
- subCloser: c,
- }
- return ch, id
-}
-
-// cleanSubscribers stops all the subscribers. Ideally, It should be called while closing DB.
-func (p *publisher) cleanSubscribers() {
- p.Lock()
- defer p.Unlock()
- for id, s := range p.subscribers {
- delete(p.subscribers, id)
- s.subCloser.SignalAndWait()
- }
-}
-
-func (p *publisher) deleteSubscriber(id uint64) {
- p.Lock()
- defer p.Unlock()
- if _, ok := p.subscribers[id]; !ok {
- return
- }
- delete(p.subscribers, id)
-}
-
-func (p *publisher) sendUpdates(reqs []*request) {
- // TODO: Prefix check before pushing into pubCh.
- if p.noOfSubscribers() != 0 {
- p.pubCh <- reqs
- }
-}
-
-func (p *publisher) noOfSubscribers() int {
- p.Lock()
- defer p.Unlock()
- return len(p.subscribers)
-}
diff --git a/vendor/github.com/dgraph-io/badger/skl/README.md b/vendor/github.com/dgraph-io/badger/skl/README.md
deleted file mode 100644
index e22e4590..00000000
--- a/vendor/github.com/dgraph-io/badger/skl/README.md
+++ /dev/null
@@ -1,113 +0,0 @@
-This is much better than `skiplist` and `slist`.
-
-```
-BenchmarkReadWrite/frac_0-8 3000000 537 ns/op
-BenchmarkReadWrite/frac_1-8 3000000 503 ns/op
-BenchmarkReadWrite/frac_2-8 3000000 492 ns/op
-BenchmarkReadWrite/frac_3-8 3000000 475 ns/op
-BenchmarkReadWrite/frac_4-8 3000000 440 ns/op
-BenchmarkReadWrite/frac_5-8 5000000 442 ns/op
-BenchmarkReadWrite/frac_6-8 5000000 380 ns/op
-BenchmarkReadWrite/frac_7-8 5000000 338 ns/op
-BenchmarkReadWrite/frac_8-8 5000000 294 ns/op
-BenchmarkReadWrite/frac_9-8 10000000 268 ns/op
-BenchmarkReadWrite/frac_10-8 100000000 26.3 ns/op
-```
-
-And even better than a simple map with read-write lock:
-
-```
-BenchmarkReadWriteMap/frac_0-8 2000000 774 ns/op
-BenchmarkReadWriteMap/frac_1-8 2000000 647 ns/op
-BenchmarkReadWriteMap/frac_2-8 3000000 605 ns/op
-BenchmarkReadWriteMap/frac_3-8 3000000 603 ns/op
-BenchmarkReadWriteMap/frac_4-8 3000000 556 ns/op
-BenchmarkReadWriteMap/frac_5-8 3000000 472 ns/op
-BenchmarkReadWriteMap/frac_6-8 3000000 476 ns/op
-BenchmarkReadWriteMap/frac_7-8 3000000 457 ns/op
-BenchmarkReadWriteMap/frac_8-8 5000000 444 ns/op
-BenchmarkReadWriteMap/frac_9-8 5000000 361 ns/op
-BenchmarkReadWriteMap/frac_10-8 10000000 212 ns/op
-```
-
-# Node Pooling
-
-Command used
-
-```
-rm -Rf tmp && /usr/bin/time -l ./populate -keys_mil 10
-```
-
-For pprof results, we run without using /usr/bin/time. There are four runs below.
-
-Results seem to vary quite a bit between runs.
-
-## Before node pooling
-
-```
-1311.53MB of 1338.69MB total (97.97%)
-Dropped 30 nodes (cum <= 6.69MB)
-Showing top 10 nodes out of 37 (cum >= 12.50MB)
- flat flat% sum% cum cum%
- 523.04MB 39.07% 39.07% 523.04MB 39.07% github.com/dgraph-io/badger/skl.(*Skiplist).Put
- 184.51MB 13.78% 52.85% 184.51MB 13.78% runtime.stringtoslicebyte
- 166.01MB 12.40% 65.25% 689.04MB 51.47% github.com/dgraph-io/badger/mem.(*Table).Put
- 165MB 12.33% 77.58% 165MB 12.33% runtime.convT2E
- 116.92MB 8.73% 86.31% 116.92MB 8.73% bytes.makeSlice
- 62.50MB 4.67% 90.98% 62.50MB 4.67% main.newValue
- 34.50MB 2.58% 93.56% 34.50MB 2.58% github.com/dgraph-io/badger/table.(*BlockIterator).parseKV
- 25.50MB 1.90% 95.46% 100.06MB 7.47% github.com/dgraph-io/badger/y.(*MergeIterator).Next
- 21.06MB 1.57% 97.04% 21.06MB 1.57% github.com/dgraph-io/badger/table.(*Table).read
- 12.50MB 0.93% 97.97% 12.50MB 0.93% github.com/dgraph-io/badger/table.header.Encode
-
- 128.31 real 329.37 user 17.11 sys
-3355660288 maximum resident set size
- 0 average shared memory size
- 0 average unshared data size
- 0 average unshared stack size
- 2203080 page reclaims
- 764 page faults
- 0 swaps
- 275 block input operations
- 76 block output operations
- 0 messages sent
- 0 messages received
- 0 signals received
- 49173 voluntary context switches
- 599922 involuntary context switches
-```
-
-## After node pooling
-
-```
-1963.13MB of 2026.09MB total (96.89%)
-Dropped 29 nodes (cum <= 10.13MB)
-Showing top 10 nodes out of 41 (cum >= 185.62MB)
- flat flat% sum% cum cum%
- 658.05MB 32.48% 32.48% 658.05MB 32.48% github.com/dgraph-io/badger/skl.glob..func1
- 297.51MB 14.68% 47.16% 297.51MB 14.68% runtime.convT2E
- 257.51MB 12.71% 59.87% 257.51MB 12.71% runtime.stringtoslicebyte
- 249.01MB 12.29% 72.16% 1007.06MB 49.70% github.com/dgraph-io/badger/mem.(*Table).Put
- 142.43MB 7.03% 79.19% 142.43MB 7.03% bytes.makeSlice
- 100MB 4.94% 84.13% 758.05MB 37.41% github.com/dgraph-io/badger/skl.newNode
- 99.50MB 4.91% 89.04% 99.50MB 4.91% main.newValue
- 75MB 3.70% 92.74% 75MB 3.70% github.com/dgraph-io/badger/table.(*BlockIterator).parseKV
- 44.62MB 2.20% 94.94% 44.62MB 2.20% github.com/dgraph-io/badger/table.(*Table).read
- 39.50MB 1.95% 96.89% 185.62MB 9.16% github.com/dgraph-io/badger/y.(*MergeIterator).Next
-
- 135.58 real 374.29 user 17.65 sys
-3740614656 maximum resident set size
- 0 average shared memory size
- 0 average unshared data size
- 0 average unshared stack size
- 2276566 page reclaims
- 770 page faults
- 0 swaps
- 128 block input operations
- 90 block output operations
- 0 messages sent
- 0 messages received
- 0 signals received
- 46434 voluntary context switches
- 597049 involuntary context switches
-```
diff --git a/vendor/github.com/dgraph-io/badger/skl/arena.go b/vendor/github.com/dgraph-io/badger/skl/arena.go
deleted file mode 100644
index def55071..00000000
--- a/vendor/github.com/dgraph-io/badger/skl/arena.go
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package skl
-
-import (
- "sync/atomic"
- "unsafe"
-
- "github.com/dgraph-io/badger/y"
-)
-
-const (
- offsetSize = int(unsafe.Sizeof(uint32(0)))
-
- // Always align nodes on 64-bit boundaries, even on 32-bit architectures,
- // so that the node.value field is 64-bit aligned. This is necessary because
- // node.getValueOffset uses atomic.LoadUint64, which expects its input
- // pointer to be 64-bit aligned.
- nodeAlign = int(unsafe.Sizeof(uint64(0))) - 1
-)
-
-// Arena should be lock-free.
-type Arena struct {
- n uint32
- buf []byte
-}
-
-// newArena returns a new arena.
-func newArena(n int64) *Arena {
- // Don't store data at position 0 in order to reserve offset=0 as a kind
- // of nil pointer.
- out := &Arena{
- n: 1,
- buf: make([]byte, n),
- }
- return out
-}
-
-func (s *Arena) size() int64 {
- return int64(atomic.LoadUint32(&s.n))
-}
-
-func (s *Arena) reset() {
- atomic.StoreUint32(&s.n, 0)
-}
-
-// putNode allocates a node in the arena. The node is aligned on a pointer-sized
-// boundary. The arena offset of the node is returned.
-func (s *Arena) putNode(height int) uint32 {
- // Compute the amount of the tower that will never be used, since the height
- // is less than maxHeight.
- unusedSize := (maxHeight - height) * offsetSize
-
- // Pad the allocation with enough bytes to ensure pointer alignment.
- l := uint32(MaxNodeSize - unusedSize + nodeAlign)
- n := atomic.AddUint32(&s.n, l)
- y.AssertTruef(int(n) <= len(s.buf),
- "Arena too small, toWrite:%d newTotal:%d limit:%d",
- l, n, len(s.buf))
-
- // Return the aligned offset.
- m := (n - l + uint32(nodeAlign)) & ^uint32(nodeAlign)
- return m
-}
-
-// Put will *copy* val into arena. To make better use of this, reuse your input
-// val buffer. Returns an offset into buf. User is responsible for remembering
-// size of val. We could also store this size inside arena but the encoding and
-// decoding will incur some overhead.
-func (s *Arena) putVal(v y.ValueStruct) uint32 {
- l := uint32(v.EncodedSize())
- n := atomic.AddUint32(&s.n, l)
- y.AssertTruef(int(n) <= len(s.buf),
- "Arena too small, toWrite:%d newTotal:%d limit:%d",
- l, n, len(s.buf))
- m := n - l
- v.Encode(s.buf[m:])
- return m
-}
-
-func (s *Arena) putKey(key []byte) uint32 {
- l := uint32(len(key))
- n := atomic.AddUint32(&s.n, l)
- y.AssertTruef(int(n) <= len(s.buf),
- "Arena too small, toWrite:%d newTotal:%d limit:%d",
- l, n, len(s.buf))
- m := n - l
- y.AssertTrue(len(key) == copy(s.buf[m:n], key))
- return m
-}
-
-// getNode returns a pointer to the node located at offset. If the offset is
-// zero, then the nil node pointer is returned.
-func (s *Arena) getNode(offset uint32) *node {
- if offset == 0 {
- return nil
- }
-
- return (*node)(unsafe.Pointer(&s.buf[offset]))
-}
-
-// getKey returns byte slice at offset.
-func (s *Arena) getKey(offset uint32, size uint16) []byte {
- return s.buf[offset : offset+uint32(size)]
-}
-
-// getVal returns byte slice at offset. The given size should be just the value
-// size and should NOT include the meta bytes.
-func (s *Arena) getVal(offset uint32, size uint16) (ret y.ValueStruct) {
- ret.Decode(s.buf[offset : offset+uint32(size)])
- return
-}
-
-// getNodeOffset returns the offset of node in the arena. If the node pointer is
-// nil, then the zero offset is returned.
-func (s *Arena) getNodeOffset(nd *node) uint32 {
- if nd == nil {
- return 0
- }
-
- return uint32(uintptr(unsafe.Pointer(nd)) - uintptr(unsafe.Pointer(&s.buf[0])))
-}
diff --git a/vendor/github.com/dgraph-io/badger/skl/skl.go b/vendor/github.com/dgraph-io/badger/skl/skl.go
deleted file mode 100644
index fc2eff98..00000000
--- a/vendor/github.com/dgraph-io/badger/skl/skl.go
+++ /dev/null
@@ -1,517 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
-Adapted from RocksDB inline skiplist.
-
-Key differences:
-- No optimization for sequential inserts (no "prev").
-- No custom comparator.
-- Support overwrites. This requires care when we see the same key when inserting.
- For RocksDB or LevelDB, overwrites are implemented as a newer sequence number in the key, so
- there is no need for values. We don't intend to support versioning. In-place updates of values
- would be more efficient.
-- We discard all non-concurrent code.
-- We do not support Splices. This simplifies the code a lot.
-- No AllocateNode or other pointer arithmetic.
-- We combine the findLessThan, findGreaterOrEqual, etc into one function.
-*/
-
-package skl
-
-import (
- "math"
- "math/rand"
- "sync/atomic"
- "unsafe"
-
- "github.com/dgraph-io/badger/y"
-)
-
-const (
- maxHeight = 20
- heightIncrease = math.MaxUint32 / 3
-)
-
-// MaxNodeSize is the memory footprint of a node of maximum height.
-const MaxNodeSize = int(unsafe.Sizeof(node{}))
-
-type node struct {
- // Multiple parts of the value are encoded as a single uint64 so that it
- // can be atomically loaded and stored:
- // value offset: uint32 (bits 0-31)
- // value size : uint16 (bits 32-47)
- value uint64
-
- // A byte slice is 24 bytes. We are trying to save space here.
- keyOffset uint32 // Immutable. No need to lock to access key.
- keySize uint16 // Immutable. No need to lock to access key.
-
- // Height of the tower.
- height uint16
-
- // Most nodes do not need to use the full height of the tower, since the
- // probability of each successive level decreases exponentially. Because
- // these elements are never accessed, they do not need to be allocated.
- // Therefore, when a node is allocated in the arena, its memory footprint
- // is deliberately truncated to not include unneeded tower elements.
- //
- // All accesses to elements should use CAS operations, with no need to lock.
- tower [maxHeight]uint32
-}
-
-// Skiplist maps keys to values (in memory)
-type Skiplist struct {
- height int32 // Current height. 1 <= height <= kMaxHeight. CAS.
- head *node
- ref int32
- arena *Arena
-}
-
-// IncrRef increases the refcount
-func (s *Skiplist) IncrRef() {
- atomic.AddInt32(&s.ref, 1)
-}
-
-// DecrRef decrements the refcount, deallocating the Skiplist when done using it
-func (s *Skiplist) DecrRef() {
- newRef := atomic.AddInt32(&s.ref, -1)
- if newRef > 0 {
- return
- }
-
- s.arena.reset()
- // Indicate we are closed. Good for testing. Also, lets GC reclaim memory. Race condition
- // here would suggest we are accessing skiplist when we are supposed to have no reference!
- s.arena = nil
- // Since the head references the arena's buf, as long as the head is kept around
- // GC can't release the buf.
- s.head = nil
-}
-
-func newNode(arena *Arena, key []byte, v y.ValueStruct, height int) *node {
- // The base level is already allocated in the node struct.
- offset := arena.putNode(height)
- node := arena.getNode(offset)
- node.keyOffset = arena.putKey(key)
- node.keySize = uint16(len(key))
- node.height = uint16(height)
- node.value = encodeValue(arena.putVal(v), v.EncodedSize())
- return node
-}
-
-func encodeValue(valOffset uint32, valSize uint16) uint64 {
- return uint64(valSize)<<32 | uint64(valOffset)
-}
-
-func decodeValue(value uint64) (valOffset uint32, valSize uint16) {
- valOffset = uint32(value)
- valSize = uint16(value >> 32)
- return
-}
-
-// NewSkiplist makes a new empty skiplist, with a given arena size
-func NewSkiplist(arenaSize int64) *Skiplist {
- arena := newArena(arenaSize)
- head := newNode(arena, nil, y.ValueStruct{}, maxHeight)
- return &Skiplist{
- height: 1,
- head: head,
- arena: arena,
- ref: 1,
- }
-}
-
-func (s *node) getValueOffset() (uint32, uint16) {
- value := atomic.LoadUint64(&s.value)
- return decodeValue(value)
-}
-
-func (s *node) key(arena *Arena) []byte {
- return arena.getKey(s.keyOffset, s.keySize)
-}
-
-func (s *node) setValue(arena *Arena, v y.ValueStruct) {
- valOffset := arena.putVal(v)
- value := encodeValue(valOffset, v.EncodedSize())
- atomic.StoreUint64(&s.value, value)
-}
-
-func (s *node) getNextOffset(h int) uint32 {
- return atomic.LoadUint32(&s.tower[h])
-}
-
-func (s *node) casNextOffset(h int, old, val uint32) bool {
- return atomic.CompareAndSwapUint32(&s.tower[h], old, val)
-}
-
-// Returns true if key is strictly > n.key.
-// If n is nil, this is an "end" marker and we return false.
-//func (s *Skiplist) keyIsAfterNode(key []byte, n *node) bool {
-// y.AssertTrue(n != s.head)
-// return n != nil && y.CompareKeys(key, n.key) > 0
-//}
-
-func randomHeight() int {
- h := 1
- for h < maxHeight && rand.Uint32() <= heightIncrease {
- h++
- }
- return h
-}
-
-func (s *Skiplist) getNext(nd *node, height int) *node {
- return s.arena.getNode(nd.getNextOffset(height))
-}
-
-// findNear finds the node near to key.
-// If less=true, it finds rightmost node such that node.key < key (if allowEqual=false) or
-// node.key <= key (if allowEqual=true).
-// If less=false, it finds leftmost node such that node.key > key (if allowEqual=false) or
-// node.key >= key (if allowEqual=true).
-// Returns the node found. The bool returned is true if the node has key equal to given key.
-func (s *Skiplist) findNear(key []byte, less bool, allowEqual bool) (*node, bool) {
- x := s.head
- level := int(s.getHeight() - 1)
- for {
- // Assume x.key < key.
- next := s.getNext(x, level)
- if next == nil {
- // x.key < key < END OF LIST
- if level > 0 {
- // Can descend further to iterate closer to the end.
- level--
- continue
- }
- // Level=0. Cannot descend further. Let's return something that makes sense.
- if !less {
- return nil, false
- }
- // Try to return x. Make sure it is not a head node.
- if x == s.head {
- return nil, false
- }
- return x, false
- }
-
- nextKey := next.key(s.arena)
- cmp := y.CompareKeys(key, nextKey)
- if cmp > 0 {
- // x.key < next.key < key. We can continue to move right.
- x = next
- continue
- }
- if cmp == 0 {
- // x.key < key == next.key.
- if allowEqual {
- return next, true
- }
- if !less {
- // We want >, so go to base level to grab the next bigger note.
- return s.getNext(next, 0), false
- }
- // We want <. If not base level, we should go closer in the next level.
- if level > 0 {
- level--
- continue
- }
- // On base level. Return x.
- if x == s.head {
- return nil, false
- }
- return x, false
- }
- // cmp < 0. In other words, x.key < key < next.
- if level > 0 {
- level--
- continue
- }
- // At base level. Need to return something.
- if !less {
- return next, false
- }
- // Try to return x. Make sure it is not a head node.
- if x == s.head {
- return nil, false
- }
- return x, false
- }
-}
-
-// findSpliceForLevel returns (outBefore, outAfter) with outBefore.key <= key <= outAfter.key.
-// The input "before" tells us where to start looking.
-// If we found a node with the same key, then we return outBefore = outAfter.
-// Otherwise, outBefore.key < key < outAfter.key.
-func (s *Skiplist) findSpliceForLevel(key []byte, before *node, level int) (*node, *node) {
- for {
- // Assume before.key < key.
- next := s.getNext(before, level)
- if next == nil {
- return before, next
- }
- nextKey := next.key(s.arena)
- cmp := y.CompareKeys(key, nextKey)
- if cmp == 0 {
- // Equality case.
- return next, next
- }
- if cmp < 0 {
- // before.key < key < next.key. We are done for this level.
- return before, next
- }
- before = next // Keep moving right on this level.
- }
-}
-
-func (s *Skiplist) getHeight() int32 {
- return atomic.LoadInt32(&s.height)
-}
-
-// Put inserts the key-value pair.
-func (s *Skiplist) Put(key []byte, v y.ValueStruct) {
- // Since we allow overwrite, we may not need to create a new node. We might not even need to
- // increase the height. Let's defer these actions.
-
- listHeight := s.getHeight()
- var prev [maxHeight + 1]*node
- var next [maxHeight + 1]*node
- prev[listHeight] = s.head
- next[listHeight] = nil
- for i := int(listHeight) - 1; i >= 0; i-- {
- // Use higher level to speed up for current level.
- prev[i], next[i] = s.findSpliceForLevel(key, prev[i+1], i)
- if prev[i] == next[i] {
- prev[i].setValue(s.arena, v)
- return
- }
- }
-
- // We do need to create a new node.
- height := randomHeight()
- x := newNode(s.arena, key, v, height)
-
- // Try to increase s.height via CAS.
- listHeight = s.getHeight()
- for height > int(listHeight) {
- if atomic.CompareAndSwapInt32(&s.height, listHeight, int32(height)) {
- // Successfully increased skiplist.height.
- break
- }
- listHeight = s.getHeight()
- }
-
- // We always insert from the base level and up. After you add a node in base level, we cannot
- // create a node in the level above because it would have discovered the node in the base level.
- for i := 0; i < height; i++ {
- for {
- if prev[i] == nil {
- y.AssertTrue(i > 1) // This cannot happen in base level.
- // We haven't computed prev, next for this level because height exceeds old listHeight.
- // For these levels, we expect the lists to be sparse, so we can just search from head.
- prev[i], next[i] = s.findSpliceForLevel(key, s.head, i)
- // Someone adds the exact same key before we are able to do so. This can only happen on
- // the base level. But we know we are not on the base level.
- y.AssertTrue(prev[i] != next[i])
- }
- nextOffset := s.arena.getNodeOffset(next[i])
- x.tower[i] = nextOffset
- if prev[i].casNextOffset(i, nextOffset, s.arena.getNodeOffset(x)) {
- // Managed to insert x between prev[i] and next[i]. Go to the next level.
- break
- }
- // CAS failed. We need to recompute prev and next.
- // It is unlikely to be helpful to try to use a different level as we redo the search,
- // because it is unlikely that lots of nodes are inserted between prev[i] and next[i].
- prev[i], next[i] = s.findSpliceForLevel(key, prev[i], i)
- if prev[i] == next[i] {
- y.AssertTruef(i == 0, "Equality can happen only on base level: %d", i)
- prev[i].setValue(s.arena, v)
- return
- }
- }
- }
-}
-
-// Empty returns if the Skiplist is empty.
-func (s *Skiplist) Empty() bool {
- return s.findLast() == nil
-}
-
-// findLast returns the last element. If head (empty list), we return nil. All the find functions
-// will NEVER return the head nodes.
-func (s *Skiplist) findLast() *node {
- n := s.head
- level := int(s.getHeight()) - 1
- for {
- next := s.getNext(n, level)
- if next != nil {
- n = next
- continue
- }
- if level == 0 {
- if n == s.head {
- return nil
- }
- return n
- }
- level--
- }
-}
-
-// Get gets the value associated with the key. It returns a valid value if it finds equal or earlier
-// version of the same key.
-func (s *Skiplist) Get(key []byte) y.ValueStruct {
- n, _ := s.findNear(key, false, true) // findGreaterOrEqual.
- if n == nil {
- return y.ValueStruct{}
- }
-
- nextKey := s.arena.getKey(n.keyOffset, n.keySize)
- if !y.SameKey(key, nextKey) {
- return y.ValueStruct{}
- }
-
- valOffset, valSize := n.getValueOffset()
- vs := s.arena.getVal(valOffset, valSize)
- vs.Version = y.ParseTs(nextKey)
- return vs
-}
-
-// NewIterator returns a skiplist iterator. You have to Close() the iterator.
-func (s *Skiplist) NewIterator() *Iterator {
- s.IncrRef()
- return &Iterator{list: s}
-}
-
-// MemSize returns the size of the Skiplist in terms of how much memory is used within its internal
-// arena.
-func (s *Skiplist) MemSize() int64 { return s.arena.size() }
-
-// Iterator is an iterator over skiplist object. For new objects, you just
-// need to initialize Iterator.list.
-type Iterator struct {
- list *Skiplist
- n *node
-}
-
-// Close frees the resources held by the iterator
-func (s *Iterator) Close() error {
- s.list.DecrRef()
- return nil
-}
-
-// Valid returns true iff the iterator is positioned at a valid node.
-func (s *Iterator) Valid() bool { return s.n != nil }
-
-// Key returns the key at the current position.
-func (s *Iterator) Key() []byte {
- return s.list.arena.getKey(s.n.keyOffset, s.n.keySize)
-}
-
-// Value returns value.
-func (s *Iterator) Value() y.ValueStruct {
- valOffset, valSize := s.n.getValueOffset()
- return s.list.arena.getVal(valOffset, valSize)
-}
-
-// Next advances to the next position.
-func (s *Iterator) Next() {
- y.AssertTrue(s.Valid())
- s.n = s.list.getNext(s.n, 0)
-}
-
-// Prev advances to the previous position.
-func (s *Iterator) Prev() {
- y.AssertTrue(s.Valid())
- s.n, _ = s.list.findNear(s.Key(), true, false) // find <. No equality allowed.
-}
-
-// Seek advances to the first entry with a key >= target.
-func (s *Iterator) Seek(target []byte) {
- s.n, _ = s.list.findNear(target, false, true) // find >=.
-}
-
-// SeekForPrev finds an entry with key <= target.
-func (s *Iterator) SeekForPrev(target []byte) {
- s.n, _ = s.list.findNear(target, true, true) // find <=.
-}
-
-// SeekToFirst seeks position at the first entry in list.
-// Final state of iterator is Valid() iff list is not empty.
-func (s *Iterator) SeekToFirst() {
- s.n = s.list.getNext(s.list.head, 0)
-}
-
-// SeekToLast seeks position at the last entry in list.
-// Final state of iterator is Valid() iff list is not empty.
-func (s *Iterator) SeekToLast() {
- s.n = s.list.findLast()
-}
-
-// UniIterator is a unidirectional memtable iterator. It is a thin wrapper around
-// Iterator. We like to keep Iterator as before, because it is more powerful and
-// we might support bidirectional iterators in the future.
-type UniIterator struct {
- iter *Iterator
- reversed bool
-}
-
-// NewUniIterator returns a UniIterator.
-func (s *Skiplist) NewUniIterator(reversed bool) *UniIterator {
- return &UniIterator{
- iter: s.NewIterator(),
- reversed: reversed,
- }
-}
-
-// Next implements y.Interface
-func (s *UniIterator) Next() {
- if !s.reversed {
- s.iter.Next()
- } else {
- s.iter.Prev()
- }
-}
-
-// Rewind implements y.Interface
-func (s *UniIterator) Rewind() {
- if !s.reversed {
- s.iter.SeekToFirst()
- } else {
- s.iter.SeekToLast()
- }
-}
-
-// Seek implements y.Interface
-func (s *UniIterator) Seek(key []byte) {
- if !s.reversed {
- s.iter.Seek(key)
- } else {
- s.iter.SeekForPrev(key)
- }
-}
-
-// Key implements y.Interface
-func (s *UniIterator) Key() []byte { return s.iter.Key() }
-
-// Value implements y.Interface
-func (s *UniIterator) Value() y.ValueStruct { return s.iter.Value() }
-
-// Valid implements y.Interface
-func (s *UniIterator) Valid() bool { return s.iter.Valid() }
-
-// Close implements y.Interface (and frees up the iter's resources)
-func (s *UniIterator) Close() error { return s.iter.Close() }
diff --git a/vendor/github.com/dgraph-io/badger/stream.go b/vendor/github.com/dgraph-io/badger/stream.go
deleted file mode 100644
index f0841a6a..00000000
--- a/vendor/github.com/dgraph-io/badger/stream.go
+++ /dev/null
@@ -1,385 +0,0 @@
-/*
- * Copyright 2018 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "bytes"
- "context"
- "math"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/dgraph-io/badger/pb"
- "github.com/dgraph-io/badger/y"
- humanize "github.com/dustin/go-humanize"
-)
-
-const pageSize = 4 << 20 // 4MB
-
-// Stream provides a framework to concurrently iterate over a snapshot of Badger, pick up
-// key-values, batch them up and call Send. Stream does concurrent iteration over many smaller key
-// ranges. It does NOT send keys in lexicographical sorted order. To get keys in sorted
-// order, use Iterator.
-type Stream struct {
- // Prefix to only iterate over certain range of keys. If set to nil (default), Stream would
- // iterate over the entire DB.
- Prefix []byte
-
- // Number of goroutines to use for iterating over key ranges. Defaults to 16.
- NumGo int
-
- // Badger would produce log entries in Infof to indicate the progress of Stream. LogPrefix can
- // be used to help differentiate them from other activities. Default is "Badger.Stream".
- LogPrefix string
-
- // ChooseKey is invoked each time a new key is encountered. Note that this is not called
- // on every version of the value, only the first encountered version (i.e. the highest version
- // of the value a key has). ChooseKey can be left nil to select all keys.
- //
- // Note: Calls to ChooseKey are concurrent.
- ChooseKey func(item *Item) bool
-
- // KeyToList, similar to ChooseKey, is only invoked on the highest version of the value. It
- // is upto the caller to iterate over the versions and generate zero, one or more KVs. It
- // is expected that the user would advance the iterator to go through the versions of the
- // values. However, the user MUST immediately return from this function on the first encounter
- // with a mismatching key. See example usage in ToList function. Can be left nil to use ToList
- // function by default.
- //
- // Note: Calls to KeyToList are concurrent.
- KeyToList func(key []byte, itr *Iterator) (*pb.KVList, error)
-
- // This is the method where Stream sends the final output. All calls to Send are done by a
- // single goroutine, i.e. logic within Send method can expect single threaded execution.
- Send func(*pb.KVList) error
-
- readTs uint64
- db *DB
- rangeCh chan keyRange
- kvChan chan *pb.KVList
- nextStreamId uint32
-}
-
-// ToList is a default implementation of KeyToList. It picks up all valid versions of the key,
-// skipping over deleted or expired keys.
-func (st *Stream) ToList(key []byte, itr *Iterator) (*pb.KVList, error) {
- list := &pb.KVList{}
- for ; itr.Valid(); itr.Next() {
- item := itr.Item()
- if item.IsDeletedOrExpired() {
- break
- }
- if !bytes.Equal(key, item.Key()) {
- // Break out on the first encounter with another key.
- break
- }
-
- valCopy, err := item.ValueCopy(nil)
- if err != nil {
- return nil, err
- }
- kv := &pb.KV{
- Key: item.KeyCopy(nil),
- Value: valCopy,
- UserMeta: []byte{item.UserMeta()},
- Version: item.Version(),
- ExpiresAt: item.ExpiresAt(),
- }
- list.Kv = append(list.Kv, kv)
- if st.db.opt.NumVersionsToKeep == 1 {
- break
- }
-
- if item.DiscardEarlierVersions() {
- break
- }
- }
- return list, nil
-}
-
-// keyRange is [start, end), including start, excluding end. Do ensure that the start,
-// end byte slices are owned by keyRange struct.
-func (st *Stream) produceRanges(ctx context.Context) {
- splits := st.db.KeySplits(st.Prefix)
-
- // We don't need to create more key ranges than NumGo goroutines. This way, we will have limited
- // number of "streams" coming out, which then helps limit the memory used by SSWriter.
- {
- pickEvery := int(math.Floor(float64(len(splits)) / float64(st.NumGo)))
- if pickEvery < 1 {
- pickEvery = 1
- }
- filtered := splits[:0]
- for i, split := range splits {
- if (i+1)%pickEvery == 0 {
- filtered = append(filtered, split)
- }
- }
- splits = filtered
- }
-
- start := y.SafeCopy(nil, st.Prefix)
- for _, key := range splits {
- st.rangeCh <- keyRange{left: start, right: y.SafeCopy(nil, []byte(key))}
- start = y.SafeCopy(nil, []byte(key))
- }
- // Edge case: prefix is empty and no splits exist. In that case, we should have at least one
- // keyRange output.
- st.rangeCh <- keyRange{left: start}
- close(st.rangeCh)
-}
-
-// produceKVs picks up ranges from rangeCh, generates KV lists and sends them to kvChan.
-func (st *Stream) produceKVs(ctx context.Context) error {
- var size int
- var txn *Txn
- if st.readTs > 0 {
- txn = st.db.NewTransactionAt(st.readTs, false)
- } else {
- txn = st.db.NewTransaction(false)
- }
- defer txn.Discard()
-
- iterate := func(kr keyRange) error {
- iterOpts := DefaultIteratorOptions
- iterOpts.AllVersions = true
- iterOpts.Prefix = st.Prefix
- iterOpts.PrefetchValues = false
- itr := txn.NewIterator(iterOpts)
- defer itr.Close()
-
- // This unique stream id is used to identify all the keys from this iteration.
- streamId := atomic.AddUint32(&st.nextStreamId, 1)
-
- outList := new(pb.KVList)
- var prevKey []byte
- for itr.Seek(kr.left); itr.Valid(); {
- // it.Valid would only return true for keys with the provided Prefix in iterOpts.
- item := itr.Item()
- if bytes.Equal(item.Key(), prevKey) {
- itr.Next()
- continue
- }
- prevKey = append(prevKey[:0], item.Key()...)
-
- // Check if we reached the end of the key range.
- if len(kr.right) > 0 && bytes.Compare(item.Key(), kr.right) >= 0 {
- break
- }
- // Check if we should pick this key.
- if st.ChooseKey != nil && !st.ChooseKey(item) {
- continue
- }
-
- // Now convert to key value.
- list, err := st.KeyToList(item.KeyCopy(nil), itr)
- if err != nil {
- return err
- }
- if list == nil || len(list.Kv) == 0 {
- continue
- }
- outList.Kv = append(outList.Kv, list.Kv...)
- size += list.Size()
- if size >= pageSize {
- for _, kv := range outList.Kv {
- kv.StreamId = streamId
- }
- select {
- case st.kvChan <- outList:
- case <-ctx.Done():
- return ctx.Err()
- }
- outList = new(pb.KVList)
- size = 0
- }
- }
- if len(outList.Kv) > 0 {
- for _, kv := range outList.Kv {
- kv.StreamId = streamId
- }
- // TODO: Think of a way to indicate that a stream is over.
- select {
- case st.kvChan <- outList:
- case <-ctx.Done():
- return ctx.Err()
- }
- }
- return nil
- }
-
- for {
- select {
- case kr, ok := <-st.rangeCh:
- if !ok {
- // Done with the keys.
- return nil
- }
- if err := iterate(kr); err != nil {
- return err
- }
- case <-ctx.Done():
- return ctx.Err()
- }
- }
-}
-
-func (st *Stream) streamKVs(ctx context.Context) error {
- var count int
- var bytesSent uint64
- t := time.NewTicker(time.Second)
- defer t.Stop()
- now := time.Now()
-
- slurp := func(batch *pb.KVList) error {
- loop:
- for {
- select {
- case kvs, ok := <-st.kvChan:
- if !ok {
- break loop
- }
- y.AssertTrue(kvs != nil)
- batch.Kv = append(batch.Kv, kvs.Kv...)
- default:
- break loop
- }
- }
- sz := uint64(batch.Size())
- bytesSent += sz
- count += len(batch.Kv)
- t := time.Now()
- if err := st.Send(batch); err != nil {
- return err
- }
- st.db.opt.Infof("%s Created batch of size: %s in %s.\n",
- st.LogPrefix, humanize.Bytes(sz), time.Since(t))
- return nil
- }
-
-outer:
- for {
- var batch *pb.KVList
- select {
- case <-ctx.Done():
- return ctx.Err()
-
- case <-t.C:
- dur := time.Since(now)
- durSec := uint64(dur.Seconds())
- if durSec == 0 {
- continue
- }
- speed := bytesSent / durSec
- st.db.opt.Infof("%s Time elapsed: %s, bytes sent: %s, speed: %s/sec\n", st.LogPrefix,
- y.FixedDuration(dur), humanize.Bytes(bytesSent), humanize.Bytes(speed))
-
- case kvs, ok := <-st.kvChan:
- if !ok {
- break outer
- }
- y.AssertTrue(kvs != nil)
- batch = kvs
- if err := slurp(batch); err != nil {
- return err
- }
- }
- }
-
- st.db.opt.Infof("%s Sent %d keys\n", st.LogPrefix, count)
- return nil
-}
-
-// Orchestrate runs Stream. It picks up ranges from the SSTables, then runs NumGo number of
-// goroutines to iterate over these ranges and batch up KVs in lists. It concurrently runs a single
-// goroutine to pick these lists, batch them up further and send to Output.Send. Orchestrate also
-// spits logs out to Infof, using provided LogPrefix. Note that all calls to Output.Send
-// are serial. In case any of these steps encounter an error, Orchestrate would stop execution and
-// return that error. Orchestrate can be called multiple times, but in serial order.
-func (st *Stream) Orchestrate(ctx context.Context) error {
- st.rangeCh = make(chan keyRange, 3) // Contains keys for posting lists.
-
- // kvChan should only have a small capacity to ensure that we don't buffer up too much data if
- // sending is slow. Page size is set to 4MB, which is used to lazily cap the size of each
- // KVList. To get 128MB buffer, we can set the channel size to 32.
- st.kvChan = make(chan *pb.KVList, 32)
-
- if st.KeyToList == nil {
- st.KeyToList = st.ToList
- }
-
- // Picks up ranges from Badger, and sends them to rangeCh.
- go st.produceRanges(ctx)
-
- errCh := make(chan error, 1) // Stores error by consumeKeys.
- var wg sync.WaitGroup
- for i := 0; i < st.NumGo; i++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
- // Picks up ranges from rangeCh, generates KV lists, and sends them to kvChan.
- if err := st.produceKVs(ctx); err != nil {
- select {
- case errCh <- err:
- default:
- }
- }
- }()
- }
-
- // Pick up key-values from kvChan and send to stream.
- kvErr := make(chan error, 1)
- go func() {
- // Picks up KV lists from kvChan, and sends them to Output.
- kvErr <- st.streamKVs(ctx)
- }()
- wg.Wait() // Wait for produceKVs to be over.
- close(st.kvChan) // Now we can close kvChan.
-
- select {
- case err := <-errCh: // Check error from produceKVs.
- return err
- default:
- }
-
- // Wait for key streaming to be over.
- err := <-kvErr
- return err
-}
-
-func (db *DB) newStream() *Stream {
- return &Stream{db: db, NumGo: 16, LogPrefix: "Badger.Stream"}
-}
-
-// NewStream creates a new Stream.
-func (db *DB) NewStream() *Stream {
- if db.opt.managedTxns {
- panic("This API can not be called in managed mode.")
- }
- return db.newStream()
-}
-
-// NewStreamAt creates a new Stream at a particular timestamp. Should only be used with managed DB.
-func (db *DB) NewStreamAt(readTs uint64) *Stream {
- if !db.opt.managedTxns {
- panic("This API can only be called in managed mode.")
- }
- stream := db.newStream()
- stream.readTs = readTs
- return stream
-}
diff --git a/vendor/github.com/dgraph-io/badger/stream_writer.go b/vendor/github.com/dgraph-io/badger/stream_writer.go
deleted file mode 100644
index 3d2a7992..00000000
--- a/vendor/github.com/dgraph-io/badger/stream_writer.go
+++ /dev/null
@@ -1,358 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "math"
-
- "github.com/dgraph-io/badger/pb"
- "github.com/dgraph-io/badger/table"
- "github.com/dgraph-io/badger/y"
- humanize "github.com/dustin/go-humanize"
- "github.com/pkg/errors"
-)
-
-const headStreamId uint32 = math.MaxUint32
-
-// StreamWriter is used to write data coming from multiple streams. The streams must not have any
-// overlapping key ranges. Within each stream, the keys must be sorted. Badger Stream framework is
-// capable of generating such an output. So, this StreamWriter can be used at the other end to build
-// BadgerDB at a much faster pace by writing SSTables (and value logs) directly to LSM tree levels
-// without causing any compactions at all. This is way faster than using batched writer or using
-// transactions, but only applicable in situations where the keys are pre-sorted and the DB is being
-// bootstrapped. Existing data would get deleted when using this writer. So, this is only useful
-// when restoring from backup or replicating DB across servers.
-//
-// StreamWriter should not be called on in-use DB instances. It is designed only to bootstrap new
-// DBs.
-type StreamWriter struct {
- db *DB
- done func()
- throttle *y.Throttle
- maxVersion uint64
- writers map[uint32]*sortedWriter
- closer *y.Closer
-}
-
-// NewStreamWriter creates a StreamWriter. Right after creating StreamWriter, Prepare must be
-// called. The memory usage of a StreamWriter is directly proportional to the number of streams
-// possible. So, efforts must be made to keep the number of streams low. Stream framework would
-// typically use 16 goroutines and hence create 16 streams.
-func (db *DB) NewStreamWriter() *StreamWriter {
- return &StreamWriter{
- db: db,
- // throttle shouldn't make much difference. Memory consumption is based on the number of
- // concurrent streams being processed.
- throttle: y.NewThrottle(16),
- writers: make(map[uint32]*sortedWriter),
- closer: y.NewCloser(0),
- }
-}
-
-// Prepare should be called before writing any entry to StreamWriter. It deletes all data present in
-// existing DB, stops compactions and any writes being done by other means. Be very careful when
-// calling Prepare, because it could result in permanent data loss. Not calling Prepare would result
-// in a corrupt Badger instance.
-func (sw *StreamWriter) Prepare() error {
- var err error
- sw.done, err = sw.db.dropAll()
- return err
-}
-
-// Write writes KVList to DB. Each KV within the list contains the stream id which StreamWriter
-// would use to demux the writes. Write is not thread safe and it should NOT be called concurrently.
-func (sw *StreamWriter) Write(kvs *pb.KVList) error {
- if len(kvs.GetKv()) == 0 {
- return nil
- }
- streamReqs := make(map[uint32]*request)
- for _, kv := range kvs.Kv {
- var meta, userMeta byte
- if len(kv.Meta) > 0 {
- meta = kv.Meta[0]
- }
- if len(kv.UserMeta) > 0 {
- userMeta = kv.UserMeta[0]
- }
- if sw.maxVersion < kv.Version {
- sw.maxVersion = kv.Version
- }
- e := &Entry{
- Key: y.KeyWithTs(kv.Key, kv.Version),
- Value: kv.Value,
- UserMeta: userMeta,
- ExpiresAt: kv.ExpiresAt,
- meta: meta,
- }
- // If the value can be colocated with the key in LSM tree, we can skip
- // writing the value to value log.
- e.skipVlog = sw.db.shouldWriteValueToLSM(*e)
- req := streamReqs[kv.StreamId]
- if req == nil {
- req = &request{}
- streamReqs[kv.StreamId] = req
- }
- req.Entries = append(req.Entries, e)
- }
- var all []*request
- for _, req := range streamReqs {
- all = append(all, req)
- }
- if err := sw.db.vlog.write(all); err != nil {
- return err
- }
-
- for streamId, req := range streamReqs {
- writer, ok := sw.writers[streamId]
- if !ok {
- writer = sw.newWriter(streamId)
- sw.writers[streamId] = writer
- }
- writer.reqCh <- req
- }
- return nil
-}
-
-// Flush is called once we are done writing all the entries. It syncs DB directories. It also
-// updates Oracle with maxVersion found in all entries (if DB is not managed).
-func (sw *StreamWriter) Flush() error {
- defer sw.done()
-
- sw.closer.SignalAndWait()
- var maxHead valuePointer
- for _, writer := range sw.writers {
- if err := writer.Done(); err != nil {
- return err
- }
- if maxHead.Less(writer.head) {
- maxHead = writer.head
- }
- }
-
- // Encode and write the value log head into a new table.
- data := make([]byte, vptrSize)
- maxHead.Encode(data)
- headWriter := sw.newWriter(headStreamId)
- if err := headWriter.Add(
- y.KeyWithTs(head, sw.maxVersion),
- y.ValueStruct{Value: data}); err != nil {
- return err
- }
- if err := headWriter.Done(); err != nil {
- return err
- }
-
- if !sw.db.opt.managedTxns {
- if sw.db.orc != nil {
- sw.db.orc.Stop()
- }
- sw.db.orc = newOracle(sw.db.opt)
- sw.db.orc.nextTxnTs = sw.maxVersion
- sw.db.orc.txnMark.Done(sw.maxVersion)
- sw.db.orc.readMark.Done(sw.maxVersion)
- sw.db.orc.incrementNextTs()
- }
-
- // Wait for all files to be written.
- if err := sw.throttle.Finish(); err != nil {
- return err
- }
-
- // Now sync the directories, so all the files are registered.
- if sw.db.opt.ValueDir != sw.db.opt.Dir {
- if err := syncDir(sw.db.opt.ValueDir); err != nil {
- return err
- }
- }
- if err := syncDir(sw.db.opt.Dir); err != nil {
- return err
- }
- return sw.db.lc.validate()
-}
-
-type sortedWriter struct {
- db *DB
- throttle *y.Throttle
-
- builder *table.Builder
- lastKey []byte
- streamId uint32
- reqCh chan *request
- head valuePointer
-}
-
-func (sw *StreamWriter) newWriter(streamId uint32) *sortedWriter {
- w := &sortedWriter{
- db: sw.db,
- streamId: streamId,
- throttle: sw.throttle,
- builder: table.NewTableBuilder(),
- reqCh: make(chan *request, 3),
- }
- sw.closer.AddRunning(1)
- go w.handleRequests(sw.closer)
- return w
-}
-
-// ErrUnsortedKey is returned when any out of order key arrives at sortedWriter during call to Add.
-var ErrUnsortedKey = errors.New("Keys not in sorted order")
-
-func (w *sortedWriter) handleRequests(closer *y.Closer) {
- defer closer.Done()
-
- process := func(req *request) {
- for i, e := range req.Entries {
- vptr := req.Ptrs[i]
- if !vptr.IsZero() {
- y.AssertTrue(w.head.Less(vptr))
- w.head = vptr
- }
-
- var vs y.ValueStruct
- if e.skipVlog {
- vs = y.ValueStruct{
- Value: e.Value,
- Meta: e.meta,
- UserMeta: e.UserMeta,
- ExpiresAt: e.ExpiresAt,
- }
- } else {
- vbuf := make([]byte, vptrSize)
- vs = y.ValueStruct{
- Value: vptr.Encode(vbuf),
- Meta: e.meta | bitValuePointer,
- UserMeta: e.UserMeta,
- ExpiresAt: e.ExpiresAt,
- }
- }
- if err := w.Add(e.Key, vs); err != nil {
- panic(err)
- }
- }
- }
-
- for {
- select {
- case req := <-w.reqCh:
- process(req)
- case <-closer.HasBeenClosed():
- close(w.reqCh)
- for req := range w.reqCh {
- process(req)
- }
- return
- }
- }
-}
-
-// Add adds key and vs to sortedWriter.
-func (w *sortedWriter) Add(key []byte, vs y.ValueStruct) error {
- if len(w.lastKey) > 0 && y.CompareKeys(key, w.lastKey) <= 0 {
- return ErrUnsortedKey
- }
-
- sameKey := y.SameKey(key, w.lastKey)
- // Same keys should go into the same SSTable.
- if !sameKey && w.builder.ReachedCapacity(w.db.opt.MaxTableSize) {
- if err := w.send(); err != nil {
- return err
- }
- }
-
- w.lastKey = y.SafeCopy(w.lastKey, key)
- return w.builder.Add(key, vs)
-}
-
-func (w *sortedWriter) send() error {
- if err := w.throttle.Do(); err != nil {
- return err
- }
- go func(builder *table.Builder) {
- data := builder.Finish()
- err := w.createTable(data)
- w.throttle.Done(err)
- }(w.builder)
- w.builder = table.NewTableBuilder()
- return nil
-}
-
-// Done is called once we are done writing all keys and valueStructs
-// to sortedWriter. It completes writing current SST to disk.
-func (w *sortedWriter) Done() error {
- if w.builder.Empty() {
- return nil
- }
- return w.send()
-}
-
-func (w *sortedWriter) createTable(data []byte) error {
- if len(data) == 0 {
- return nil
- }
- fileID := w.db.lc.reserveFileID()
- fd, err := y.CreateSyncedFile(table.NewFilename(fileID, w.db.opt.Dir), true)
- if err != nil {
- return err
- }
- if _, err := fd.Write(data); err != nil {
- return err
- }
- tbl, err := table.OpenTable(fd, w.db.opt.TableLoadingMode, nil)
- if err != nil {
- return err
- }
- lc := w.db.lc
-
- var lhandler *levelHandler
- // We should start the levels from 1, because we need level 0 to set the !badger!head key. We
- // cannot mix up this key with other keys from the DB, otherwise we would introduce a range
- // overlap violation.
- y.AssertTrue(len(lc.levels) > 1)
- for _, l := range lc.levels[1:] {
- ratio := float64(l.getTotalSize()) / float64(l.maxTotalSize)
- if ratio < 1.0 {
- lhandler = l
- break
- }
- }
- if lhandler == nil {
- // If we're exceeding the size of the lowest level, shove it in the lowest level. Can't do
- // better than that.
- lhandler = lc.levels[len(lc.levels)-1]
- }
- if w.streamId == headStreamId {
- // This is a special !badger!head key. We should store it at level 0, separate from all the
- // other keys to avoid an overlap.
- lhandler = lc.levels[0]
- }
- // Now that table can be opened successfully, let's add this to the MANIFEST.
- change := &pb.ManifestChange{
- Id: tbl.ID(),
- Op: pb.ManifestChange_CREATE,
- Level: uint32(lhandler.level),
- Checksum: tbl.Checksum,
- }
- if err := w.db.manifest.addChanges([]*pb.ManifestChange{change}); err != nil {
- return err
- }
- if err := lhandler.replaceTables([]*table.Table{}, []*table.Table{tbl}); err != nil {
- return err
- }
- w.db.opt.Infof("Table created: %d at level: %d for stream: %d. Size: %s\n",
- fileID, lhandler.level, w.streamId, humanize.Bytes(uint64(tbl.Size())))
- return nil
-}
diff --git a/vendor/github.com/dgraph-io/badger/structs.go b/vendor/github.com/dgraph-io/badger/structs.go
deleted file mode 100644
index 51d16cdb..00000000
--- a/vendor/github.com/dgraph-io/badger/structs.go
+++ /dev/null
@@ -1,186 +0,0 @@
-package badger
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "hash/crc32"
- "time"
-
- "github.com/dgraph-io/badger/y"
-)
-
-type valuePointer struct {
- Fid uint32
- Len uint32
- Offset uint32
-}
-
-func (p valuePointer) Less(o valuePointer) bool {
- if p.Fid != o.Fid {
- return p.Fid < o.Fid
- }
- if p.Offset != o.Offset {
- return p.Offset < o.Offset
- }
- return p.Len < o.Len
-}
-
-func (p valuePointer) IsZero() bool {
- return p.Fid == 0 && p.Offset == 0 && p.Len == 0
-}
-
-const vptrSize = 12
-
-// Encode encodes Pointer into byte buffer.
-func (p valuePointer) Encode(b []byte) []byte {
- binary.BigEndian.PutUint32(b[:4], p.Fid)
- binary.BigEndian.PutUint32(b[4:8], p.Len)
- binary.BigEndian.PutUint32(b[8:12], p.Offset)
- return b[:vptrSize]
-}
-
-func (p *valuePointer) Decode(b []byte) {
- p.Fid = binary.BigEndian.Uint32(b[:4])
- p.Len = binary.BigEndian.Uint32(b[4:8])
- p.Offset = binary.BigEndian.Uint32(b[8:12])
-}
-
-// header is used in value log as a header before Entry.
-type header struct {
- klen uint32
- vlen uint32
- expiresAt uint64
- meta byte
- userMeta byte
-}
-
-const (
- headerBufSize = 18
-)
-
-func (h header) Encode(out []byte) {
- y.AssertTrue(len(out) >= headerBufSize)
- binary.BigEndian.PutUint32(out[0:4], h.klen)
- binary.BigEndian.PutUint32(out[4:8], h.vlen)
- binary.BigEndian.PutUint64(out[8:16], h.expiresAt)
- out[16] = h.meta
- out[17] = h.userMeta
-}
-
-// Decodes h from buf.
-func (h *header) Decode(buf []byte) {
- h.klen = binary.BigEndian.Uint32(buf[0:4])
- h.vlen = binary.BigEndian.Uint32(buf[4:8])
- h.expiresAt = binary.BigEndian.Uint64(buf[8:16])
- h.meta = buf[16]
- h.userMeta = buf[17]
-}
-
-// Entry provides Key, Value, UserMeta and ExpiresAt. This struct can be used by
-// the user to set data.
-type Entry struct {
- Key []byte
- Value []byte
- UserMeta byte
- ExpiresAt uint64 // time.Unix
- meta byte
-
- // Fields maintained internally.
- offset uint32
- skipVlog bool
-}
-
-func (e *Entry) estimateSize(threshold int) int {
- if len(e.Value) < threshold {
- return len(e.Key) + len(e.Value) + 2 // Meta, UserMeta
- }
- return len(e.Key) + 12 + 2 // 12 for ValuePointer, 2 for metas.
-}
-
-// Encodes e to buf. Returns number of bytes written.
-func encodeEntry(e *Entry, buf *bytes.Buffer) (int, error) {
- h := header{
- klen: uint32(len(e.Key)),
- vlen: uint32(len(e.Value)),
- expiresAt: e.ExpiresAt,
- meta: e.meta,
- userMeta: e.UserMeta,
- }
-
- var headerEnc [headerBufSize]byte
- h.Encode(headerEnc[:])
-
- hash := crc32.New(y.CastagnoliCrcTable)
-
- buf.Write(headerEnc[:])
- if _, err := hash.Write(headerEnc[:]); err != nil {
- return 0, err
- }
-
- buf.Write(e.Key)
- if _, err := hash.Write(e.Key); err != nil {
- return 0, err
- }
-
- buf.Write(e.Value)
- if _, err := hash.Write(e.Value); err != nil {
- return 0, err
- }
-
- var crcBuf [crc32.Size]byte
- binary.BigEndian.PutUint32(crcBuf[:], hash.Sum32())
- buf.Write(crcBuf[:])
-
- return len(headerEnc) + len(e.Key) + len(e.Value) + len(crcBuf), nil
-}
-
-func (e Entry) print(prefix string) {
- fmt.Printf("%s Key: %s Meta: %d UserMeta: %d Offset: %d len(val)=%d",
- prefix, e.Key, e.meta, e.UserMeta, e.offset, len(e.Value))
-}
-
-// NewEntry creates a new entry with key and value passed in args. This newly created entry can be
-// set in a transaction by calling txn.SetEntry(). All other properties of Entry can be set by
-// calling WithMeta, WithDiscard, WithTTL methods on it.
-// This function uses key and value reference, hence users must
-// not modify key and value until the end of transaction.
-func NewEntry(key, value []byte) *Entry {
- return &Entry{
- Key: key,
- Value: value,
- }
-}
-
-// WithMeta adds meta data to Entry e. This byte is stored alongside the key
-// and can be used as an aid to interpret the value or store other contextual
-// bits corresponding to the key-value pair of entry.
-func (e *Entry) WithMeta(meta byte) *Entry {
- e.UserMeta = meta
- return e
-}
-
-// WithDiscard adds a marker to Entry e. This means all the previous versions of the key (of the
-// Entry) will be eligible for garbage collection.
-// This method is only useful if you have set a higher limit for options.NumVersionsToKeep. The
-// default setting is 1, in which case, this function doesn't add any more benefit. If however, you
-// have a higher setting for NumVersionsToKeep (in Dgraph, we set it to infinity), you can use this
-// method to indicate that all the older versions can be discarded and removed during compactions.
-func (e *Entry) WithDiscard() *Entry {
- e.meta = bitDiscardEarlierVersions
- return e
-}
-
-// WithTTL adds time to live duration to Entry e. Entry stored with a TTL would automatically expire
-// after the time has elapsed, and will be eligible for garbage collection.
-func (e *Entry) WithTTL(dur time.Duration) *Entry {
- e.ExpiresAt = uint64(time.Now().Add(dur).Unix())
- return e
-}
-
-// withMergeBit sets merge bit in entry's metadata. This
-// function is called by MergeOperator's Add method.
-func (e *Entry) withMergeBit() *Entry {
- e.meta = bitMergeEntry
- return e
-}
diff --git a/vendor/github.com/dgraph-io/badger/table/README.md b/vendor/github.com/dgraph-io/badger/table/README.md
deleted file mode 100644
index a784f126..00000000
--- a/vendor/github.com/dgraph-io/badger/table/README.md
+++ /dev/null
@@ -1,69 +0,0 @@
-Size of table is 122,173,606 bytes for all benchmarks.
-
-# BenchmarkRead
-```
-$ go test -bench ^BenchmarkRead$ -run ^$ -count 3
-goos: linux
-goarch: amd64
-pkg: github.com/dgraph-io/badger/table
-BenchmarkRead-16 10 153281932 ns/op
-BenchmarkRead-16 10 153454443 ns/op
-BenchmarkRead-16 10 155349696 ns/op
-PASS
-ok github.com/dgraph-io/badger/table 23.549s
-```
-
-Size of table is 122,173,606 bytes, which is ~117MB.
-
-The rate is ~750MB/s using LoadToRAM (when table is in RAM).
-
-To read a 64MB table, this would take ~0.0853s, which is negligible.
-
-# BenchmarkReadAndBuild
-```go
-$ go test -bench BenchmarkReadAndBuild -run ^$ -count 3
-goos: linux
-goarch: amd64
-pkg: github.com/dgraph-io/badger/table
-BenchmarkReadAndBuild-16 2 945041628 ns/op
-BenchmarkReadAndBuild-16 2 947120893 ns/op
-BenchmarkReadAndBuild-16 2 954909506 ns/op
-PASS
-ok github.com/dgraph-io/badger/table 26.856s
-```
-
-The rate is ~122MB/s. To build a 64MB table, this would take ~0.52s. Note that this
-does NOT include the flushing of the table to disk. All we are doing above is
-reading one table (which is in RAM) and write one table in memory.
-
-The table building takes 0.52-0.0853s ~ 0.4347s.
-
-# BenchmarkReadMerged
-Below, we merge 5 tables. The total size remains unchanged at ~122M.
-
-```go
-$ go test -bench ReadMerged -run ^$ -count 3
-BenchmarkReadMerged-16 2 954475788 ns/op
-BenchmarkReadMerged-16 2 955252462 ns/op
-BenchmarkReadMerged-16 2 956857353 ns/op
-PASS
-ok github.com/dgraph-io/badger/table 33.327s
-```
-
-The rate is ~122MB/s. To read a 64MB table using merge iterator, this would take ~0.52s.
-
-# BenchmarkRandomRead
-
-```go
-go test -bench BenchmarkRandomRead$ -run ^$ -count 3
-goos: linux
-goarch: amd64
-pkg: github.com/dgraph-io/badger/table
-BenchmarkRandomRead-16 300000 3596 ns/op
-BenchmarkRandomRead-16 300000 3621 ns/op
-BenchmarkRandomRead-16 300000 3596 ns/op
-PASS
-ok github.com/dgraph-io/badger/table 44.727s
-```
-
-For random read benchmarking, we are randomly reading a key and verifying its value.
diff --git a/vendor/github.com/dgraph-io/badger/table/builder.go b/vendor/github.com/dgraph-io/badger/table/builder.go
deleted file mode 100644
index 0657cbca..00000000
--- a/vendor/github.com/dgraph-io/badger/table/builder.go
+++ /dev/null
@@ -1,237 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package table
-
-import (
- "bytes"
- "encoding/binary"
- "io"
- "math"
-
- "github.com/AndreasBriese/bbloom"
- "github.com/dgraph-io/badger/y"
-)
-
-var (
- restartInterval = 100 // Might want to change this to be based on total size instead of numKeys.
-)
-
-func newBuffer(sz int) *bytes.Buffer {
- b := new(bytes.Buffer)
- b.Grow(sz)
- return b
-}
-
-type header struct {
- plen uint16 // Overlap with base key.
- klen uint16 // Length of the diff.
- vlen uint16 // Length of value.
- prev uint32 // Offset for the previous key-value pair. The offset is relative to block base offset.
-}
-
-// Encode encodes the header.
-func (h header) Encode(b []byte) {
- binary.BigEndian.PutUint16(b[0:2], h.plen)
- binary.BigEndian.PutUint16(b[2:4], h.klen)
- binary.BigEndian.PutUint16(b[4:6], h.vlen)
- binary.BigEndian.PutUint32(b[6:10], h.prev)
-}
-
-// Decode decodes the header.
-func (h *header) Decode(buf []byte) int {
- h.plen = binary.BigEndian.Uint16(buf[0:2])
- h.klen = binary.BigEndian.Uint16(buf[2:4])
- h.vlen = binary.BigEndian.Uint16(buf[4:6])
- h.prev = binary.BigEndian.Uint32(buf[6:10])
- return h.Size()
-}
-
-// Size returns size of the header. Currently it's just a constant.
-func (h header) Size() int { return 10 }
-
-// Builder is used in building a table.
-type Builder struct {
- counter int // Number of keys written for the current block.
-
- // Typically tens or hundreds of meg. This is for one single file.
- buf *bytes.Buffer
-
- baseKey []byte // Base key for the current block.
- baseOffset uint32 // Offset for the current block.
-
- restarts []uint32 // Base offsets of every block.
-
- // Tracks offset for the previous key-value pair. Offset is relative to block base offset.
- prevOffset uint32
-
- keyBuf *bytes.Buffer
- keyCount int
-}
-
-// NewTableBuilder makes a new TableBuilder.
-func NewTableBuilder() *Builder {
- return &Builder{
- keyBuf: newBuffer(1 << 20),
- buf: newBuffer(1 << 20),
- prevOffset: math.MaxUint32, // Used for the first element!
- }
-}
-
-// Close closes the TableBuilder.
-func (b *Builder) Close() {}
-
-// Empty returns whether it's empty.
-func (b *Builder) Empty() bool { return b.buf.Len() == 0 }
-
-// keyDiff returns a suffix of newKey that is different from b.baseKey.
-func (b Builder) keyDiff(newKey []byte) []byte {
- var i int
- for i = 0; i < len(newKey) && i < len(b.baseKey); i++ {
- if newKey[i] != b.baseKey[i] {
- break
- }
- }
- return newKey[i:]
-}
-
-func (b *Builder) addHelper(key []byte, v y.ValueStruct) {
- // Add key to bloom filter.
- if len(key) > 0 {
- var klen [2]byte
- keyNoTs := y.ParseKey(key)
- binary.BigEndian.PutUint16(klen[:], uint16(len(keyNoTs)))
- b.keyBuf.Write(klen[:])
- b.keyBuf.Write(keyNoTs)
- b.keyCount++
- }
-
- // diffKey stores the difference of key with baseKey.
- var diffKey []byte
- if len(b.baseKey) == 0 {
- // Make a copy. Builder should not keep references. Otherwise, caller has to be very careful
- // and will have to make copies of keys every time they add to builder, which is even worse.
- b.baseKey = append(b.baseKey[:0], key...)
- diffKey = key
- } else {
- diffKey = b.keyDiff(key)
- }
-
- h := header{
- plen: uint16(len(key) - len(diffKey)),
- klen: uint16(len(diffKey)),
- vlen: uint16(v.EncodedSize()),
- prev: b.prevOffset, // prevOffset is the location of the last key-value added.
- }
- b.prevOffset = uint32(b.buf.Len()) - b.baseOffset // Remember current offset for the next Add call.
-
- // Layout: header, diffKey, value.
- var hbuf [10]byte
- h.Encode(hbuf[:])
- b.buf.Write(hbuf[:])
- b.buf.Write(diffKey) // We only need to store the key difference.
-
- v.EncodeTo(b.buf)
- b.counter++ // Increment number of keys added for this current block.
-}
-
-func (b *Builder) finishBlock() {
- // When we are at the end of the block and Valid=false, and the user wants to do a Prev,
- // we need a dummy header to tell us the offset of the previous key-value pair.
- b.addHelper([]byte{}, y.ValueStruct{})
-}
-
-// Add adds a key-value pair to the block.
-// If doNotRestart is true, we will not restart even if b.counter >= restartInterval.
-func (b *Builder) Add(key []byte, value y.ValueStruct) error {
- if b.counter >= restartInterval {
- b.finishBlock()
- // Start a new block. Initialize the block.
- b.restarts = append(b.restarts, uint32(b.buf.Len()))
- b.counter = 0
- b.baseKey = []byte{}
- b.baseOffset = uint32(b.buf.Len())
- b.prevOffset = math.MaxUint32 // First key-value pair of block has header.prev=MaxInt.
- }
- b.addHelper(key, value)
- return nil // Currently, there is no meaningful error.
-}
-
-// TODO: vvv this was the comment on ReachedCapacity.
-// FinalSize returns the *rough* final size of the array, counting the header which is
-// not yet written.
-// TODO: Look into why there is a discrepancy. I suspect it is because of Write(empty, empty)
-// at the end. The diff can vary.
-
-// ReachedCapacity returns true if we... roughly (?) reached capacity?
-func (b *Builder) ReachedCapacity(cap int64) bool {
- estimateSz := b.buf.Len() + 8 /* empty header */ + 4*len(b.restarts) +
- 8 /* 8 = end of buf offset + len(restarts) */
- return int64(estimateSz) > cap
-}
-
-// blockIndex generates the block index for the table.
-// It is mainly a list of all the block base offsets.
-func (b *Builder) blockIndex() []byte {
- // Store the end offset, so we know the length of the final block.
- b.restarts = append(b.restarts, uint32(b.buf.Len()))
-
- // Add 4 because we want to write out number of restarts at the end.
- sz := 4*len(b.restarts) + 4
- out := make([]byte, sz)
- buf := out
- for _, r := range b.restarts {
- binary.BigEndian.PutUint32(buf[:4], r)
- buf = buf[4:]
- }
- binary.BigEndian.PutUint32(buf[:4], uint32(len(b.restarts)))
- return out
-}
-
-// Finish finishes the table by appending the index.
-func (b *Builder) Finish() []byte {
- bf := bbloom.New(float64(b.keyCount), 0.01)
- var klen [2]byte
- key := make([]byte, 1024)
- for {
- if _, err := b.keyBuf.Read(klen[:]); err == io.EOF {
- break
- } else if err != nil {
- y.Check(err)
- }
- kl := int(binary.BigEndian.Uint16(klen[:]))
- if cap(key) < kl {
- key = make([]byte, 2*int(kl)) // 2 * uint16 will overflow
- }
- key = key[:kl]
- y.Check2(b.keyBuf.Read(key))
- bf.Add(key)
- }
-
- b.finishBlock() // This will never start a new block.
- index := b.blockIndex()
- b.buf.Write(index)
-
- // Write bloom filter.
- bdata := bf.JSONMarshal()
- n, err := b.buf.Write(bdata)
- y.Check(err)
- var buf [4]byte
- binary.BigEndian.PutUint32(buf[:], uint32(n))
- b.buf.Write(buf[:])
-
- return b.buf.Bytes()
-}
diff --git a/vendor/github.com/dgraph-io/badger/table/iterator.go b/vendor/github.com/dgraph-io/badger/table/iterator.go
deleted file mode 100644
index 0eb5ed01..00000000
--- a/vendor/github.com/dgraph-io/badger/table/iterator.go
+++ /dev/null
@@ -1,539 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package table
-
-import (
- "bytes"
- "io"
- "math"
- "sort"
-
- "github.com/dgraph-io/badger/y"
- "github.com/pkg/errors"
-)
-
-type blockIterator struct {
- data []byte
- pos uint32
- err error
- baseKey []byte
-
- key []byte
- val []byte
- init bool
-
- last header // The last header we saw.
-}
-
-func (itr *blockIterator) Reset() {
- itr.pos = 0
- itr.err = nil
- itr.baseKey = []byte{}
- itr.key = []byte{}
- itr.val = []byte{}
- itr.init = false
- itr.last = header{}
-}
-
-func (itr *blockIterator) Init() {
- if !itr.init {
- itr.Next()
- }
-}
-
-func (itr *blockIterator) Valid() bool {
- return itr != nil && itr.err == nil
-}
-
-func (itr *blockIterator) Error() error {
- return itr.err
-}
-
-func (itr *blockIterator) Close() {}
-
-var (
- origin = 0
- current = 1
-)
-
-// Seek brings us to the first block element that is >= input key.
-func (itr *blockIterator) Seek(key []byte, whence int) {
- itr.err = nil
-
- switch whence {
- case origin:
- itr.Reset()
- case current:
- }
-
- var done bool
- for itr.Init(); itr.Valid(); itr.Next() {
- k := itr.Key()
- if y.CompareKeys(k, key) >= 0 {
- // We are done as k is >= key.
- done = true
- break
- }
- }
- if !done {
- itr.err = io.EOF
- }
-}
-
-func (itr *blockIterator) SeekToFirst() {
- itr.err = nil
- itr.Init()
-}
-
-// SeekToLast brings us to the last element. Valid should return true.
-func (itr *blockIterator) SeekToLast() {
- itr.err = nil
- for itr.Init(); itr.Valid(); itr.Next() {
- }
- itr.Prev()
-}
-
-// parseKV would allocate a new byte slice for key and for value.
-func (itr *blockIterator) parseKV(h header) {
- if cap(itr.key) < int(h.plen+h.klen) {
- sz := int(h.plen) + int(h.klen) // Convert to int before adding to avoid uint16 overflow.
- itr.key = make([]byte, 2*sz)
- }
- itr.key = itr.key[:h.plen+h.klen]
- copy(itr.key, itr.baseKey[:h.plen])
- copy(itr.key[h.plen:], itr.data[itr.pos:itr.pos+uint32(h.klen)])
- itr.pos += uint32(h.klen)
-
- if itr.pos+uint32(h.vlen) > uint32(len(itr.data)) {
- itr.err = errors.Errorf("Value exceeded size of block: %d %d %d %d %v",
- itr.pos, h.klen, h.vlen, len(itr.data), h)
- return
- }
- itr.val = y.SafeCopy(itr.val, itr.data[itr.pos:itr.pos+uint32(h.vlen)])
- itr.pos += uint32(h.vlen)
-}
-
-func (itr *blockIterator) Next() {
- itr.init = true
- itr.err = nil
- if itr.pos >= uint32(len(itr.data)) {
- itr.err = io.EOF
- return
- }
-
- var h header
- itr.pos += uint32(h.Decode(itr.data[itr.pos:]))
- itr.last = h // Store the last header.
-
- if h.klen == 0 && h.plen == 0 {
- // Last entry in the table.
- itr.err = io.EOF
- return
- }
-
- // Populate baseKey if it isn't set yet. This would only happen for the first Next.
- if len(itr.baseKey) == 0 {
- // This should be the first Next() for this block. Hence, prefix length should be zero.
- y.AssertTrue(h.plen == 0)
- itr.baseKey = itr.data[itr.pos : itr.pos+uint32(h.klen)]
- }
- itr.parseKV(h)
-}
-
-func (itr *blockIterator) Prev() {
- if !itr.init {
- return
- }
- itr.err = nil
- if itr.last.prev == math.MaxUint32 {
- // This is the first element of the block!
- itr.err = io.EOF
- itr.pos = 0
- return
- }
-
- // Move back using current header's prev.
- itr.pos = itr.last.prev
-
- var h header
- y.AssertTruef(itr.pos < uint32(len(itr.data)), "%d %d", itr.pos, len(itr.data))
- itr.pos += uint32(h.Decode(itr.data[itr.pos:]))
- itr.parseKV(h)
- itr.last = h
-}
-
-func (itr *blockIterator) Key() []byte {
- if itr.err != nil {
- return nil
- }
- return itr.key
-}
-
-func (itr *blockIterator) Value() []byte {
- if itr.err != nil {
- return nil
- }
- return itr.val
-}
-
-// Iterator is an iterator for a Table.
-type Iterator struct {
- t *Table
- bpos int
- bi *blockIterator
- err error
-
- // Internally, Iterator is bidirectional. However, we only expose the
- // unidirectional functionality for now.
- reversed bool
-}
-
-// NewIterator returns a new iterator of the Table
-func (t *Table) NewIterator(reversed bool) *Iterator {
- t.IncrRef() // Important.
- ti := &Iterator{t: t, reversed: reversed}
- ti.next()
- return ti
-}
-
-// Close closes the iterator (and it must be called).
-func (itr *Iterator) Close() error {
- return itr.t.DecrRef()
-}
-
-func (itr *Iterator) reset() {
- itr.bpos = 0
- itr.err = nil
-}
-
-// Valid follows the y.Iterator interface
-func (itr *Iterator) Valid() bool {
- return itr.err == nil
-}
-
-func (itr *Iterator) seekToFirst() {
- numBlocks := len(itr.t.blockIndex)
- if numBlocks == 0 {
- itr.err = io.EOF
- return
- }
- itr.bpos = 0
- block, err := itr.t.block(itr.bpos)
- if err != nil {
- itr.err = err
- return
- }
- itr.bi = block.NewIterator()
- itr.bi.SeekToFirst()
- itr.err = itr.bi.Error()
-}
-
-func (itr *Iterator) seekToLast() {
- numBlocks := len(itr.t.blockIndex)
- if numBlocks == 0 {
- itr.err = io.EOF
- return
- }
- itr.bpos = numBlocks - 1
- block, err := itr.t.block(itr.bpos)
- if err != nil {
- itr.err = err
- return
- }
- itr.bi = block.NewIterator()
- itr.bi.SeekToLast()
- itr.err = itr.bi.Error()
-}
-
-func (itr *Iterator) seekHelper(blockIdx int, key []byte) {
- itr.bpos = blockIdx
- block, err := itr.t.block(blockIdx)
- if err != nil {
- itr.err = err
- return
- }
- itr.bi = block.NewIterator()
- itr.bi.Seek(key, origin)
- itr.err = itr.bi.Error()
-}
-
-// seekFrom brings us to a key that is >= input key.
-func (itr *Iterator) seekFrom(key []byte, whence int) {
- itr.err = nil
- switch whence {
- case origin:
- itr.reset()
- case current:
- }
-
- idx := sort.Search(len(itr.t.blockIndex), func(idx int) bool {
- ko := itr.t.blockIndex[idx]
- return y.CompareKeys(ko.key, key) > 0
- })
- if idx == 0 {
- // The smallest key in our table is already strictly > key. We can return that.
- // This is like a SeekToFirst.
- itr.seekHelper(0, key)
- return
- }
-
- // block[idx].smallest is > key.
- // Since idx>0, we know block[idx-1].smallest is <= key.
- // There are two cases.
- // 1) Everything in block[idx-1] is strictly < key. In this case, we should go to the first
- // element of block[idx].
- // 2) Some element in block[idx-1] is >= key. We should go to that element.
- itr.seekHelper(idx-1, key)
- if itr.err == io.EOF {
- // Case 1. Need to visit block[idx].
- if idx == len(itr.t.blockIndex) {
- // If idx == len(itr.t.blockIndex), then input key is greater than ANY element of table.
- // There's nothing we can do. Valid() should return false as we seek to end of table.
- return
- }
- // Since block[idx].smallest is > key. This is essentially a block[idx].SeekToFirst.
- itr.seekHelper(idx, key)
- }
- // Case 2: No need to do anything. We already did the seek in block[idx-1].
-}
-
-// seek will reset iterator and seek to >= key.
-func (itr *Iterator) seek(key []byte) {
- itr.seekFrom(key, origin)
-}
-
-// seekForPrev will reset iterator and seek to <= key.
-func (itr *Iterator) seekForPrev(key []byte) {
- // TODO: Optimize this. We shouldn't have to take a Prev step.
- itr.seekFrom(key, origin)
- if !bytes.Equal(itr.Key(), key) {
- itr.prev()
- }
-}
-
-func (itr *Iterator) next() {
- itr.err = nil
-
- if itr.bpos >= len(itr.t.blockIndex) {
- itr.err = io.EOF
- return
- }
-
- if itr.bi == nil {
- block, err := itr.t.block(itr.bpos)
- if err != nil {
- itr.err = err
- return
- }
- itr.bi = block.NewIterator()
- itr.bi.SeekToFirst()
- itr.err = itr.bi.Error()
- return
- }
-
- itr.bi.Next()
- if !itr.bi.Valid() {
- itr.bpos++
- itr.bi = nil
- itr.next()
- return
- }
-}
-
-func (itr *Iterator) prev() {
- itr.err = nil
- if itr.bpos < 0 {
- itr.err = io.EOF
- return
- }
-
- if itr.bi == nil {
- block, err := itr.t.block(itr.bpos)
- if err != nil {
- itr.err = err
- return
- }
- itr.bi = block.NewIterator()
- itr.bi.SeekToLast()
- itr.err = itr.bi.Error()
- return
- }
-
- itr.bi.Prev()
- if !itr.bi.Valid() {
- itr.bpos--
- itr.bi = nil
- itr.prev()
- return
- }
-}
-
-// Key follows the y.Iterator interface
-func (itr *Iterator) Key() []byte {
- return itr.bi.Key()
-}
-
-// Value follows the y.Iterator interface
-func (itr *Iterator) Value() (ret y.ValueStruct) {
- ret.Decode(itr.bi.Value())
- return
-}
-
-// Next follows the y.Iterator interface
-func (itr *Iterator) Next() {
- if !itr.reversed {
- itr.next()
- } else {
- itr.prev()
- }
-}
-
-// Rewind follows the y.Iterator interface
-func (itr *Iterator) Rewind() {
- if !itr.reversed {
- itr.seekToFirst()
- } else {
- itr.seekToLast()
- }
-}
-
-// Seek follows the y.Iterator interface
-func (itr *Iterator) Seek(key []byte) {
- if !itr.reversed {
- itr.seek(key)
- } else {
- itr.seekForPrev(key)
- }
-}
-
-// ConcatIterator concatenates the sequences defined by several iterators. (It only works with
-// TableIterators, probably just because it's faster to not be so generic.)
-type ConcatIterator struct {
- idx int // Which iterator is active now.
- cur *Iterator
- iters []*Iterator // Corresponds to tables.
- tables []*Table // Disregarding reversed, this is in ascending order.
- reversed bool
-}
-
-// NewConcatIterator creates a new concatenated iterator
-func NewConcatIterator(tbls []*Table, reversed bool) *ConcatIterator {
- iters := make([]*Iterator, len(tbls))
- for i := 0; i < len(tbls); i++ {
- iters[i] = tbls[i].NewIterator(reversed)
- }
- return &ConcatIterator{
- reversed: reversed,
- iters: iters,
- tables: tbls,
- idx: -1, // Not really necessary because s.it.Valid()=false, but good to have.
- }
-}
-
-func (s *ConcatIterator) setIdx(idx int) {
- s.idx = idx
- if idx < 0 || idx >= len(s.iters) {
- s.cur = nil
- } else {
- s.cur = s.iters[s.idx]
- }
-}
-
-// Rewind implements y.Interface
-func (s *ConcatIterator) Rewind() {
- if len(s.iters) == 0 {
- return
- }
- if !s.reversed {
- s.setIdx(0)
- } else {
- s.setIdx(len(s.iters) - 1)
- }
- s.cur.Rewind()
-}
-
-// Valid implements y.Interface
-func (s *ConcatIterator) Valid() bool {
- return s.cur != nil && s.cur.Valid()
-}
-
-// Key implements y.Interface
-func (s *ConcatIterator) Key() []byte {
- return s.cur.Key()
-}
-
-// Value implements y.Interface
-func (s *ConcatIterator) Value() y.ValueStruct {
- return s.cur.Value()
-}
-
-// Seek brings us to element >= key if reversed is false. Otherwise, <= key.
-func (s *ConcatIterator) Seek(key []byte) {
- var idx int
- if !s.reversed {
- idx = sort.Search(len(s.tables), func(i int) bool {
- return y.CompareKeys(s.tables[i].Biggest(), key) >= 0
- })
- } else {
- n := len(s.tables)
- idx = n - 1 - sort.Search(n, func(i int) bool {
- return y.CompareKeys(s.tables[n-1-i].Smallest(), key) <= 0
- })
- }
- if idx >= len(s.tables) || idx < 0 {
- s.setIdx(-1)
- return
- }
- // For reversed=false, we know s.tables[i-1].Biggest() < key. Thus, the
- // previous table cannot possibly contain key.
- s.setIdx(idx)
- s.cur.Seek(key)
-}
-
-// Next advances our concat iterator.
-func (s *ConcatIterator) Next() {
- s.cur.Next()
- if s.cur.Valid() {
- // Nothing to do. Just stay with the current table.
- return
- }
- for { // In case there are empty tables.
- if !s.reversed {
- s.setIdx(s.idx + 1)
- } else {
- s.setIdx(s.idx - 1)
- }
- if s.cur == nil {
- // End of list. Valid will become false.
- return
- }
- s.cur.Rewind()
- if s.cur.Valid() {
- break
- }
- }
-}
-
-// Close implements y.Interface.
-func (s *ConcatIterator) Close() error {
- for _, it := range s.iters {
- if err := it.Close(); err != nil {
- return errors.Wrap(err, "ConcatIterator")
- }
- }
- return nil
-}
diff --git a/vendor/github.com/dgraph-io/badger/table/table.go b/vendor/github.com/dgraph-io/badger/table/table.go
deleted file mode 100644
index 0a1f42d4..00000000
--- a/vendor/github.com/dgraph-io/badger/table/table.go
+++ /dev/null
@@ -1,360 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package table
-
-import (
- "bytes"
- "crypto/sha256"
- "encoding/binary"
- "fmt"
- "io"
- "os"
- "path"
- "path/filepath"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
-
- "github.com/AndreasBriese/bbloom"
- "github.com/dgraph-io/badger/options"
- "github.com/dgraph-io/badger/y"
- "github.com/pkg/errors"
-)
-
-const fileSuffix = ".sst"
-
-type keyOffset struct {
- key []byte
- offset int
- len int
-}
-
-// TableInterface is useful for testing.
-type TableInterface interface {
- Smallest() []byte
- Biggest() []byte
- DoesNotHave(key []byte) bool
-}
-
-// Table represents a loaded table file with the info we have about it
-type Table struct {
- sync.Mutex
-
- fd *os.File // Own fd.
- tableSize int // Initialized in OpenTable, using fd.Stat().
-
- blockIndex []keyOffset
- ref int32 // For file garbage collection. Atomic.
-
- loadingMode options.FileLoadingMode
- mmap []byte // Memory mapped.
-
- // The following are initialized once and const.
- smallest, biggest []byte // Smallest and largest keys.
- id uint64 // file id, part of filename
-
- bf bbloom.Bloom
-
- Checksum []byte
-}
-
-// IncrRef increments the refcount (having to do with whether the file should be deleted)
-func (t *Table) IncrRef() {
- atomic.AddInt32(&t.ref, 1)
-}
-
-// DecrRef decrements the refcount and possibly deletes the table
-func (t *Table) DecrRef() error {
- newRef := atomic.AddInt32(&t.ref, -1)
- if newRef == 0 {
- // We can safely delete this file, because for all the current files, we always have
- // at least one reference pointing to them.
-
- // It's necessary to delete windows files
- if t.loadingMode == options.MemoryMap {
- if err := y.Munmap(t.mmap); err != nil {
- return err
- }
- }
- if err := t.fd.Truncate(0); err != nil {
- // This is very important to let the FS know that the file is deleted.
- return err
- }
- filename := t.fd.Name()
- if err := t.fd.Close(); err != nil {
- return err
- }
- if err := os.Remove(filename); err != nil {
- return err
- }
- }
- return nil
-}
-
-type block struct {
- offset int
- data []byte
-}
-
-func (b block) NewIterator() *blockIterator {
- return &blockIterator{data: b.data}
-}
-
-// OpenTable assumes file has only one table and opens it. Takes ownership of fd upon function
-// entry. Returns a table with one reference count on it (decrementing which may delete the file!
-// -- consider t.Close() instead). The fd has to writeable because we call Truncate on it before
-// deleting.
-func OpenTable(fd *os.File, mode options.FileLoadingMode, cksum []byte) (*Table, error) {
- fileInfo, err := fd.Stat()
- if err != nil {
- // It's OK to ignore fd.Close() errs in this function because we have only read
- // from the file.
- _ = fd.Close()
- return nil, y.Wrap(err)
- }
-
- filename := fileInfo.Name()
- id, ok := ParseFileID(filename)
- if !ok {
- _ = fd.Close()
- return nil, errors.Errorf("Invalid filename: %s", filename)
- }
- t := &Table{
- fd: fd,
- ref: 1, // Caller is given one reference.
- id: id,
- loadingMode: mode,
- }
-
- t.tableSize = int(fileInfo.Size())
-
- // We first load to RAM, so we can read the index and do checksum.
- if err := t.loadToRAM(); err != nil {
- return nil, err
- }
- // Enforce checksum before we read index. Otherwise, if the file was
- // truncated, we'd end up with panics in readIndex.
- if len(cksum) > 0 && !bytes.Equal(t.Checksum, cksum) {
- return nil, fmt.Errorf(
- "CHECKSUM_MISMATCH: Table checksum does not match checksum in MANIFEST."+
- " NOT including table %s. This would lead to missing data."+
- "\n sha256 %x Expected\n sha256 %x Found\n", filename, cksum, t.Checksum)
- }
- if err := t.readIndex(); err != nil {
- return nil, y.Wrap(err)
- }
-
- it := t.NewIterator(false)
- defer it.Close()
- it.Rewind()
- if it.Valid() {
- t.smallest = it.Key()
- }
-
- it2 := t.NewIterator(true)
- defer it2.Close()
- it2.Rewind()
- if it2.Valid() {
- t.biggest = it2.Key()
- }
-
- switch mode {
- case options.LoadToRAM:
- // No need to do anything. t.mmap is already filled.
- case options.MemoryMap:
- t.mmap, err = y.Mmap(fd, false, fileInfo.Size())
- if err != nil {
- _ = fd.Close()
- return nil, y.Wrapf(err, "Unable to map file: %q", fileInfo.Name())
- }
- case options.FileIO:
- t.mmap = nil
- default:
- panic(fmt.Sprintf("Invalid loading mode: %v", mode))
- }
- return t, nil
-}
-
-// Close closes the open table. (Releases resources back to the OS.)
-func (t *Table) Close() error {
- if t.loadingMode == options.MemoryMap {
- if err := y.Munmap(t.mmap); err != nil {
- return err
- }
- }
-
- return t.fd.Close()
-}
-
-func (t *Table) read(off, sz int) ([]byte, error) {
- if len(t.mmap) > 0 {
- if len(t.mmap[off:]) < sz {
- return nil, y.ErrEOF
- }
- return t.mmap[off : off+sz], nil
- }
-
- res := make([]byte, sz)
- nbr, err := t.fd.ReadAt(res, int64(off))
- y.NumReads.Add(1)
- y.NumBytesRead.Add(int64(nbr))
- return res, err
-}
-
-func (t *Table) readNoFail(off, sz int) []byte {
- res, err := t.read(off, sz)
- y.Check(err)
- return res
-}
-
-func (t *Table) readIndex() error {
- if len(t.mmap) != t.tableSize {
- panic("Table size does not match the read bytes")
- }
- readPos := t.tableSize
-
- // Read bloom filter.
- readPos -= 4
- buf := t.readNoFail(readPos, 4)
- bloomLen := int(binary.BigEndian.Uint32(buf))
- readPos -= bloomLen
- data := t.readNoFail(readPos, bloomLen)
- t.bf = bbloom.JSONUnmarshal(data)
-
- readPos -= 4
- buf = t.readNoFail(readPos, 4)
- restartsLen := int(binary.BigEndian.Uint32(buf))
-
- readPos -= 4 * restartsLen
- buf = t.readNoFail(readPos, 4*restartsLen)
-
- offsets := make([]int, restartsLen)
- for i := 0; i < restartsLen; i++ {
- offsets[i] = int(binary.BigEndian.Uint32(buf[:4]))
- buf = buf[4:]
- }
-
- // The last offset stores the end of the last block.
- for i := 0; i < len(offsets); i++ {
- var o int
- if i == 0 {
- o = 0
- } else {
- o = offsets[i-1]
- }
-
- ko := keyOffset{
- offset: o,
- len: offsets[i] - o,
- }
- t.blockIndex = append(t.blockIndex, ko)
- }
-
- // Execute this index read serially, because we already have table data in memory.
- var h header
- for idx := range t.blockIndex {
- ko := &t.blockIndex[idx]
-
- hbuf := t.readNoFail(ko.offset, h.Size())
- h.Decode(hbuf)
- y.AssertTrue(h.plen == 0)
-
- key := t.readNoFail(ko.offset+len(hbuf), int(h.klen))
- ko.key = append([]byte{}, key...)
- }
-
- return nil
-}
-
-func (t *Table) block(idx int) (block, error) {
- y.AssertTruef(idx >= 0, "idx=%d", idx)
- if idx >= len(t.blockIndex) {
- return block{}, errors.New("block out of index")
- }
-
- ko := t.blockIndex[idx]
- blk := block{
- offset: ko.offset,
- }
- var err error
- blk.data, err = t.read(blk.offset, ko.len)
- return blk, err
-}
-
-// Size is its file size in bytes
-func (t *Table) Size() int64 { return int64(t.tableSize) }
-
-// Smallest is its smallest key, or nil if there are none
-func (t *Table) Smallest() []byte { return t.smallest }
-
-// Biggest is its biggest key, or nil if there are none
-func (t *Table) Biggest() []byte { return t.biggest }
-
-// Filename is NOT the file name. Just kidding, it is.
-func (t *Table) Filename() string { return t.fd.Name() }
-
-// ID is the table's ID number (used to make the file name).
-func (t *Table) ID() uint64 { return t.id }
-
-// DoesNotHave returns true if (but not "only if") the table does not have the key. It does a
-// bloom filter lookup.
-func (t *Table) DoesNotHave(key []byte) bool { return !t.bf.Has(key) }
-
-// ParseFileID reads the file id out of a filename.
-func ParseFileID(name string) (uint64, bool) {
- name = path.Base(name)
- if !strings.HasSuffix(name, fileSuffix) {
- return 0, false
- }
- // suffix := name[len(fileSuffix):]
- name = strings.TrimSuffix(name, fileSuffix)
- id, err := strconv.Atoi(name)
- if err != nil {
- return 0, false
- }
- y.AssertTrue(id >= 0)
- return uint64(id), true
-}
-
-// IDToFilename does the inverse of ParseFileID
-func IDToFilename(id uint64) string {
- return fmt.Sprintf("%06d", id) + fileSuffix
-}
-
-// NewFilename should be named TableFilepath -- it combines the dir with the ID to make a table
-// filepath.
-func NewFilename(id uint64, dir string) string {
- return filepath.Join(dir, IDToFilename(id))
-}
-
-func (t *Table) loadToRAM() error {
- if _, err := t.fd.Seek(0, io.SeekStart); err != nil {
- return err
- }
- t.mmap = make([]byte, t.tableSize)
- sum := sha256.New()
- tee := io.TeeReader(t.fd, sum)
- read, err := tee.Read(t.mmap)
- if err != nil || read != t.tableSize {
- return y.Wrapf(err, "Unable to load file in memory. Table file: %s", t.Filename())
- }
- t.Checksum = sum.Sum(nil)
- y.NumReads.Add(1)
- y.NumBytesRead.Add(int64(read))
- return nil
-}
diff --git a/vendor/github.com/dgraph-io/badger/test.sh b/vendor/github.com/dgraph-io/badger/test.sh
deleted file mode 100644
index 5b14bfd8..00000000
--- a/vendor/github.com/dgraph-io/badger/test.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-
-set -e
-
-# Ensure that we can compile the binary.
-pushd badger
-go build -v .
-popd
-
-# Run the memory intensive tests first.
-go test -v --manual=true -run='TestBigKeyValuePairs$'
-go test -v --manual=true -run='TestPushValueLogLimit'
-
-# Run the special Truncate test.
-rm -rf p
-go test -v --manual=true -run='TestTruncateVlogNoClose$' .
-truncate --size=4096 p/000000.vlog
-go test -v --manual=true -run='TestTruncateVlogNoClose2$' .
-go test -v --manual=true -run='TestTruncateVlogNoClose3$' .
-rm -rf p
-
-# Then the normal tests.
-echo
-echo "==> Starting tests with value log mmapped..."
-sleep 5
-go test -v --vlog_mmap=true -race ./...
-
-echo
-echo "==> Starting tests with value log not mmapped..."
-sleep 5
-go test -v --vlog_mmap=false -race ./...
diff --git a/vendor/github.com/dgraph-io/badger/txn.go b/vendor/github.com/dgraph-io/badger/txn.go
deleted file mode 100644
index 67411a8f..00000000
--- a/vendor/github.com/dgraph-io/badger/txn.go
+++ /dev/null
@@ -1,701 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "bytes"
- "context"
- "encoding/hex"
- "math"
- "sort"
- "strconv"
- "sync"
- "sync/atomic"
-
- "github.com/dgraph-io/badger/y"
- farm "github.com/dgryski/go-farm"
- "github.com/pkg/errors"
-)
-
-type oracle struct {
- // A 64-bit integer must be at the top for memory alignment. See issue #311.
- refCount int64
- isManaged bool // Does not change value, so no locking required.
-
- sync.Mutex // For nextTxnTs and commits.
- // writeChLock lock is for ensuring that transactions go to the write
- // channel in the same order as their commit timestamps.
- writeChLock sync.Mutex
- nextTxnTs uint64
-
- // Used to block NewTransaction, so all previous commits are visible to a new read.
- txnMark *y.WaterMark
-
- // Either of these is used to determine which versions can be permanently
- // discarded during compaction.
- discardTs uint64 // Used by ManagedDB.
- readMark *y.WaterMark // Used by DB.
-
- // commits stores a key fingerprint and latest commit counter for it.
- // refCount is used to clear out commits map to avoid a memory blowup.
- commits map[uint64]uint64
-
- // closer is used to stop watermarks.
- closer *y.Closer
-}
-
-func newOracle(opt Options) *oracle {
- orc := &oracle{
- isManaged: opt.managedTxns,
- commits: make(map[uint64]uint64),
- // We're not initializing nextTxnTs and readOnlyTs. It would be done after replay in Open.
- //
- // WaterMarks must be 64-bit aligned for atomic package, hence we must use pointers here.
- // See https://golang.org/pkg/sync/atomic/#pkg-note-BUG.
- readMark: &y.WaterMark{Name: "badger.PendingReads"},
- txnMark: &y.WaterMark{Name: "badger.TxnTimestamp"},
- closer: y.NewCloser(2),
- }
- orc.readMark.Init(orc.closer)
- orc.txnMark.Init(orc.closer)
- return orc
-}
-
-func (o *oracle) Stop() {
- o.closer.SignalAndWait()
-}
-
-func (o *oracle) addRef() {
- atomic.AddInt64(&o.refCount, 1)
-}
-
-func (o *oracle) decrRef() {
- if atomic.AddInt64(&o.refCount, -1) != 0 {
- return
- }
-
- // Clear out commits maps to release memory.
- o.Lock()
- defer o.Unlock()
- // Avoids the race where something new is added to commitsMap
- // after we check refCount and before we take Lock.
- if atomic.LoadInt64(&o.refCount) != 0 {
- return
- }
- if len(o.commits) >= 1000 { // If the map is still small, let it slide.
- o.commits = make(map[uint64]uint64)
- }
-}
-
-func (o *oracle) readTs() uint64 {
- if o.isManaged {
- panic("ReadTs should not be retrieved for managed DB")
- }
-
- var readTs uint64
- o.Lock()
- readTs = o.nextTxnTs - 1
- o.readMark.Begin(readTs)
- o.Unlock()
-
- // Wait for all txns which have no conflicts, have been assigned a commit
- // timestamp and are going through the write to value log and LSM tree
- // process. Not waiting here could mean that some txns which have been
- // committed would not be read.
- y.Check(o.txnMark.WaitForMark(context.Background(), readTs))
- return readTs
-}
-
-func (o *oracle) nextTs() uint64 {
- o.Lock()
- defer o.Unlock()
- return o.nextTxnTs
-}
-
-func (o *oracle) incrementNextTs() {
- o.Lock()
- defer o.Unlock()
- o.nextTxnTs++
-}
-
-// Any deleted or invalid versions at or below ts would be discarded during
-// compaction to reclaim disk space in LSM tree and thence value log.
-func (o *oracle) setDiscardTs(ts uint64) {
- o.Lock()
- defer o.Unlock()
- o.discardTs = ts
-}
-
-func (o *oracle) discardAtOrBelow() uint64 {
- if o.isManaged {
- o.Lock()
- defer o.Unlock()
- return o.discardTs
- }
- return o.readMark.DoneUntil()
-}
-
-// hasConflict must be called while having a lock.
-func (o *oracle) hasConflict(txn *Txn) bool {
- if len(txn.reads) == 0 {
- return false
- }
- for _, ro := range txn.reads {
- // A commit at the read timestamp is expected.
- // But, any commit after the read timestamp should cause a conflict.
- if ts, has := o.commits[ro]; has && ts > txn.readTs {
- return true
- }
- }
- return false
-}
-
-func (o *oracle) newCommitTs(txn *Txn) uint64 {
- o.Lock()
- defer o.Unlock()
-
- if o.hasConflict(txn) {
- return 0
- }
-
- var ts uint64
- if !o.isManaged {
- // This is the general case, when user doesn't specify the read and commit ts.
- ts = o.nextTxnTs
- o.nextTxnTs++
- o.txnMark.Begin(ts)
-
- } else {
- // If commitTs is set, use it instead.
- ts = txn.commitTs
- }
-
- for _, w := range txn.writes {
- o.commits[w] = ts // Update the commitTs.
- }
- return ts
-}
-
-func (o *oracle) doneCommit(cts uint64) {
- if o.isManaged {
- // No need to update anything.
- return
- }
- o.txnMark.Done(cts)
-}
-
-// Txn represents a Badger transaction.
-type Txn struct {
- readTs uint64
- commitTs uint64
-
- update bool // update is used to conditionally keep track of reads.
- reads []uint64 // contains fingerprints of keys read.
- writes []uint64 // contains fingerprints of keys written.
-
- pendingWrites map[string]*Entry // cache stores any writes done by txn.
-
- db *DB
- discarded bool
-
- size int64
- count int64
- numIterators int32
-}
-
-type pendingWritesIterator struct {
- entries []*Entry
- nextIdx int
- readTs uint64
- reversed bool
-}
-
-func (pi *pendingWritesIterator) Next() {
- pi.nextIdx++
-}
-
-func (pi *pendingWritesIterator) Rewind() {
- pi.nextIdx = 0
-}
-
-func (pi *pendingWritesIterator) Seek(key []byte) {
- key = y.ParseKey(key)
- pi.nextIdx = sort.Search(len(pi.entries), func(idx int) bool {
- cmp := bytes.Compare(pi.entries[idx].Key, key)
- if !pi.reversed {
- return cmp >= 0
- }
- return cmp <= 0
- })
-}
-
-func (pi *pendingWritesIterator) Key() []byte {
- y.AssertTrue(pi.Valid())
- entry := pi.entries[pi.nextIdx]
- return y.KeyWithTs(entry.Key, pi.readTs)
-}
-
-func (pi *pendingWritesIterator) Value() y.ValueStruct {
- y.AssertTrue(pi.Valid())
- entry := pi.entries[pi.nextIdx]
- return y.ValueStruct{
- Value: entry.Value,
- Meta: entry.meta,
- UserMeta: entry.UserMeta,
- ExpiresAt: entry.ExpiresAt,
- Version: pi.readTs,
- }
-}
-
-func (pi *pendingWritesIterator) Valid() bool {
- return pi.nextIdx < len(pi.entries)
-}
-
-func (pi *pendingWritesIterator) Close() error {
- return nil
-}
-
-func (txn *Txn) newPendingWritesIterator(reversed bool) *pendingWritesIterator {
- if !txn.update || len(txn.pendingWrites) == 0 {
- return nil
- }
- entries := make([]*Entry, 0, len(txn.pendingWrites))
- for _, e := range txn.pendingWrites {
- entries = append(entries, e)
- }
- // Number of pending writes per transaction shouldn't be too big in general.
- sort.Slice(entries, func(i, j int) bool {
- cmp := bytes.Compare(entries[i].Key, entries[j].Key)
- if !reversed {
- return cmp < 0
- }
- return cmp > 0
- })
- return &pendingWritesIterator{
- readTs: txn.readTs,
- entries: entries,
- reversed: reversed,
- }
-}
-
-func (txn *Txn) checkSize(e *Entry) error {
- count := txn.count + 1
- // Extra bytes for version in key.
- size := txn.size + int64(e.estimateSize(txn.db.opt.ValueThreshold)) + 10
- if count >= txn.db.opt.maxBatchCount || size >= txn.db.opt.maxBatchSize {
- return ErrTxnTooBig
- }
- txn.count, txn.size = count, size
- return nil
-}
-
-func exceedsSize(prefix string, max int64, key []byte) error {
- return errors.Errorf("%s with size %d exceeded %d limit. %s:\n%s",
- prefix, len(key), max, prefix, hex.Dump(key[:1<<10]))
-}
-
-func (txn *Txn) modify(e *Entry) error {
- const maxKeySize = 65000
-
- switch {
- case !txn.update:
- return ErrReadOnlyTxn
- case txn.discarded:
- return ErrDiscardedTxn
- case len(e.Key) == 0:
- return ErrEmptyKey
- case bytes.HasPrefix(e.Key, badgerPrefix):
- return ErrInvalidKey
- case len(e.Key) > maxKeySize:
- // Key length can't be more than uint16, as determined by table::header. To
- // keep things safe and allow badger move prefix and a timestamp suffix, let's
- // cut it down to 65000, instead of using 65536.
- return exceedsSize("Key", maxKeySize, e.Key)
- case int64(len(e.Value)) > txn.db.opt.ValueLogFileSize:
- return exceedsSize("Value", txn.db.opt.ValueLogFileSize, e.Value)
- }
-
- if err := txn.checkSize(e); err != nil {
- return err
- }
- fp := farm.Fingerprint64(e.Key) // Avoid dealing with byte arrays.
- txn.writes = append(txn.writes, fp)
- txn.pendingWrites[string(e.Key)] = e
- return nil
-}
-
-// Set adds a key-value pair to the database.
-// It will return ErrReadOnlyTxn if update flag was set to false when creating the transaction.
-//
-// The current transaction keeps a reference to the key and val byte slice
-// arguments. Users must not modify key and val until the end of the transaction.
-func (txn *Txn) Set(key, val []byte) error {
- return txn.SetEntry(NewEntry(key, val))
-}
-
-// SetEntry takes an Entry struct and adds the key-value pair in the struct,
-// along with other metadata to the database.
-//
-// The current transaction keeps a reference to the entry passed in argument.
-// Users must not modify the entry until the end of the transaction.
-func (txn *Txn) SetEntry(e *Entry) error {
- return txn.modify(e)
-}
-
-// Delete deletes a key.
-//
-// This is done by adding a delete marker for the key at commit timestamp. Any
-// reads happening before this timestamp would be unaffected. Any reads after
-// this commit would see the deletion.
-//
-// The current transaction keeps a reference to the key byte slice argument.
-// Users must not modify the key until the end of the transaction.
-func (txn *Txn) Delete(key []byte) error {
- e := &Entry{
- Key: key,
- meta: bitDelete,
- }
- return txn.modify(e)
-}
-
-// Get looks for key and returns corresponding Item.
-// If key is not found, ErrKeyNotFound is returned.
-func (txn *Txn) Get(key []byte) (item *Item, rerr error) {
- if len(key) == 0 {
- return nil, ErrEmptyKey
- } else if txn.discarded {
- return nil, ErrDiscardedTxn
- }
-
- item = new(Item)
- if txn.update {
- if e, has := txn.pendingWrites[string(key)]; has && bytes.Equal(key, e.Key) {
- if isDeletedOrExpired(e.meta, e.ExpiresAt) {
- return nil, ErrKeyNotFound
- }
- // Fulfill from cache.
- item.meta = e.meta
- item.val = e.Value
- item.userMeta = e.UserMeta
- item.key = key
- item.status = prefetched
- item.version = txn.readTs
- item.expiresAt = e.ExpiresAt
- // We probably don't need to set db on item here.
- return item, nil
- }
- // Only track reads if this is update txn. No need to track read if txn serviced it
- // internally.
- txn.addReadKey(key)
- }
-
- seek := y.KeyWithTs(key, txn.readTs)
- vs, err := txn.db.get(seek)
- if err != nil {
- return nil, errors.Wrapf(err, "DB::Get key: %q", key)
- }
- if vs.Value == nil && vs.Meta == 0 {
- return nil, ErrKeyNotFound
- }
- if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) {
- return nil, ErrKeyNotFound
- }
-
- item.key = key
- item.version = vs.Version
- item.meta = vs.Meta
- item.userMeta = vs.UserMeta
- item.db = txn.db
- item.vptr = vs.Value // TODO: Do we need to copy this over?
- item.txn = txn
- item.expiresAt = vs.ExpiresAt
- return item, nil
-}
-
-func (txn *Txn) addReadKey(key []byte) {
- if txn.update {
- fp := farm.Fingerprint64(key)
- txn.reads = append(txn.reads, fp)
- }
-}
-
-// Discard discards a created transaction. This method is very important and must be called. Commit
-// method calls this internally, however, calling this multiple times doesn't cause any issues. So,
-// this can safely be called via a defer right when transaction is created.
-//
-// NOTE: If any operations are run on a discarded transaction, ErrDiscardedTxn is returned.
-func (txn *Txn) Discard() {
- if txn.discarded { // Avoid a re-run.
- return
- }
- if atomic.LoadInt32(&txn.numIterators) > 0 {
- panic("Unclosed iterator at time of Txn.Discard.")
- }
- txn.discarded = true
- if !txn.db.orc.isManaged {
- txn.db.orc.readMark.Done(txn.readTs)
- }
- if txn.update {
- txn.db.orc.decrRef()
- }
-}
-
-func (txn *Txn) commitAndSend() (func() error, error) {
- orc := txn.db.orc
- // Ensure that the order in which we get the commit timestamp is the same as
- // the order in which we push these updates to the write channel. So, we
- // acquire a writeChLock before getting a commit timestamp, and only release
- // it after pushing the entries to it.
- orc.writeChLock.Lock()
- defer orc.writeChLock.Unlock()
-
- commitTs := orc.newCommitTs(txn)
- if commitTs == 0 {
- return nil, ErrConflict
- }
-
- // The following debug information is what led to determining the cause of
- // bank txn violation bug, and it took a whole bunch of effort to narrow it
- // down to here. So, keep this around for at least a couple of months.
- // var b strings.Builder
- // fmt.Fprintf(&b, "Read: %d. Commit: %d. reads: %v. writes: %v. Keys: ",
- // txn.readTs, commitTs, txn.reads, txn.writes)
- entries := make([]*Entry, 0, len(txn.pendingWrites)+1)
- for _, e := range txn.pendingWrites {
- // fmt.Fprintf(&b, "[%q : %q], ", e.Key, e.Value)
-
- // Suffix the keys with commit ts, so the key versions are sorted in
- // descending order of commit timestamp.
- e.Key = y.KeyWithTs(e.Key, commitTs)
- e.meta |= bitTxn
- entries = append(entries, e)
- }
- // log.Printf("%s\n", b.String())
- e := &Entry{
- Key: y.KeyWithTs(txnKey, commitTs),
- Value: []byte(strconv.FormatUint(commitTs, 10)),
- meta: bitFinTxn,
- }
- entries = append(entries, e)
-
- req, err := txn.db.sendToWriteCh(entries)
- if err != nil {
- orc.doneCommit(commitTs)
- return nil, err
- }
- ret := func() error {
- err := req.Wait()
- // Wait before marking commitTs as done.
- // We can't defer doneCommit above, because it is being called from a
- // callback here.
- orc.doneCommit(commitTs)
- return err
- }
- return ret, nil
-}
-
-func (txn *Txn) commitPrecheck() {
- if txn.commitTs == 0 && txn.db.opt.managedTxns {
- panic("Commit cannot be called with managedDB=true. Use CommitAt.")
- }
- if txn.discarded {
- panic("Trying to commit a discarded txn")
- }
-}
-
-// Commit commits the transaction, following these steps:
-//
-// 1. If there are no writes, return immediately.
-//
-// 2. Check if read rows were updated since txn started. If so, return ErrConflict.
-//
-// 3. If no conflict, generate a commit timestamp and update written rows' commit ts.
-//
-// 4. Batch up all writes, write them to value log and LSM tree.
-//
-// 5. If callback is provided, Badger will return immediately after checking
-// for conflicts. Writes to the database will happen in the background. If
-// there is a conflict, an error will be returned and the callback will not
-// run. If there are no conflicts, the callback will be called in the
-// background upon successful completion of writes or any error during write.
-//
-// If error is nil, the transaction is successfully committed. In case of a non-nil error, the LSM
-// tree won't be updated, so there's no need for any rollback.
-func (txn *Txn) Commit() error {
- txn.commitPrecheck() // Precheck before discarding txn.
- defer txn.Discard()
-
- if len(txn.writes) == 0 {
- return nil // Nothing to do.
- }
-
- txnCb, err := txn.commitAndSend()
- if err != nil {
- return err
- }
- // If batchSet failed, LSM would not have been updated. So, no need to rollback anything.
-
- // TODO: What if some of the txns successfully make it to value log, but others fail.
- // Nothing gets updated to LSM, until a restart happens.
- return txnCb()
-}
-
-type txnCb struct {
- commit func() error
- user func(error)
- err error
-}
-
-func runTxnCallback(cb *txnCb) {
- switch {
- case cb == nil:
- panic("txn callback is nil")
- case cb.user == nil:
- panic("Must have caught a nil callback for txn.CommitWith")
- case cb.err != nil:
- cb.user(cb.err)
- case cb.commit != nil:
- err := cb.commit()
- cb.user(err)
- default:
- cb.user(nil)
- }
-}
-
-// CommitWith acts like Commit, but takes a callback, which gets run via a
-// goroutine to avoid blocking this function. The callback is guaranteed to run,
-// so it is safe to increment sync.WaitGroup before calling CommitWith, and
-// decrementing it in the callback; to block until all callbacks are run.
-func (txn *Txn) CommitWith(cb func(error)) {
- txn.commitPrecheck() // Precheck before discarding txn.
- defer txn.Discard()
-
- if cb == nil {
- panic("Nil callback provided to CommitWith")
- }
-
- if len(txn.writes) == 0 {
- // Do not run these callbacks from here, because the CommitWith and the
- // callback might be acquiring the same locks. Instead run the callback
- // from another goroutine.
- go runTxnCallback(&txnCb{user: cb, err: nil})
- return
- }
-
- commitCb, err := txn.commitAndSend()
- if err != nil {
- go runTxnCallback(&txnCb{user: cb, err: err})
- return
- }
-
- go runTxnCallback(&txnCb{user: cb, commit: commitCb})
-}
-
-// ReadTs returns the read timestamp of the transaction.
-func (txn *Txn) ReadTs() uint64 {
- return txn.readTs
-}
-
-// NewTransaction creates a new transaction. Badger supports concurrent execution of transactions,
-// providing serializable snapshot isolation, avoiding write skews. Badger achieves this by tracking
-// the keys read and at Commit time, ensuring that these read keys weren't concurrently modified by
-// another transaction.
-//
-// For read-only transactions, set update to false. In this mode, we don't track the rows read for
-// any changes. Thus, any long running iterations done in this mode wouldn't pay this overhead.
-//
-// Running transactions concurrently is OK. However, a transaction itself isn't thread safe, and
-// should only be run serially. It doesn't matter if a transaction is created by one goroutine and
-// passed down to other, as long as the Txn APIs are called serially.
-//
-// When you create a new transaction, it is absolutely essential to call
-// Discard(). This should be done irrespective of what the update param is set
-// to. Commit API internally runs Discard, but running it twice wouldn't cause
-// any issues.
-//
-// txn := db.NewTransaction(false)
-// defer txn.Discard()
-// // Call various APIs.
-func (db *DB) NewTransaction(update bool) *Txn {
- return db.newTransaction(update, false)
-}
-
-func (db *DB) newTransaction(update, isManaged bool) *Txn {
- if db.opt.ReadOnly && update {
- // DB is read-only, force read-only transaction.
- update = false
- }
-
- txn := &Txn{
- update: update,
- db: db,
- count: 1, // One extra entry for BitFin.
- size: int64(len(txnKey) + 10), // Some buffer for the extra entry.
- }
- if update {
- txn.pendingWrites = make(map[string]*Entry)
- txn.db.orc.addRef()
- }
- // It is important that the oracle addRef happens BEFORE we retrieve a read
- // timestamp. Otherwise, it is possible that the oracle commit map would
- // become nil after we get the read timestamp.
- // The sequence of events can be:
- // 1. This txn gets a read timestamp.
- // 2. Another txn working on the same keyset commits them, and decrements
- // the reference to oracle.
- // 3. Oracle ref reaches zero, resetting commit map.
- // 4. This txn increments the oracle reference.
- // 5. Now this txn would go on to commit the keyset, and no conflicts
- // would be detected.
- // See issue: https://github.com/dgraph-io/badger/issues/574
- if !isManaged {
- txn.readTs = db.orc.readTs()
- }
- return txn
-}
-
-// View executes a function creating and managing a read-only transaction for the user. Error
-// returned by the function is relayed by the View method.
-// If View is used with managed transactions, it would assume a read timestamp of MaxUint64.
-func (db *DB) View(fn func(txn *Txn) error) error {
- var txn *Txn
- if db.opt.managedTxns {
- txn = db.NewTransactionAt(math.MaxUint64, false)
- } else {
- txn = db.NewTransaction(false)
- }
- defer txn.Discard()
-
- return fn(txn)
-}
-
-// Update executes a function, creating and managing a read-write transaction
-// for the user. Error returned by the function is relayed by the Update method.
-// Update cannot be used with managed transactions.
-func (db *DB) Update(fn func(txn *Txn) error) error {
- if db.opt.managedTxns {
- panic("Update can only be used with managedDB=false.")
- }
- txn := db.NewTransaction(true)
- defer txn.Discard()
-
- if err := fn(txn); err != nil {
- return err
- }
-
- return txn.Commit()
-}
diff --git a/vendor/github.com/dgraph-io/badger/util.go b/vendor/github.com/dgraph-io/badger/util.go
deleted file mode 100644
index c5173e26..00000000
--- a/vendor/github.com/dgraph-io/badger/util.go
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "encoding/hex"
- "io/ioutil"
- "math/rand"
- "sync/atomic"
- "time"
-
- "github.com/dgraph-io/badger/table"
- "github.com/dgraph-io/badger/y"
- "github.com/pkg/errors"
-)
-
-func (s *levelsController) validate() error {
- for _, l := range s.levels {
- if err := l.validate(); err != nil {
- return errors.Wrap(err, "Levels Controller")
- }
- }
- return nil
-}
-
-// Check does some sanity check on one level of data or in-memory index.
-func (s *levelHandler) validate() error {
- if s.level == 0 {
- return nil
- }
-
- s.RLock()
- defer s.RUnlock()
- numTables := len(s.tables)
- for j := 1; j < numTables; j++ {
- if j >= len(s.tables) {
- return errors.Errorf("Level %d, j=%d numTables=%d", s.level, j, numTables)
- }
-
- if y.CompareKeys(s.tables[j-1].Biggest(), s.tables[j].Smallest()) >= 0 {
- return errors.Errorf(
- "Inter: Biggest(j-1) \n%s\n vs Smallest(j): \n%s\n: level=%d j=%d numTables=%d",
- hex.Dump(s.tables[j-1].Biggest()), hex.Dump(s.tables[j].Smallest()),
- s.level, j, numTables)
- }
-
- if y.CompareKeys(s.tables[j].Smallest(), s.tables[j].Biggest()) > 0 {
- return errors.Errorf(
- "Intra: %q vs %q: level=%d j=%d numTables=%d",
- s.tables[j].Smallest(), s.tables[j].Biggest(), s.level, j, numTables)
- }
- }
- return nil
-}
-
-// func (s *KV) debugPrintMore() { s.lc.debugPrintMore() }
-
-// // debugPrintMore shows key ranges of each level.
-// func (s *levelsController) debugPrintMore() {
-// s.Lock()
-// defer s.Unlock()
-// for i := 0; i < s.kv.opt.MaxLevels; i++ {
-// s.levels[i].debugPrintMore()
-// }
-// }
-
-// func (s *levelHandler) debugPrintMore() {
-// s.RLock()
-// defer s.RUnlock()
-// s.elog.Printf("Level %d:", s.level)
-// for _, t := range s.tables {
-// y.Printf(" [%s, %s]", t.Smallest(), t.Biggest())
-// }
-// y.Printf("\n")
-// }
-
-// reserveFileID reserves a unique file id.
-func (s *levelsController) reserveFileID() uint64 {
- id := atomic.AddUint64(&s.nextFileID, 1)
- return id - 1
-}
-
-func getIDMap(dir string) map[uint64]struct{} {
- fileInfos, err := ioutil.ReadDir(dir)
- y.Check(err)
- idMap := make(map[uint64]struct{})
- for _, info := range fileInfos {
- if info.IsDir() {
- continue
- }
- fileID, ok := table.ParseFileID(info.Name())
- if !ok {
- continue
- }
- idMap[fileID] = struct{}{}
- }
- return idMap
-}
-
-func init() {
- rand.Seed(time.Now().UnixNano())
-}
diff --git a/vendor/github.com/dgraph-io/badger/value.go b/vendor/github.com/dgraph-io/badger/value.go
deleted file mode 100644
index f57f1b3b..00000000
--- a/vendor/github.com/dgraph-io/badger/value.go
+++ /dev/null
@@ -1,1455 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "bufio"
- "bytes"
- "encoding/binary"
- "encoding/json"
- "fmt"
- "hash/crc32"
- "io"
- "io/ioutil"
- "math"
- "math/rand"
- "os"
- "sort"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/dgraph-io/badger/options"
- "github.com/dgraph-io/badger/y"
- "github.com/pkg/errors"
- "golang.org/x/net/trace"
-)
-
-// Values have their first byte being byteData or byteDelete. This helps us distinguish between
-// a key that has never been seen and a key that has been explicitly deleted.
-const (
- bitDelete byte = 1 << 0 // Set if the key has been deleted.
- bitValuePointer byte = 1 << 1 // Set if the value is NOT stored directly next to key.
- bitDiscardEarlierVersions byte = 1 << 2 // Set if earlier versions can be discarded.
- // Set if item shouldn't be discarded via compactions (used by merge operator)
- bitMergeEntry byte = 1 << 3
- // The MSB 2 bits are for transactions.
- bitTxn byte = 1 << 6 // Set if the entry is part of a txn.
- bitFinTxn byte = 1 << 7 // Set if the entry is to indicate end of txn in value log.
-
- mi int64 = 1 << 20
-
- // The number of updates after which discard map should be flushed into badger.
- discardStatsFlushThreshold = 100
-)
-
-type logFile struct {
- path string
- // This is a lock on the log file. It guards the fd’s value, the file’s
- // existence and the file’s memory map.
- //
- // Use shared ownership when reading/writing the file or memory map, use
- // exclusive ownership to open/close the descriptor, unmap or remove the file.
- lock sync.RWMutex
- fd *os.File
- fid uint32
- fmap []byte
- size uint32
- loadingMode options.FileLoadingMode
-}
-
-// openReadOnly assumes that we have a write lock on logFile.
-func (lf *logFile) openReadOnly() error {
- var err error
- lf.fd, err = os.OpenFile(lf.path, os.O_RDONLY, 0666)
- if err != nil {
- return errors.Wrapf(err, "Unable to open %q as RDONLY.", lf.path)
- }
-
- fi, err := lf.fd.Stat()
- if err != nil {
- return errors.Wrapf(err, "Unable to check stat for %q", lf.path)
- }
- y.AssertTrue(fi.Size() <= math.MaxUint32)
- lf.size = uint32(fi.Size())
-
- if err = lf.mmap(fi.Size()); err != nil {
- _ = lf.fd.Close()
- return y.Wrapf(err, "Unable to map file: %q", fi.Name())
- }
-
- return nil
-}
-
-func (lf *logFile) mmap(size int64) (err error) {
- if lf.loadingMode != options.MemoryMap {
- // Nothing to do
- return nil
- }
- lf.fmap, err = y.Mmap(lf.fd, false, size)
- if err == nil {
- err = y.Madvise(lf.fmap, false) // Disable readahead
- }
- return err
-}
-
-func (lf *logFile) munmap() (err error) {
- if lf.loadingMode != options.MemoryMap {
- // Nothing to do
- return nil
- }
- if err := y.Munmap(lf.fmap); err != nil {
- return errors.Wrapf(err, "Unable to munmap value log: %q", lf.path)
- }
- return nil
-}
-
-// Acquire lock on mmap/file if you are calling this
-func (lf *logFile) read(p valuePointer, s *y.Slice) (buf []byte, err error) {
- var nbr int64
- offset := p.Offset
- if lf.loadingMode == options.FileIO {
- buf = s.Resize(int(p.Len))
- var n int
- n, err = lf.fd.ReadAt(buf, int64(offset))
- nbr = int64(n)
- } else {
- // Do not convert size to uint32, because the lf.fmap can be of size
- // 4GB, which overflows the uint32 during conversion to make the size 0,
- // causing the read to fail with ErrEOF. See issue #585.
- size := int64(len(lf.fmap))
- valsz := p.Len
- if int64(offset) >= size || int64(offset+valsz) > size {
- err = y.ErrEOF
- } else {
- buf = lf.fmap[offset : offset+valsz]
- nbr = int64(valsz)
- }
- }
- y.NumReads.Add(1)
- y.NumBytesRead.Add(nbr)
- return buf, err
-}
-
-func (lf *logFile) doneWriting(offset uint32) error {
- // Sync before acquiring lock. (We call this from write() and thus know we have shared access
- // to the fd.)
- if err := y.FileSync(lf.fd); err != nil {
- return errors.Wrapf(err, "Unable to sync value log: %q", lf.path)
- }
- // Close and reopen the file read-only. Acquire lock because fd will become invalid for a bit.
- // Acquiring the lock is bad because, while we don't hold the lock for a long time, it forces
- // one batch of readers wait for the preceding batch of readers to finish.
- //
- // If there's a benefit to reopening the file read-only, it might be on Windows. I don't know
- // what the benefit is. Consider keeping the file read-write, or use fcntl to change
- // permissions.
- lf.lock.Lock()
- defer lf.lock.Unlock()
- if err := lf.munmap(); err != nil {
- return err
- }
- // TODO: Confirm if we need to run a file sync after truncation.
- // Truncation must run after unmapping, otherwise Windows would crap itself.
- if err := lf.fd.Truncate(int64(offset)); err != nil {
- return errors.Wrapf(err, "Unable to truncate file: %q", lf.path)
- }
- if err := lf.fd.Close(); err != nil {
- return errors.Wrapf(err, "Unable to close value log: %q", lf.path)
- }
-
- return lf.openReadOnly()
-}
-
-// You must hold lf.lock to sync()
-func (lf *logFile) sync() error {
- return y.FileSync(lf.fd)
-}
-
-var errStop = errors.New("Stop iteration")
-var errTruncate = errors.New("Do truncate")
-var errDeleteVlogFile = errors.New("Delete vlog file")
-
-type logEntry func(e Entry, vp valuePointer) error
-
-type safeRead struct {
- k []byte
- v []byte
-
- recordOffset uint32
-}
-
-func (r *safeRead) Entry(reader *bufio.Reader) (*Entry, error) {
- var hbuf [headerBufSize]byte
- var err error
-
- hash := crc32.New(y.CastagnoliCrcTable)
- tee := io.TeeReader(reader, hash)
- if _, err = io.ReadFull(tee, hbuf[:]); err != nil {
- return nil, err
- }
-
- var h header
- h.Decode(hbuf[:])
- if h.klen > uint32(1<<16) { // Key length must be below uint16.
- return nil, errTruncate
- }
- kl := int(h.klen)
- if cap(r.k) < kl {
- r.k = make([]byte, 2*kl)
- }
- vl := int(h.vlen)
- if cap(r.v) < vl {
- r.v = make([]byte, 2*vl)
- }
-
- e := &Entry{}
- e.offset = r.recordOffset
- e.Key = r.k[:kl]
- e.Value = r.v[:vl]
-
- if _, err = io.ReadFull(tee, e.Key); err != nil {
- if err == io.EOF {
- err = errTruncate
- }
- return nil, err
- }
- if _, err = io.ReadFull(tee, e.Value); err != nil {
- if err == io.EOF {
- err = errTruncate
- }
- return nil, err
- }
- var crcBuf [4]byte
- if _, err = io.ReadFull(reader, crcBuf[:]); err != nil {
- if err == io.EOF {
- err = errTruncate
- }
- return nil, err
- }
- crc := binary.BigEndian.Uint32(crcBuf[:])
- if crc != hash.Sum32() {
- return nil, errTruncate
- }
- e.meta = h.meta
- e.UserMeta = h.userMeta
- e.ExpiresAt = h.expiresAt
- return e, nil
-}
-
-// iterate iterates over log file. It doesn't not allocate new memory for every kv pair.
-// Therefore, the kv pair is only valid for the duration of fn call.
-func (vlog *valueLog) iterate(lf *logFile, offset uint32, fn logEntry) (uint32, error) {
- fi, err := lf.fd.Stat()
- if err != nil {
- return 0, err
- }
- if int64(offset) == fi.Size() {
- // We're at the end of the file already. No need to do anything.
- return offset, nil
- }
- if vlog.opt.ReadOnly {
- // We're not at the end of the file. We'd need to replay the entries, or
- // possibly truncate the file.
- return 0, ErrReplayNeeded
- }
-
- // We're not at the end of the file. Let's Seek to the offset and start reading.
- if _, err := lf.fd.Seek(int64(offset), io.SeekStart); err != nil {
- return 0, errFile(err, lf.path, "Unable to seek")
- }
-
- reader := bufio.NewReader(lf.fd)
- read := &safeRead{
- k: make([]byte, 10),
- v: make([]byte, 10),
- recordOffset: offset,
- }
-
- var lastCommit uint64
- var validEndOffset uint32
- for {
- e, err := read.Entry(reader)
- if err == io.EOF {
- break
- } else if err == io.ErrUnexpectedEOF || err == errTruncate {
- break
- } else if err != nil {
- return 0, err
- } else if e == nil {
- continue
- }
-
- var vp valuePointer
- vp.Len = uint32(headerBufSize + len(e.Key) + len(e.Value) + crc32.Size)
- read.recordOffset += vp.Len
-
- vp.Offset = e.offset
- vp.Fid = lf.fid
-
- if e.meta&bitTxn > 0 {
- txnTs := y.ParseTs(e.Key)
- if lastCommit == 0 {
- lastCommit = txnTs
- }
- if lastCommit != txnTs {
- break
- }
-
- } else if e.meta&bitFinTxn > 0 {
- txnTs, err := strconv.ParseUint(string(e.Value), 10, 64)
- if err != nil || lastCommit != txnTs {
- break
- }
- // Got the end of txn. Now we can store them.
- lastCommit = 0
- validEndOffset = read.recordOffset
-
- } else {
- if lastCommit != 0 {
- // This is most likely an entry which was moved as part of GC.
- // We shouldn't get this entry in the middle of a transaction.
- break
- }
- validEndOffset = read.recordOffset
- }
-
- if err := fn(*e, vp); err != nil {
- if err == errStop {
- break
- }
- return 0, errFile(err, lf.path, "Iteration function")
- }
- }
- return validEndOffset, nil
-}
-
-func (vlog *valueLog) rewrite(f *logFile, tr trace.Trace) error {
- maxFid := atomic.LoadUint32(&vlog.maxFid)
- y.AssertTruef(uint32(f.fid) < maxFid, "fid to move: %d. Current max fid: %d", f.fid, maxFid)
- tr.LazyPrintf("Rewriting fid: %d", f.fid)
-
- wb := make([]*Entry, 0, 1000)
- var size int64
-
- y.AssertTrue(vlog.db != nil)
- var count, moved int
- fe := func(e Entry) error {
- count++
- if count%100000 == 0 {
- tr.LazyPrintf("Processing entry %d", count)
- }
-
- vs, err := vlog.db.get(e.Key)
- if err != nil {
- return err
- }
- if discardEntry(e, vs) {
- return nil
- }
-
- // Value is still present in value log.
- if len(vs.Value) == 0 {
- return errors.Errorf("Empty value: %+v", vs)
- }
- var vp valuePointer
- vp.Decode(vs.Value)
-
- if vp.Fid > f.fid {
- return nil
- }
- if vp.Offset > e.offset {
- return nil
- }
- if vp.Fid == f.fid && vp.Offset == e.offset {
- moved++
- // This new entry only contains the key, and a pointer to the value.
- ne := new(Entry)
- ne.meta = 0 // Remove all bits. Different keyspace doesn't need these bits.
- ne.UserMeta = e.UserMeta
-
- // Create a new key in a separate keyspace, prefixed by moveKey. We are not
- // allowed to rewrite an older version of key in the LSM tree, because then this older
- // version would be at the top of the LSM tree. To work correctly, reads expect the
- // latest versions to be at the top, and the older versions at the bottom.
- if bytes.HasPrefix(e.Key, badgerMove) {
- ne.Key = append([]byte{}, e.Key...)
- } else {
- ne.Key = make([]byte, len(badgerMove)+len(e.Key))
- n := copy(ne.Key, badgerMove)
- copy(ne.Key[n:], e.Key)
- }
-
- ne.Value = append([]byte{}, e.Value...)
- wb = append(wb, ne)
- size += int64(e.estimateSize(vlog.opt.ValueThreshold))
- if size >= 64*mi {
- tr.LazyPrintf("request has %d entries, size %d", len(wb), size)
- if err := vlog.db.batchSet(wb); err != nil {
- return err
- }
- size = 0
- wb = wb[:0]
- }
- } else {
- vlog.db.opt.Warningf("This entry should have been caught. %+v\n", e)
- }
- return nil
- }
-
- _, err := vlog.iterate(f, 0, func(e Entry, vp valuePointer) error {
- return fe(e)
- })
- if err != nil {
- return err
- }
-
- tr.LazyPrintf("request has %d entries, size %d", len(wb), size)
- batchSize := 1024
- var loops int
- for i := 0; i < len(wb); {
- loops++
- if batchSize == 0 {
- vlog.db.opt.Warningf("We shouldn't reach batch size of zero.")
- return ErrNoRewrite
- }
- end := i + batchSize
- if end > len(wb) {
- end = len(wb)
- }
- if err := vlog.db.batchSet(wb[i:end]); err != nil {
- if err == ErrTxnTooBig {
- // Decrease the batch size to half.
- batchSize = batchSize / 2
- tr.LazyPrintf("Dropped batch size to %d", batchSize)
- continue
- }
- return err
- }
- i += batchSize
- }
- tr.LazyPrintf("Processed %d entries in %d loops", len(wb), loops)
- tr.LazyPrintf("Total entries: %d. Moved: %d", count, moved)
- tr.LazyPrintf("Removing fid: %d", f.fid)
- var deleteFileNow bool
- // Entries written to LSM. Remove the older file now.
- {
- vlog.filesLock.Lock()
- // Just a sanity-check.
- if _, ok := vlog.filesMap[f.fid]; !ok {
- vlog.filesLock.Unlock()
- return errors.Errorf("Unable to find fid: %d", f.fid)
- }
- if vlog.iteratorCount() == 0 {
- delete(vlog.filesMap, f.fid)
- deleteFileNow = true
- } else {
- vlog.filesToBeDeleted = append(vlog.filesToBeDeleted, f.fid)
- }
- vlog.filesLock.Unlock()
- }
-
- if deleteFileNow {
- if err := vlog.deleteLogFile(f); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (vlog *valueLog) deleteMoveKeysFor(fid uint32, tr trace.Trace) error {
- db := vlog.db
- var result []*Entry
- var count, pointers uint64
- tr.LazyPrintf("Iterating over move keys to find invalids for fid: %d", fid)
- err := db.View(func(txn *Txn) error {
- opt := DefaultIteratorOptions
- opt.InternalAccess = true
- opt.PrefetchValues = false
- itr := txn.NewIterator(opt)
- defer itr.Close()
-
- for itr.Seek(badgerMove); itr.ValidForPrefix(badgerMove); itr.Next() {
- count++
- item := itr.Item()
- if item.meta&bitValuePointer == 0 {
- continue
- }
- pointers++
- var vp valuePointer
- vp.Decode(item.vptr)
- if vp.Fid == fid {
- e := &Entry{Key: y.KeyWithTs(item.Key(), item.Version()), meta: bitDelete}
- result = append(result, e)
- }
- }
- return nil
- })
- if err != nil {
- tr.LazyPrintf("Got error while iterating move keys: %v", err)
- tr.SetError()
- return err
- }
- tr.LazyPrintf("Num total move keys: %d. Num pointers: %d", count, pointers)
- tr.LazyPrintf("Number of invalid move keys found: %d", len(result))
- batchSize := 10240
- for i := 0; i < len(result); {
- end := i + batchSize
- if end > len(result) {
- end = len(result)
- }
- if err := db.batchSet(result[i:end]); err != nil {
- if err == ErrTxnTooBig {
- batchSize /= 2
- tr.LazyPrintf("Dropped batch size to %d", batchSize)
- continue
- }
- tr.LazyPrintf("Error while doing batchSet: %v", err)
- tr.SetError()
- return err
- }
- i += batchSize
- }
- tr.LazyPrintf("Move keys deletion done.")
- return nil
-}
-
-func (vlog *valueLog) incrIteratorCount() {
- atomic.AddInt32(&vlog.numActiveIterators, 1)
-}
-
-func (vlog *valueLog) iteratorCount() int {
- return int(atomic.LoadInt32(&vlog.numActiveIterators))
-}
-
-func (vlog *valueLog) decrIteratorCount() error {
- num := atomic.AddInt32(&vlog.numActiveIterators, -1)
- if num != 0 {
- return nil
- }
-
- vlog.filesLock.Lock()
- lfs := make([]*logFile, 0, len(vlog.filesToBeDeleted))
- for _, id := range vlog.filesToBeDeleted {
- lfs = append(lfs, vlog.filesMap[id])
- delete(vlog.filesMap, id)
- }
- vlog.filesToBeDeleted = nil
- vlog.filesLock.Unlock()
-
- for _, lf := range lfs {
- if err := vlog.deleteLogFile(lf); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (vlog *valueLog) deleteLogFile(lf *logFile) error {
- if lf == nil {
- return nil
- }
- path := vlog.fpath(lf.fid)
- if err := lf.munmap(); err != nil {
- _ = lf.fd.Close()
- return err
- }
- if err := lf.fd.Close(); err != nil {
- return err
- }
- return os.Remove(path)
-}
-
-func (vlog *valueLog) dropAll() (int, error) {
- // We don't want to block dropAll on any pending transactions. So, don't worry about iterator
- // count.
- var count int
- deleteAll := func() error {
- vlog.filesLock.Lock()
- defer vlog.filesLock.Unlock()
- for _, lf := range vlog.filesMap {
- if err := vlog.deleteLogFile(lf); err != nil {
- return err
- }
- count++
- }
- vlog.filesMap = make(map[uint32]*logFile)
- return nil
- }
- if err := deleteAll(); err != nil {
- return count, err
- }
-
- vlog.db.opt.Infof("Value logs deleted. Creating value log file: 0")
- if _, err := vlog.createVlogFile(0); err != nil {
- return count, err
- }
- atomic.StoreUint32(&vlog.maxFid, 0)
- return count, nil
-}
-
-// lfDiscardStats keeps track of the amount of data that could be discarded for
-// a given logfile.
-type lfDiscardStats struct {
- sync.Mutex
- m map[uint32]int64
- updatesSinceFlush int
-}
-
-type valueLog struct {
- dirPath string
- elog trace.EventLog
-
- // guards our view of which files exist, which to be deleted, how many active iterators
- filesLock sync.RWMutex
- filesMap map[uint32]*logFile
- filesToBeDeleted []uint32
- // A refcount of iterators -- when this hits zero, we can delete the filesToBeDeleted.
- numActiveIterators int32
-
- db *DB
- maxFid uint32 // accessed via atomics.
- writableLogOffset uint32 // read by read, written by write. Must access via atomics.
- numEntriesWritten uint32
- opt Options
-
- garbageCh chan struct{}
- lfDiscardStats *lfDiscardStats
-}
-
-func vlogFilePath(dirPath string, fid uint32) string {
- return fmt.Sprintf("%s%s%06d.vlog", dirPath, string(os.PathSeparator), fid)
-}
-
-func (vlog *valueLog) fpath(fid uint32) string {
- return vlogFilePath(vlog.dirPath, fid)
-}
-
-func (vlog *valueLog) populateFilesMap() error {
- vlog.filesMap = make(map[uint32]*logFile)
-
- files, err := ioutil.ReadDir(vlog.dirPath)
- if err != nil {
- return errFile(err, vlog.dirPath, "Unable to open log dir.")
- }
-
- found := make(map[uint64]struct{})
- for _, file := range files {
- if !strings.HasSuffix(file.Name(), ".vlog") {
- continue
- }
- fsz := len(file.Name())
- fid, err := strconv.ParseUint(file.Name()[:fsz-5], 10, 32)
- if err != nil {
- return errFile(err, file.Name(), "Unable to parse log id.")
- }
- if _, ok := found[fid]; ok {
- return errFile(err, file.Name(), "Duplicate file found. Please delete one.")
- }
- found[fid] = struct{}{}
-
- lf := &logFile{
- fid: uint32(fid),
- path: vlog.fpath(uint32(fid)),
- loadingMode: vlog.opt.ValueLogLoadingMode,
- }
- vlog.filesMap[uint32(fid)] = lf
- if vlog.maxFid < uint32(fid) {
- vlog.maxFid = uint32(fid)
- }
- }
- return nil
-}
-
-func (vlog *valueLog) createVlogFile(fid uint32) (*logFile, error) {
- path := vlog.fpath(fid)
- lf := &logFile{
- fid: fid,
- path: path,
- loadingMode: vlog.opt.ValueLogLoadingMode,
- }
- // writableLogOffset is only written by write func, by read by Read func.
- // To avoid a race condition, all reads and updates to this variable must be
- // done via atomics.
- atomic.StoreUint32(&vlog.writableLogOffset, 0)
- vlog.numEntriesWritten = 0
-
- var err error
- if lf.fd, err = y.CreateSyncedFile(path, vlog.opt.SyncWrites); err != nil {
- return nil, errFile(err, lf.path, "Create value log file")
- }
- if err = syncDir(vlog.dirPath); err != nil {
- return nil, errFile(err, vlog.dirPath, "Sync value log dir")
- }
- if err = lf.mmap(2 * vlog.opt.ValueLogFileSize); err != nil {
- return nil, errFile(err, lf.path, "Mmap value log file")
- }
-
- vlog.filesLock.Lock()
- vlog.filesMap[fid] = lf
- vlog.filesLock.Unlock()
-
- return lf, nil
-}
-
-func errFile(err error, path string, msg string) error {
- return fmt.Errorf("%s. Path=%s. Error=%v", msg, path, err)
-}
-
-func (vlog *valueLog) replayLog(lf *logFile, offset uint32, replayFn logEntry) error {
- var err error
- mode := os.O_RDONLY
- if vlog.opt.Truncate {
- // We should open the file in RW mode, so it can be truncated.
- mode = os.O_RDWR
- }
- lf.fd, err = os.OpenFile(lf.path, mode, 0)
- if err != nil {
- return errFile(err, lf.path, "Open file")
- }
- defer lf.fd.Close()
-
- fi, err := lf.fd.Stat()
- if err != nil {
- return errFile(err, lf.path, "Unable to run file.Stat")
- }
-
- // Alright, let's iterate now.
- endOffset, err := vlog.iterate(lf, offset, replayFn)
- if err != nil {
- return errFile(err, lf.path, "Unable to replay logfile")
- }
- if int64(endOffset) == fi.Size() {
- return nil
- }
-
- // End offset is different from file size. So, we should truncate the file
- // to that size.
- y.AssertTrue(int64(endOffset) <= fi.Size())
- if !vlog.opt.Truncate {
- return ErrTruncateNeeded
- }
-
- // The entire file should be truncated (i.e. it should be deleted).
- // If fid == maxFid then it's okay to truncate the entire file since it will be
- // used for future additions. Also, it's okay if the last file has size zero.
- // We mmap 2*opt.ValueLogSize for the last file. See vlog.Open() function
- if endOffset == 0 && lf.fid != vlog.maxFid {
- return errDeleteVlogFile
- }
- if err := lf.fd.Truncate(int64(endOffset)); err != nil {
- return errFile(err, lf.path, fmt.Sprintf(
- "Truncation needed at offset %d. Can be done manually as well.", endOffset))
- }
- return nil
-}
-
-func (vlog *valueLog) open(db *DB, ptr valuePointer, replayFn logEntry) error {
- opt := db.opt
- vlog.opt = opt
- vlog.dirPath = opt.ValueDir
- vlog.db = db
- vlog.elog = trace.NewEventLog("Badger", "Valuelog")
- vlog.garbageCh = make(chan struct{}, 1) // Only allow one GC at a time.
- vlog.lfDiscardStats = &lfDiscardStats{m: make(map[uint32]int64)}
- if err := vlog.populateFilesMap(); err != nil {
- return err
- }
- // If no files are found, then create a new file.
- if len(vlog.filesMap) == 0 {
- _, err := vlog.createVlogFile(0)
- return err
- }
-
- fids := vlog.sortedFids()
- for _, fid := range fids {
- lf, ok := vlog.filesMap[fid]
- y.AssertTrue(ok)
-
- // This file is before the value head pointer. So, we don't need to
- // replay it, and can just open it in readonly mode.
- if fid < ptr.Fid {
- if err := lf.openReadOnly(); err != nil {
- return err
- }
- continue
- }
-
- var offset uint32
- if fid == ptr.Fid {
- offset = ptr.Offset + ptr.Len
- }
- vlog.db.opt.Infof("Replaying file id: %d at offset: %d\n", fid, offset)
- now := time.Now()
- // Replay and possible truncation done. Now we can open the file as per
- // user specified options.
- if err := vlog.replayLog(lf, offset, replayFn); err != nil {
- // Log file is corrupted. Delete it.
- if err == errDeleteVlogFile {
- delete(vlog.filesMap, fid)
- path := vlog.fpath(lf.fid)
- if err := os.Remove(path); err != nil {
- return y.Wrapf(err, "failed to delete empty value log file: %q", path)
- }
- continue
- }
- return err
- }
- vlog.db.opt.Infof("Replay took: %s\n", time.Since(now))
-
- if fid < vlog.maxFid {
- if err := lf.openReadOnly(); err != nil {
- return err
- }
- } else {
- var flags uint32
- switch {
- case vlog.opt.ReadOnly:
- // If we have read only, we don't need SyncWrites.
- flags |= y.ReadOnly
- case vlog.opt.SyncWrites:
- flags |= y.Sync
- }
- var err error
- if lf.fd, err = y.OpenExistingFile(vlog.fpath(fid), flags); err != nil {
- return errFile(err, lf.path, "Open existing file")
- }
- }
- }
-
- // Seek to the end to start writing.
- last, ok := vlog.filesMap[vlog.maxFid]
- y.AssertTrue(ok)
- lastOffset, err := last.fd.Seek(0, io.SeekEnd)
- if err != nil {
- return errFile(err, last.path, "file.Seek to end")
- }
- vlog.writableLogOffset = uint32(lastOffset)
-
- // Update the head to point to the updated tail. Otherwise, even after doing a successful
- // replay and closing the DB, the value log head does not get updated, which causes the replay
- // to happen repeatedly.
- vlog.db.vhead = valuePointer{Fid: vlog.maxFid, Offset: uint32(lastOffset)}
-
- // Map the file if needed. When we create a file, it is automatically mapped.
- if err = last.mmap(2 * opt.ValueLogFileSize); err != nil {
- return errFile(err, last.path, "Map log file")
- }
- if err := vlog.populateDiscardStats(); err != nil {
- return err
- }
- return nil
-}
-
-func (vlog *valueLog) Close() error {
- vlog.elog.Printf("Stopping garbage collection of values.")
- defer vlog.elog.Finish()
-
- var err error
- for id, f := range vlog.filesMap {
- f.lock.Lock() // We won’t release the lock.
- if munmapErr := f.munmap(); munmapErr != nil && err == nil {
- err = munmapErr
- }
-
- maxFid := atomic.LoadUint32(&vlog.maxFid)
- if !vlog.opt.ReadOnly && id == maxFid {
- // truncate writable log file to correct offset.
- if truncErr := f.fd.Truncate(
- int64(vlog.woffset())); truncErr != nil && err == nil {
- err = truncErr
- }
- }
-
- if closeErr := f.fd.Close(); closeErr != nil && err == nil {
- err = closeErr
- }
- }
- return err
-}
-
-// sortedFids returns the file id's not pending deletion, sorted. Assumes we have shared access to
-// filesMap.
-func (vlog *valueLog) sortedFids() []uint32 {
- toBeDeleted := make(map[uint32]struct{})
- for _, fid := range vlog.filesToBeDeleted {
- toBeDeleted[fid] = struct{}{}
- }
- ret := make([]uint32, 0, len(vlog.filesMap))
- for fid := range vlog.filesMap {
- if _, ok := toBeDeleted[fid]; !ok {
- ret = append(ret, fid)
- }
- }
- sort.Slice(ret, func(i, j int) bool {
- return ret[i] < ret[j]
- })
- return ret
-}
-
-type request struct {
- // Input values
- Entries []*Entry
- // Output values and wait group stuff below
- Ptrs []valuePointer
- Wg sync.WaitGroup
- Err error
- ref int32
-}
-
-func (req *request) IncrRef() {
- atomic.AddInt32(&req.ref, 1)
-}
-
-func (req *request) DecrRef() {
- nRef := atomic.AddInt32(&req.ref, -1)
- if nRef > 0 {
- return
- }
- req.Entries = nil
- requestPool.Put(req)
-}
-
-func (req *request) Wait() error {
- req.Wg.Wait()
- err := req.Err
- req.DecrRef() // DecrRef after writing to DB.
- return err
-}
-
-type requests []*request
-
-func (reqs requests) DecrRef() {
- for _, req := range reqs {
- req.DecrRef()
- }
-}
-
-// sync function syncs content of latest value log file to disk. Syncing of value log directory is
-// not required here as it happens every time a value log file rotation happens(check createVlogFile
-// function). During rotation, previous value log file also gets synced to disk. It only syncs file
-// if fid >= vlog.maxFid. In some cases such as replay(while openning db), it might be called with
-// fid < vlog.maxFid. To sync irrespective of file id just call it with math.MaxUint32.
-func (vlog *valueLog) sync(fid uint32) error {
- if vlog.opt.SyncWrites {
- return nil
- }
-
- vlog.filesLock.RLock()
- maxFid := atomic.LoadUint32(&vlog.maxFid)
- // During replay it is possible to get sync call with fid less than maxFid.
- // Because older file has already been synced, we can return from here.
- if fid < maxFid || len(vlog.filesMap) == 0 {
- vlog.filesLock.RUnlock()
- return nil
- }
- curlf := vlog.filesMap[maxFid]
- // Sometimes it is possible that vlog.maxFid has been increased but file creation
- // with same id is still in progress and this function is called. In those cases
- // entry for the file might not be present in vlog.filesMap.
- if curlf == nil {
- vlog.filesLock.RUnlock()
- return nil
- }
- curlf.lock.RLock()
- vlog.filesLock.RUnlock()
-
- err := curlf.sync()
- curlf.lock.RUnlock()
- return err
-}
-
-func (vlog *valueLog) woffset() uint32 {
- return atomic.LoadUint32(&vlog.writableLogOffset)
-}
-
-// write is thread-unsafe by design and should not be called concurrently.
-func (vlog *valueLog) write(reqs []*request) error {
- vlog.filesLock.RLock()
- maxFid := atomic.LoadUint32(&vlog.maxFid)
- curlf := vlog.filesMap[maxFid]
- vlog.filesLock.RUnlock()
-
- var buf bytes.Buffer
- toDisk := func() error {
- if buf.Len() == 0 {
- return nil
- }
- vlog.elog.Printf("Flushing %d blocks of total size: %d", len(reqs), buf.Len())
- n, err := curlf.fd.Write(buf.Bytes())
- if err != nil {
- return errors.Wrapf(err, "Unable to write to value log file: %q", curlf.path)
- }
- buf.Reset()
- y.NumWrites.Add(1)
- y.NumBytesWritten.Add(int64(n))
- vlog.elog.Printf("Done")
- atomic.AddUint32(&vlog.writableLogOffset, uint32(n))
-
- if vlog.woffset() > uint32(vlog.opt.ValueLogFileSize) ||
- vlog.numEntriesWritten > vlog.opt.ValueLogMaxEntries {
- var err error
- if err = curlf.doneWriting(vlog.woffset()); err != nil {
- return err
- }
-
- newid := atomic.AddUint32(&vlog.maxFid, 1)
- y.AssertTruef(newid > 0, "newid has overflown uint32: %v", newid)
- newlf, err := vlog.createVlogFile(newid)
- if err != nil {
- return err
- }
- curlf = newlf
- atomic.AddInt32(&vlog.db.logRotates, 1)
- }
- return nil
- }
-
- for i := range reqs {
- b := reqs[i]
- b.Ptrs = b.Ptrs[:0]
- var written int
- for j := range b.Entries {
- e := b.Entries[j]
- if e.skipVlog {
- b.Ptrs = append(b.Ptrs, valuePointer{})
- continue
- }
- var p valuePointer
-
- p.Fid = curlf.fid
- // Use the offset including buffer length so far.
- p.Offset = vlog.woffset() + uint32(buf.Len())
- plen, err := encodeEntry(e, &buf) // Now encode the entry into buffer.
- if err != nil {
- return err
- }
- p.Len = uint32(plen)
- b.Ptrs = append(b.Ptrs, p)
- written++
- }
- vlog.numEntriesWritten += uint32(written)
- // We write to disk here so that all entries that are part of the same transaction are
- // written to the same vlog file.
- writeNow :=
- vlog.woffset()+uint32(buf.Len()) > uint32(vlog.opt.ValueLogFileSize) ||
- vlog.numEntriesWritten > uint32(vlog.opt.ValueLogMaxEntries)
- if writeNow {
- if err := toDisk(); err != nil {
- return err
- }
- }
- }
- return toDisk()
-}
-
-// Gets the logFile and acquires and RLock() for the mmap. You must call RUnlock on the file
-// (if non-nil)
-func (vlog *valueLog) getFileRLocked(fid uint32) (*logFile, error) {
- vlog.filesLock.RLock()
- defer vlog.filesLock.RUnlock()
- ret, ok := vlog.filesMap[fid]
- if !ok {
- // log file has gone away, will need to retry the operation.
- return nil, ErrRetry
- }
- ret.lock.RLock()
- return ret, nil
-}
-
-// Read reads the value log at a given location.
-// TODO: Make this read private.
-func (vlog *valueLog) Read(vp valuePointer, s *y.Slice) ([]byte, func(), error) {
- // Check for valid offset if we are reading to writable log.
- maxFid := atomic.LoadUint32(&vlog.maxFid)
- if vp.Fid == maxFid && vp.Offset >= vlog.woffset() {
- return nil, nil, errors.Errorf(
- "Invalid value pointer offset: %d greater than current offset: %d",
- vp.Offset, vlog.woffset())
- }
-
- buf, cb, err := vlog.readValueBytes(vp, s)
- if err != nil {
- return nil, cb, err
- }
- var h header
- h.Decode(buf)
- n := uint32(headerBufSize) + h.klen
- return buf[n : n+h.vlen], cb, nil
-}
-
-func (vlog *valueLog) readValueBytes(vp valuePointer, s *y.Slice) ([]byte, func(), error) {
- lf, err := vlog.getFileRLocked(vp.Fid)
- if err != nil {
- return nil, nil, err
- }
-
- buf, err := lf.read(vp, s)
- if vlog.opt.ValueLogLoadingMode == options.MemoryMap {
- return buf, lf.lock.RUnlock, err
- }
- // If we are using File I/O we unlock the file immediately
- // and return an empty function as callback.
- lf.lock.RUnlock()
- return buf, nil, err
-}
-
-// Test helper
-func valueBytesToEntry(buf []byte) (e Entry) {
- var h header
- h.Decode(buf)
- n := uint32(headerBufSize)
-
- e.Key = buf[n : n+h.klen]
- n += h.klen
- e.meta = h.meta
- e.UserMeta = h.userMeta
- e.Value = buf[n : n+h.vlen]
- return
-}
-
-func (vlog *valueLog) pickLog(head valuePointer, tr trace.Trace) (files []*logFile) {
- vlog.filesLock.RLock()
- defer vlog.filesLock.RUnlock()
- fids := vlog.sortedFids()
- if len(fids) <= 1 {
- tr.LazyPrintf("Only one or less value log file.")
- return nil
- } else if head.Fid == 0 {
- tr.LazyPrintf("Head pointer is at zero.")
- return nil
- }
-
- // Pick a candidate that contains the largest amount of discardable data
- candidate := struct {
- fid uint32
- discard int64
- }{math.MaxUint32, 0}
- vlog.lfDiscardStats.Lock()
- for _, fid := range fids {
- if fid >= head.Fid {
- break
- }
- if vlog.lfDiscardStats.m[fid] > candidate.discard {
- candidate.fid = fid
- candidate.discard = vlog.lfDiscardStats.m[fid]
- }
- }
- vlog.lfDiscardStats.Unlock()
-
- if candidate.fid != math.MaxUint32 { // Found a candidate
- tr.LazyPrintf("Found candidate via discard stats: %v", candidate)
- files = append(files, vlog.filesMap[candidate.fid])
- } else {
- tr.LazyPrintf("Could not find candidate via discard stats. Randomly picking one.")
- }
-
- // Fallback to randomly picking a log file
- var idxHead int
- for i, fid := range fids {
- if fid == head.Fid {
- idxHead = i
- break
- }
- }
- if idxHead == 0 { // Not found or first file
- tr.LazyPrintf("Could not find any file.")
- return nil
- }
- idx := rand.Intn(idxHead) // Don’t include head.Fid. We pick a random file before it.
- if idx > 0 {
- idx = rand.Intn(idx + 1) // Another level of rand to favor smaller fids.
- }
- tr.LazyPrintf("Randomly chose fid: %d", fids[idx])
- files = append(files, vlog.filesMap[fids[idx]])
- return files
-}
-
-func discardEntry(e Entry, vs y.ValueStruct) bool {
- if vs.Version != y.ParseTs(e.Key) {
- // Version not found. Discard.
- return true
- }
- if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) {
- return true
- }
- if (vs.Meta & bitValuePointer) == 0 {
- // Key also stores the value in LSM. Discard.
- return true
- }
- if (vs.Meta & bitFinTxn) > 0 {
- // Just a txn finish entry. Discard.
- return true
- }
- return false
-}
-
-func (vlog *valueLog) doRunGC(lf *logFile, discardRatio float64, tr trace.Trace) (err error) {
- // Update stats before exiting
- defer func() {
- if err == nil {
- vlog.lfDiscardStats.Lock()
- delete(vlog.lfDiscardStats.m, lf.fid)
- vlog.lfDiscardStats.Unlock()
- }
- }()
-
- type reason struct {
- total float64
- discard float64
- count int
- }
-
- fi, err := lf.fd.Stat()
- if err != nil {
- tr.LazyPrintf("Error while finding file size: %v", err)
- tr.SetError()
- return err
- }
-
- // Set up the sampling window sizes.
- sizeWindow := float64(fi.Size()) * 0.1 // 10% of the file as window.
- sizeWindowM := sizeWindow / (1 << 20) // in MBs.
- countWindow := int(float64(vlog.opt.ValueLogMaxEntries) * 0.01) // 1% of num entries.
- tr.LazyPrintf("Size window: %5.2f. Count window: %d.", sizeWindow, countWindow)
-
- // Pick a random start point for the log.
- skipFirstM := float64(rand.Int63n(fi.Size())) // Pick a random starting location.
- skipFirstM -= sizeWindow // Avoid hitting EOF by moving back by window.
- skipFirstM /= float64(mi) // Convert to MBs.
- tr.LazyPrintf("Skip first %5.2f MB of file of size: %d MB", skipFirstM, fi.Size()/mi)
- var skipped float64
-
- var r reason
- start := time.Now()
- y.AssertTrue(vlog.db != nil)
- s := new(y.Slice)
- var numIterations int
- _, err = vlog.iterate(lf, 0, func(e Entry, vp valuePointer) error {
- numIterations++
- esz := float64(vp.Len) / (1 << 20) // in MBs.
- if skipped < skipFirstM {
- skipped += esz
- return nil
- }
-
- // Sample until we reach the window sizes or exceed 10 seconds.
- if r.count > countWindow {
- tr.LazyPrintf("Stopping sampling after %d entries.", countWindow)
- return errStop
- }
- if r.total > sizeWindowM {
- tr.LazyPrintf("Stopping sampling after reaching window size.")
- return errStop
- }
- if time.Since(start) > 10*time.Second {
- tr.LazyPrintf("Stopping sampling after 10 seconds.")
- return errStop
- }
- r.total += esz
- r.count++
-
- vs, err := vlog.db.get(e.Key)
- if err != nil {
- return err
- }
- if discardEntry(e, vs) {
- r.discard += esz
- return nil
- }
-
- // Value is still present in value log.
- y.AssertTrue(len(vs.Value) > 0)
- vp.Decode(vs.Value)
-
- if vp.Fid > lf.fid {
- // Value is present in a later log. Discard.
- r.discard += esz
- return nil
- }
- if vp.Offset > e.offset {
- // Value is present in a later offset, but in the same log.
- r.discard += esz
- return nil
- }
- if vp.Fid == lf.fid && vp.Offset == e.offset {
- // This is still the active entry. This would need to be rewritten.
-
- } else {
- vlog.elog.Printf("Reason=%+v\n", r)
-
- buf, cb, err := vlog.readValueBytes(vp, s)
- if err != nil {
- return errStop
- }
- ne := valueBytesToEntry(buf)
- ne.offset = vp.Offset
- ne.print("Latest Entry Header in LSM")
- e.print("Latest Entry in Log")
- runCallback(cb)
- return errors.Errorf("This shouldn't happen. Latest Pointer:%+v. Meta:%v.",
- vp, vs.Meta)
- }
- return nil
- })
-
- if err != nil {
- tr.LazyPrintf("Error while iterating for RunGC: %v", err)
- tr.SetError()
- return err
- }
- tr.LazyPrintf("Fid: %d. Skipped: %5.2fMB Num iterations: %d. Data status=%+v\n",
- lf.fid, skipped, numIterations, r)
-
- // If we couldn't sample at least a 1000 KV pairs or at least 75% of the window size,
- // and what we can discard is below the threshold, we should skip the rewrite.
- if (r.count < countWindow && r.total < sizeWindowM*0.75) || r.discard < discardRatio*r.total {
- tr.LazyPrintf("Skipping GC on fid: %d", lf.fid)
- return ErrNoRewrite
- }
- if err = vlog.rewrite(lf, tr); err != nil {
- return err
- }
- tr.LazyPrintf("Done rewriting.")
- return nil
-}
-
-func (vlog *valueLog) waitOnGC(lc *y.Closer) {
- defer lc.Done()
-
- <-lc.HasBeenClosed() // Wait for lc to be closed.
-
- // Block any GC in progress to finish, and don't allow any more writes to runGC by filling up
- // the channel of size 1.
- vlog.garbageCh <- struct{}{}
-}
-
-func (vlog *valueLog) runGC(discardRatio float64, head valuePointer) error {
- select {
- case vlog.garbageCh <- struct{}{}:
- // Pick a log file for GC.
- tr := trace.New("Badger.ValueLog", "GC")
- tr.SetMaxEvents(100)
- defer func() {
- tr.Finish()
- <-vlog.garbageCh
- }()
-
- var err error
- files := vlog.pickLog(head, tr)
- if len(files) == 0 {
- tr.LazyPrintf("PickLog returned zero results.")
- return ErrNoRewrite
- }
- tried := make(map[uint32]bool)
- for _, lf := range files {
- if _, done := tried[lf.fid]; done {
- continue
- }
- tried[lf.fid] = true
- err = vlog.doRunGC(lf, discardRatio, tr)
- if err == nil {
- return vlog.deleteMoveKeysFor(lf.fid, tr)
- }
- }
- return err
- default:
- return ErrRejected
- }
-}
-
-func (vlog *valueLog) updateDiscardStats(stats map[uint32]int64) error {
- vlog.lfDiscardStats.Lock()
- for fid, sz := range stats {
- vlog.lfDiscardStats.m[fid] += sz
- vlog.lfDiscardStats.updatesSinceFlush++
- }
- vlog.lfDiscardStats.Unlock()
- if vlog.lfDiscardStats.updatesSinceFlush > discardStatsFlushThreshold {
- if err := vlog.flushDiscardStats(); err != nil {
- return err
- }
- vlog.lfDiscardStats.updatesSinceFlush = 0
- }
- return nil
-}
-
-// flushDiscardStats inserts discard stats into badger. Returns error on failure.
-func (vlog *valueLog) flushDiscardStats() error {
- if len(vlog.lfDiscardStats.m) == 0 {
- return nil
- }
- entries := []*Entry{{
- Key: y.KeyWithTs(lfDiscardStatsKey, 1),
- Value: vlog.encodedDiscardStats(),
- }}
- req, err := vlog.db.sendToWriteCh(entries)
- if err != nil {
- return errors.Wrapf(err, "failed to push discard stats to write channel")
- }
- return req.Wait()
-}
-
-// encodedDiscardStats returns []byte representation of lfDiscardStats
-// This will be called while storing stats in BadgerDB
-func (vlog *valueLog) encodedDiscardStats() []byte {
- vlog.lfDiscardStats.Lock()
- defer vlog.lfDiscardStats.Unlock()
-
- encodedStats, _ := json.Marshal(vlog.lfDiscardStats.m)
- return encodedStats
-}
-
-// populateDiscardStats populates vlog.lfDiscardStats
-// This function will be called while initializing valueLog
-func (vlog *valueLog) populateDiscardStats() error {
- discardStatsKey := y.KeyWithTs(lfDiscardStatsKey, math.MaxUint64)
- vs, err := vlog.db.get(discardStatsKey)
- if err != nil {
- return err
- }
-
- // check if value is Empty
- if vs.Value == nil || len(vs.Value) == 0 {
- return nil
- }
-
- var statsMap map[uint32]int64
- // discard map is stored in the vlog file.
- if vs.Meta&bitValuePointer > 0 {
- var vp valuePointer
- vp.Decode(vs.Value)
- result, cb, err := vlog.Read(vp, new(y.Slice))
- if err != nil {
- return errors.Wrapf(err, "failed to read value pointer from vlog file: %+v", vp)
- }
- defer runCallback(cb)
- if err := json.Unmarshal(result, &statsMap); err != nil {
- return errors.Wrapf(err, "failed to unmarshal discard stats")
- }
- } else {
- if err := json.Unmarshal(vs.Value, &statsMap); err != nil {
- return errors.Wrapf(err, "failed to unmarshal discard stats")
- }
- }
- vlog.opt.Debugf("Value Log Discard stats: %v", statsMap)
- vlog.lfDiscardStats = &lfDiscardStats{m: statsMap}
- return nil
-}
diff --git a/vendor/github.com/dgraph-io/badger/y/error.go b/vendor/github.com/dgraph-io/badger/y/error.go
deleted file mode 100644
index 59bb2835..00000000
--- a/vendor/github.com/dgraph-io/badger/y/error.go
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-// This file contains some functions for error handling. Note that we are moving
-// towards using x.Trace, i.e., rpc tracing using net/tracer. But for now, these
-// functions are useful for simple checks logged on one machine.
-// Some common use cases are:
-// (1) You receive an error from external lib, and would like to check/log fatal.
-// For this, use x.Check, x.Checkf. These will check for err != nil, which is
-// more common in Go. If you want to check for boolean being true, use
-// x.Assert, x.Assertf.
-// (2) You receive an error from external lib, and would like to pass on with some
-// stack trace information. In this case, use x.Wrap or x.Wrapf.
-// (3) You want to generate a new error with stack trace info. Use x.Errorf.
-
-import (
- "fmt"
- "log"
-
- "github.com/pkg/errors"
-)
-
-var debugMode = true
-
-// Check logs fatal if err != nil.
-func Check(err error) {
- if err != nil {
- log.Fatalf("%+v", Wrap(err))
- }
-}
-
-// Check2 acts as convenience wrapper around Check, using the 2nd argument as error.
-func Check2(_ interface{}, err error) {
- Check(err)
-}
-
-// AssertTrue asserts that b is true. Otherwise, it would log fatal.
-func AssertTrue(b bool) {
- if !b {
- log.Fatalf("%+v", errors.Errorf("Assert failed"))
- }
-}
-
-// AssertTruef is AssertTrue with extra info.
-func AssertTruef(b bool, format string, args ...interface{}) {
- if !b {
- log.Fatalf("%+v", errors.Errorf(format, args...))
- }
-}
-
-// Wrap wraps errors from external lib.
-func Wrap(err error) error {
- if !debugMode {
- return err
- }
- return errors.Wrap(err, "")
-}
-
-// Wrapf is Wrap with extra info.
-func Wrapf(err error, format string, args ...interface{}) error {
- if !debugMode {
- if err == nil {
- return nil
- }
- return fmt.Errorf(format+" error: %+v", append(args, err)...)
- }
- return errors.Wrapf(err, format, args...)
-}
diff --git a/vendor/github.com/dgraph-io/badger/y/file_dsync.go b/vendor/github.com/dgraph-io/badger/y/file_dsync.go
deleted file mode 100644
index 3f3445e2..00000000
--- a/vendor/github.com/dgraph-io/badger/y/file_dsync.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// +build !dragonfly,!freebsd,!windows
-
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import "golang.org/x/sys/unix"
-
-func init() {
- datasyncFileFlag = unix.O_DSYNC
-}
diff --git a/vendor/github.com/dgraph-io/badger/y/file_nodsync.go b/vendor/github.com/dgraph-io/badger/y/file_nodsync.go
deleted file mode 100644
index b68be7ab..00000000
--- a/vendor/github.com/dgraph-io/badger/y/file_nodsync.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// +build dragonfly freebsd windows
-
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import "syscall"
-
-func init() {
- datasyncFileFlag = syscall.O_SYNC
-}
diff --git a/vendor/github.com/dgraph-io/badger/y/file_sync.go b/vendor/github.com/dgraph-io/badger/y/file_sync.go
deleted file mode 100644
index 19016ef6..00000000
--- a/vendor/github.com/dgraph-io/badger/y/file_sync.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// +build !darwin go1.12
-
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import "os"
-
-// FileSync calls os.File.Sync with the right parameters.
-// This function can be removed once we stop supporting Go 1.11
-// on MacOS.
-//
-// More info: https://golang.org/issue/26650.
-func FileSync(f *os.File) error { return f.Sync() }
diff --git a/vendor/github.com/dgraph-io/badger/y/file_sync_darwin.go b/vendor/github.com/dgraph-io/badger/y/file_sync_darwin.go
deleted file mode 100644
index 01c79f23..00000000
--- a/vendor/github.com/dgraph-io/badger/y/file_sync_darwin.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// +build darwin,!go1.12
-
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import (
- "os"
- "syscall"
-)
-
-// FileSync calls os.File.Sync with the right parameters.
-// This function can be removed once we stop supporting Go 1.11
-// on MacOS.
-//
-// More info: https://golang.org/issue/26650.
-func FileSync(f *os.File) error {
- _, _, err := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), syscall.F_FULLFSYNC, 0)
- if err == 0 {
- return nil
- }
- return err
-}
diff --git a/vendor/github.com/dgraph-io/badger/y/iterator.go b/vendor/github.com/dgraph-io/badger/y/iterator.go
deleted file mode 100644
index 719e8ec8..00000000
--- a/vendor/github.com/dgraph-io/badger/y/iterator.go
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import (
- "bytes"
- "container/heap"
- "encoding/binary"
-
- "github.com/pkg/errors"
-)
-
-// ValueStruct represents the value info that can be associated with a key, but also the internal
-// Meta field.
-type ValueStruct struct {
- Meta byte
- UserMeta byte
- ExpiresAt uint64
- Value []byte
-
- Version uint64 // This field is not serialized. Only for internal usage.
-}
-
-func sizeVarint(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
-}
-
-// EncodedSize is the size of the ValueStruct when encoded
-func (v *ValueStruct) EncodedSize() uint16 {
- sz := len(v.Value) + 2 // meta, usermeta.
- if v.ExpiresAt == 0 {
- return uint16(sz + 1)
- }
-
- enc := sizeVarint(v.ExpiresAt)
- return uint16(sz + enc)
-}
-
-// Decode uses the length of the slice to infer the length of the Value field.
-func (v *ValueStruct) Decode(b []byte) {
- v.Meta = b[0]
- v.UserMeta = b[1]
- var sz int
- v.ExpiresAt, sz = binary.Uvarint(b[2:])
- v.Value = b[2+sz:]
-}
-
-// Encode expects a slice of length at least v.EncodedSize().
-func (v *ValueStruct) Encode(b []byte) {
- b[0] = v.Meta
- b[1] = v.UserMeta
- sz := binary.PutUvarint(b[2:], v.ExpiresAt)
- copy(b[2+sz:], v.Value)
-}
-
-// EncodeTo should be kept in sync with the Encode function above. The reason
-// this function exists is to avoid creating byte arrays per key-value pair in
-// table/builder.go.
-func (v *ValueStruct) EncodeTo(buf *bytes.Buffer) {
- buf.WriteByte(v.Meta)
- buf.WriteByte(v.UserMeta)
- var enc [binary.MaxVarintLen64]byte
- sz := binary.PutUvarint(enc[:], v.ExpiresAt)
- buf.Write(enc[:sz])
- buf.Write(v.Value)
-}
-
-// Iterator is an interface for a basic iterator.
-type Iterator interface {
- Next()
- Rewind()
- Seek(key []byte)
- Key() []byte
- Value() ValueStruct
- Valid() bool
-
- // All iterators should be closed so that file garbage collection works.
- Close() error
-}
-
-type elem struct {
- itr Iterator
- nice int
- reversed bool
-}
-
-type elemHeap []*elem
-
-func (eh elemHeap) Len() int { return len(eh) }
-func (eh elemHeap) Swap(i, j int) { eh[i], eh[j] = eh[j], eh[i] }
-func (eh *elemHeap) Push(x interface{}) { *eh = append(*eh, x.(*elem)) }
-func (eh *elemHeap) Pop() interface{} {
- // Remove the last element, because Go has already swapped 0th elem <-> last.
- old := *eh
- n := len(old)
- x := old[n-1]
- *eh = old[0 : n-1]
- return x
-}
-func (eh elemHeap) Less(i, j int) bool {
- cmp := CompareKeys(eh[i].itr.Key(), eh[j].itr.Key())
- if cmp < 0 {
- return !eh[i].reversed
- }
- if cmp > 0 {
- return eh[i].reversed
- }
- // The keys are equal. In this case, lower nice take precedence. This is important.
- return eh[i].nice < eh[j].nice
-}
-
-// MergeIterator merges multiple iterators.
-// NOTE: MergeIterator owns the array of iterators and is responsible for closing them.
-type MergeIterator struct {
- h elemHeap
- curKey []byte
- reversed bool
-
- all []Iterator
-}
-
-// NewMergeIterator returns a new MergeIterator from a list of Iterators.
-func NewMergeIterator(iters []Iterator, reversed bool) *MergeIterator {
- m := &MergeIterator{all: iters, reversed: reversed}
- m.h = make(elemHeap, 0, len(iters))
- m.initHeap()
- return m
-}
-
-func (s *MergeIterator) storeKey(smallest Iterator) {
- if cap(s.curKey) < len(smallest.Key()) {
- s.curKey = make([]byte, 2*len(smallest.Key()))
- }
- s.curKey = s.curKey[:len(smallest.Key())]
- copy(s.curKey, smallest.Key())
-}
-
-// initHeap checks all iterators and initializes our heap and array of keys.
-// Whenever we reverse direction, we need to run this.
-func (s *MergeIterator) initHeap() {
- s.h = s.h[:0]
- for idx, itr := range s.all {
- if !itr.Valid() {
- continue
- }
- e := &elem{itr: itr, nice: idx, reversed: s.reversed}
- s.h = append(s.h, e)
- }
- heap.Init(&s.h)
- for len(s.h) > 0 {
- it := s.h[0].itr
- if it == nil || !it.Valid() {
- heap.Pop(&s.h)
- continue
- }
- s.storeKey(s.h[0].itr)
- break
- }
-}
-
-// Valid returns whether the MergeIterator is at a valid element.
-func (s *MergeIterator) Valid() bool {
- if s == nil {
- return false
- }
- if len(s.h) == 0 {
- return false
- }
- return s.h[0].itr.Valid()
-}
-
-// Key returns the key associated with the current iterator
-func (s *MergeIterator) Key() []byte {
- if len(s.h) == 0 {
- return nil
- }
- return s.h[0].itr.Key()
-}
-
-// Value returns the value associated with the iterator.
-func (s *MergeIterator) Value() ValueStruct {
- if len(s.h) == 0 {
- return ValueStruct{}
- }
- return s.h[0].itr.Value()
-}
-
-// Next returns the next element. If it is the same as the current key, ignore it.
-func (s *MergeIterator) Next() {
- if len(s.h) == 0 {
- return
- }
-
- smallest := s.h[0].itr
- smallest.Next()
-
- for len(s.h) > 0 {
- smallest = s.h[0].itr
- if !smallest.Valid() {
- heap.Pop(&s.h)
- continue
- }
-
- heap.Fix(&s.h, 0)
- smallest = s.h[0].itr
- if smallest.Valid() {
- if !bytes.Equal(smallest.Key(), s.curKey) {
- break
- }
- smallest.Next()
- }
- }
- if !smallest.Valid() {
- return
- }
- s.storeKey(smallest)
-}
-
-// Rewind seeks to first element (or last element for reverse iterator).
-func (s *MergeIterator) Rewind() {
- for _, itr := range s.all {
- itr.Rewind()
- }
- s.initHeap()
-}
-
-// Seek brings us to element with key >= given key.
-func (s *MergeIterator) Seek(key []byte) {
- for _, itr := range s.all {
- itr.Seek(key)
- }
- s.initHeap()
-}
-
-// Close implements y.Iterator
-func (s *MergeIterator) Close() error {
- for _, itr := range s.all {
- if err := itr.Close(); err != nil {
- return errors.Wrap(err, "MergeIterator")
- }
- }
- return nil
-}
diff --git a/vendor/github.com/dgraph-io/badger/y/metrics.go b/vendor/github.com/dgraph-io/badger/y/metrics.go
deleted file mode 100644
index 2de17d10..00000000
--- a/vendor/github.com/dgraph-io/badger/y/metrics.go
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import "expvar"
-
-var (
- // LSMSize has size of the LSM in bytes
- LSMSize *expvar.Map
- // VlogSize has size of the value log in bytes
- VlogSize *expvar.Map
- // PendingWrites tracks the number of pending writes.
- PendingWrites *expvar.Map
-
- // These are cumulative
-
- // NumReads has cumulative number of reads
- NumReads *expvar.Int
- // NumWrites has cumulative number of writes
- NumWrites *expvar.Int
- // NumBytesRead has cumulative number of bytes read
- NumBytesRead *expvar.Int
- // NumBytesWritten has cumulative number of bytes written
- NumBytesWritten *expvar.Int
- // NumLSMGets is number of LMS gets
- NumLSMGets *expvar.Map
- // NumLSMBloomHits is number of LMS bloom hits
- NumLSMBloomHits *expvar.Map
- // NumGets is number of gets
- NumGets *expvar.Int
- // NumPuts is number of puts
- NumPuts *expvar.Int
- // NumBlockedPuts is number of blocked puts
- NumBlockedPuts *expvar.Int
- // NumMemtableGets is number of memtable gets
- NumMemtableGets *expvar.Int
-)
-
-// These variables are global and have cumulative values for all kv stores.
-func init() {
- NumReads = expvar.NewInt("badger_disk_reads_total")
- NumWrites = expvar.NewInt("badger_disk_writes_total")
- NumBytesRead = expvar.NewInt("badger_read_bytes")
- NumBytesWritten = expvar.NewInt("badger_written_bytes")
- NumLSMGets = expvar.NewMap("badger_lsm_level_gets_total")
- NumLSMBloomHits = expvar.NewMap("badger_lsm_bloom_hits_total")
- NumGets = expvar.NewInt("badger_gets_total")
- NumPuts = expvar.NewInt("badger_puts_total")
- NumBlockedPuts = expvar.NewInt("badger_blocked_puts_total")
- NumMemtableGets = expvar.NewInt("badger_memtable_gets_total")
- LSMSize = expvar.NewMap("badger_lsm_size_bytes")
- VlogSize = expvar.NewMap("badger_vlog_size_bytes")
- PendingWrites = expvar.NewMap("badger_pending_writes_total")
-}
diff --git a/vendor/github.com/dgraph-io/badger/y/mmap_unix.go b/vendor/github.com/dgraph-io/badger/y/mmap_unix.go
deleted file mode 100644
index f9203a01..00000000
--- a/vendor/github.com/dgraph-io/badger/y/mmap_unix.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// +build !windows
-
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import (
- "os"
- "syscall"
- "unsafe"
-
- "golang.org/x/sys/unix"
-)
-
-// Mmap uses the mmap system call to memory-map a file. If writable is true,
-// memory protection of the pages is set so that they may be written to as well.
-func Mmap(fd *os.File, writable bool, size int64) ([]byte, error) {
- mtype := unix.PROT_READ
- if writable {
- mtype |= unix.PROT_WRITE
- }
- return unix.Mmap(int(fd.Fd()), 0, int(size), mtype, unix.MAP_SHARED)
-}
-
-// Munmap unmaps a previously mapped slice.
-func Munmap(b []byte) error {
- return unix.Munmap(b)
-}
-
-// Madvise uses the madvise system call to give advise about the use of memory
-// when using a slice that is memory-mapped to a file. Set the readahead flag to
-// false if page references are expected in random order.
-func Madvise(b []byte, readahead bool) error {
- flags := unix.MADV_NORMAL
- if !readahead {
- flags = unix.MADV_RANDOM
- }
- return madvise(b, flags)
-}
-
-// This is required because the unix package does not support the madvise system call on OS X.
-func madvise(b []byte, advice int) (err error) {
- _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])),
- uintptr(len(b)), uintptr(advice))
- if e1 != 0 {
- err = e1
- }
- return
-}
diff --git a/vendor/github.com/dgraph-io/badger/y/mmap_windows.go b/vendor/github.com/dgraph-io/badger/y/mmap_windows.go
deleted file mode 100644
index 0efb2d0f..00000000
--- a/vendor/github.com/dgraph-io/badger/y/mmap_windows.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// +build windows
-
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import (
- "fmt"
- "os"
- "syscall"
- "unsafe"
-)
-
-func Mmap(fd *os.File, write bool, size int64) ([]byte, error) {
- protect := syscall.PAGE_READONLY
- access := syscall.FILE_MAP_READ
-
- if write {
- protect = syscall.PAGE_READWRITE
- access = syscall.FILE_MAP_WRITE
- }
- fi, err := fd.Stat()
- if err != nil {
- return nil, err
- }
-
- // Truncate the database to the size of the mmap.
- if fi.Size() < size {
- if err := fd.Truncate(size); err != nil {
- return nil, fmt.Errorf("truncate: %s", err)
- }
- }
-
- // Open a file mapping handle.
- sizelo := uint32(size >> 32)
- sizehi := uint32(size) & 0xffffffff
-
- handler, err := syscall.CreateFileMapping(syscall.Handle(fd.Fd()), nil,
- uint32(protect), sizelo, sizehi, nil)
- if err != nil {
- return nil, os.NewSyscallError("CreateFileMapping", err)
- }
-
- // Create the memory map.
- addr, err := syscall.MapViewOfFile(handler, uint32(access), 0, 0, uintptr(size))
- if addr == 0 {
- return nil, os.NewSyscallError("MapViewOfFile", err)
- }
-
- // Close mapping handle.
- if err := syscall.CloseHandle(syscall.Handle(handler)); err != nil {
- return nil, os.NewSyscallError("CloseHandle", err)
- }
-
- // Slice memory layout
- // Copied this snippet from golang/sys package
- var sl = struct {
- addr uintptr
- len int
- cap int
- }{addr, int(size), int(size)}
-
- // Use unsafe to turn sl into a []byte.
- data := *(*[]byte)(unsafe.Pointer(&sl))
-
- return data, nil
-}
-
-func Munmap(b []byte) error {
- return syscall.UnmapViewOfFile(uintptr(unsafe.Pointer(&b[0])))
-}
-
-func Madvise(b []byte, readahead bool) error {
- // Do Nothing. We don’t care about this setting on Windows
- return nil
-}
diff --git a/vendor/github.com/dgraph-io/badger/y/watermark.go b/vendor/github.com/dgraph-io/badger/y/watermark.go
deleted file mode 100644
index 10ca00e7..00000000
--- a/vendor/github.com/dgraph-io/badger/y/watermark.go
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Copyright 2016-2018 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import (
- "container/heap"
- "context"
- "sync/atomic"
-
- "golang.org/x/net/trace"
-)
-
-type uint64Heap []uint64
-
-func (u uint64Heap) Len() int { return len(u) }
-func (u uint64Heap) Less(i, j int) bool { return u[i] < u[j] }
-func (u uint64Heap) Swap(i, j int) { u[i], u[j] = u[j], u[i] }
-func (u *uint64Heap) Push(x interface{}) { *u = append(*u, x.(uint64)) }
-func (u *uint64Heap) Pop() interface{} {
- old := *u
- n := len(old)
- x := old[n-1]
- *u = old[0 : n-1]
- return x
-}
-
-// mark contains one of more indices, along with a done boolean to indicate the
-// status of the index: begin or done. It also contains waiters, who could be
-// waiting for the watermark to reach >= a certain index.
-type mark struct {
- // Either this is an (index, waiter) pair or (index, done) or (indices, done).
- index uint64
- waiter chan struct{}
- indices []uint64
- done bool // Set to true if the index is done.
-}
-
-// WaterMark is used to keep track of the minimum un-finished index. Typically, an index k becomes
-// finished or "done" according to a WaterMark once Done(k) has been called
-// 1. as many times as Begin(k) has, AND
-// 2. a positive number of times.
-//
-// An index may also become "done" by calling SetDoneUntil at a time such that it is not
-// inter-mingled with Begin/Done calls.
-//
-// Since doneUntil and lastIndex addresses are passed to sync/atomic packages, we ensure that they
-// are 64-bit aligned by putting them at the beginning of the structure.
-type WaterMark struct {
- doneUntil uint64
- lastIndex uint64
- Name string
- markCh chan mark
- elog trace.EventLog
-}
-
-// Init initializes a WaterMark struct. MUST be called before using it.
-func (w *WaterMark) Init(closer *Closer) {
- w.markCh = make(chan mark, 100)
- w.elog = trace.NewEventLog("Watermark", w.Name)
- go w.process(closer)
-}
-
-// Begin sets the last index to the given value.
-func (w *WaterMark) Begin(index uint64) {
- atomic.StoreUint64(&w.lastIndex, index)
- w.markCh <- mark{index: index, done: false}
-}
-
-// BeginMany works like Begin but accepts multiple indices.
-func (w *WaterMark) BeginMany(indices []uint64) {
- atomic.StoreUint64(&w.lastIndex, indices[len(indices)-1])
- w.markCh <- mark{index: 0, indices: indices, done: false}
-}
-
-// Done sets a single index as done.
-func (w *WaterMark) Done(index uint64) {
- w.markCh <- mark{index: index, done: true}
-}
-
-// DoneMany works like Done but accepts multiple indices.
-func (w *WaterMark) DoneMany(indices []uint64) {
- w.markCh <- mark{index: 0, indices: indices, done: true}
-}
-
-// DoneUntil returns the maximum index that has the property that all indices
-// less than or equal to it are done.
-func (w *WaterMark) DoneUntil() uint64 {
- return atomic.LoadUint64(&w.doneUntil)
-}
-
-// SetDoneUntil sets the maximum index that has the property that all indices
-// less than or equal to it are done.
-func (w *WaterMark) SetDoneUntil(val uint64) {
- atomic.StoreUint64(&w.doneUntil, val)
-}
-
-// LastIndex returns the last index for which Begin has been called.
-func (w *WaterMark) LastIndex() uint64 {
- return atomic.LoadUint64(&w.lastIndex)
-}
-
-// WaitForMark waits until the given index is marked as done.
-func (w *WaterMark) WaitForMark(ctx context.Context, index uint64) error {
- if w.DoneUntil() >= index {
- return nil
- }
- waitCh := make(chan struct{})
- w.markCh <- mark{index: index, waiter: waitCh}
-
- select {
- case <-ctx.Done():
- return ctx.Err()
- case <-waitCh:
- return nil
- }
-}
-
-// process is used to process the Mark channel. This is not thread-safe,
-// so only run one goroutine for process. One is sufficient, because
-// all goroutine ops use purely memory and cpu.
-// Each index has to emit atleast one begin watermark in serial order otherwise waiters
-// can get blocked idefinitely. Example: We had an watermark at 100 and a waiter at 101,
-// if no watermark is emitted at index 101 then waiter would get stuck indefinitely as it
-// can't decide whether the task at 101 has decided not to emit watermark or it didn't get
-// scheduled yet.
-func (w *WaterMark) process(closer *Closer) {
- defer closer.Done()
-
- var indices uint64Heap
- // pending maps raft proposal index to the number of pending mutations for this proposal.
- pending := make(map[uint64]int)
- waiters := make(map[uint64][]chan struct{})
-
- heap.Init(&indices)
- var loop uint64
-
- processOne := func(index uint64, done bool) {
- // If not already done, then set. Otherwise, don't undo a done entry.
- prev, present := pending[index]
- if !present {
- heap.Push(&indices, index)
- }
-
- delta := 1
- if done {
- delta = -1
- }
- pending[index] = prev + delta
-
- loop++
- if len(indices) > 0 && loop%10000 == 0 {
- min := indices[0]
- w.elog.Printf("WaterMark %s: Done entry %4d. Size: %4d Watermark: %-4d Looking for: "+
- "%-4d. Value: %d\n", w.Name, index, len(indices), w.DoneUntil(), min, pending[min])
- }
-
- // Update mark by going through all indices in order; and checking if they have
- // been done. Stop at the first index, which isn't done.
- doneUntil := w.DoneUntil()
- if doneUntil > index {
- AssertTruef(false, "Name: %s doneUntil: %d. Index: %d", w.Name, doneUntil, index)
- }
-
- until := doneUntil
- loops := 0
-
- for len(indices) > 0 {
- min := indices[0]
- if done := pending[min]; done > 0 {
- break // len(indices) will be > 0.
- }
- // Even if done is called multiple times causing it to become
- // negative, we should still pop the index.
- heap.Pop(&indices)
- delete(pending, min)
- until = min
- loops++
- }
- for i := doneUntil + 1; i <= until; i++ {
- toNotify := waiters[i]
- for _, ch := range toNotify {
- close(ch)
- }
- delete(waiters, i) // Release the memory back.
- }
- if until != doneUntil {
- AssertTrue(atomic.CompareAndSwapUint64(&w.doneUntil, doneUntil, until))
- w.elog.Printf("%s: Done until %d. Loops: %d\n", w.Name, until, loops)
- }
- }
-
- for {
- select {
- case <-closer.HasBeenClosed():
- return
- case mark := <-w.markCh:
- if mark.waiter != nil {
- doneUntil := atomic.LoadUint64(&w.doneUntil)
- if doneUntil >= mark.index {
- close(mark.waiter)
- } else {
- ws, ok := waiters[mark.index]
- if !ok {
- waiters[mark.index] = []chan struct{}{mark.waiter}
- } else {
- waiters[mark.index] = append(ws, mark.waiter)
- }
- }
- } else {
- if mark.index > 0 {
- processOne(mark.index, mark.done)
- }
- for _, index := range mark.indices {
- processOne(index, mark.done)
- }
- }
- }
- }
-}
diff --git a/vendor/github.com/dgraph-io/badger/y/y.go b/vendor/github.com/dgraph-io/badger/y/y.go
deleted file mode 100644
index 4948315a..00000000
--- a/vendor/github.com/dgraph-io/badger/y/y.go
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "hash/crc32"
- "math"
- "os"
- "sync"
- "time"
-
- "github.com/pkg/errors"
-)
-
-// ErrEOF indicates an end of file when trying to read from a memory mapped file
-// and encountering the end of slice.
-var ErrEOF = errors.New("End of mapped region")
-
-const (
- // Sync indicates that O_DSYNC should be set on the underlying file,
- // ensuring that data writes do not return until the data is flushed
- // to disk.
- Sync = 1 << iota
- // ReadOnly opens the underlying file on a read-only basis.
- ReadOnly
-)
-
-var (
- // This is O_DSYNC (datasync) on platforms that support it -- see file_unix.go
- datasyncFileFlag = 0x0
-
- // CastagnoliCrcTable is a CRC32 polynomial table
- CastagnoliCrcTable = crc32.MakeTable(crc32.Castagnoli)
-
- // Dummy channel for nil closers.
- dummyCloserChan = make(chan struct{})
-)
-
-// OpenExistingFile opens an existing file, errors if it doesn't exist.
-func OpenExistingFile(filename string, flags uint32) (*os.File, error) {
- openFlags := os.O_RDWR
- if flags&ReadOnly != 0 {
- openFlags = os.O_RDONLY
- }
-
- if flags&Sync != 0 {
- openFlags |= datasyncFileFlag
- }
- return os.OpenFile(filename, openFlags, 0)
-}
-
-// CreateSyncedFile creates a new file (using O_EXCL), errors if it already existed.
-func CreateSyncedFile(filename string, sync bool) (*os.File, error) {
- flags := os.O_RDWR | os.O_CREATE | os.O_EXCL
- if sync {
- flags |= datasyncFileFlag
- }
- return os.OpenFile(filename, flags, 0666)
-}
-
-// OpenSyncedFile creates the file if one doesn't exist.
-func OpenSyncedFile(filename string, sync bool) (*os.File, error) {
- flags := os.O_RDWR | os.O_CREATE
- if sync {
- flags |= datasyncFileFlag
- }
- return os.OpenFile(filename, flags, 0666)
-}
-
-// OpenTruncFile opens the file with O_RDWR | O_CREATE | O_TRUNC
-func OpenTruncFile(filename string, sync bool) (*os.File, error) {
- flags := os.O_RDWR | os.O_CREATE | os.O_TRUNC
- if sync {
- flags |= datasyncFileFlag
- }
- return os.OpenFile(filename, flags, 0666)
-}
-
-// SafeCopy does append(a[:0], src...).
-func SafeCopy(a, src []byte) []byte {
- return append(a[:0], src...)
-}
-
-// Copy copies a byte slice and returns the copied slice.
-func Copy(a []byte) []byte {
- b := make([]byte, len(a))
- copy(b, a)
- return b
-}
-
-// KeyWithTs generates a new key by appending ts to key.
-func KeyWithTs(key []byte, ts uint64) []byte {
- out := make([]byte, len(key)+8)
- copy(out, key)
- binary.BigEndian.PutUint64(out[len(key):], math.MaxUint64-ts)
- return out
-}
-
-// ParseTs parses the timestamp from the key bytes.
-func ParseTs(key []byte) uint64 {
- if len(key) <= 8 {
- return 0
- }
- return math.MaxUint64 - binary.BigEndian.Uint64(key[len(key)-8:])
-}
-
-// CompareKeys checks the key without timestamp and checks the timestamp if keyNoTs
-// is same.
-// a would be sorted higher than aa if we use bytes.compare
-// All keys should have timestamp.
-func CompareKeys(key1, key2 []byte) int {
- AssertTrue(len(key1) > 8 && len(key2) > 8)
- if cmp := bytes.Compare(key1[:len(key1)-8], key2[:len(key2)-8]); cmp != 0 {
- return cmp
- }
- return bytes.Compare(key1[len(key1)-8:], key2[len(key2)-8:])
-}
-
-// ParseKey parses the actual key from the key bytes.
-func ParseKey(key []byte) []byte {
- if key == nil {
- return nil
- }
-
- AssertTrue(len(key) > 8)
- return key[:len(key)-8]
-}
-
-// SameKey checks for key equality ignoring the version timestamp suffix.
-func SameKey(src, dst []byte) bool {
- if len(src) != len(dst) {
- return false
- }
- return bytes.Equal(ParseKey(src), ParseKey(dst))
-}
-
-// Slice holds a reusable buf, will reallocate if you request a larger size than ever before.
-// One problem is with n distinct sizes in random order it'll reallocate log(n) times.
-type Slice struct {
- buf []byte
-}
-
-// Resize reuses the Slice's buffer (or makes a new one) and returns a slice in that buffer of
-// length sz.
-func (s *Slice) Resize(sz int) []byte {
- if cap(s.buf) < sz {
- s.buf = make([]byte, sz)
- }
- return s.buf[0:sz]
-}
-
-// FixedDuration returns a string representation of the given duration with the
-// hours, minutes, and seconds.
-func FixedDuration(d time.Duration) string {
- str := fmt.Sprintf("%02ds", int(d.Seconds())%60)
- if d >= time.Minute {
- str = fmt.Sprintf("%02dm", int(d.Minutes())%60) + str
- }
- if d >= time.Hour {
- str = fmt.Sprintf("%02dh", int(d.Hours())) + str
- }
- return str
-}
-
-// Closer holds the two things we need to close a goroutine and wait for it to finish: a chan
-// to tell the goroutine to shut down, and a WaitGroup with which to wait for it to finish shutting
-// down.
-type Closer struct {
- closed chan struct{}
- waiting sync.WaitGroup
-}
-
-// NewCloser constructs a new Closer, with an initial count on the WaitGroup.
-func NewCloser(initial int) *Closer {
- ret := &Closer{closed: make(chan struct{})}
- ret.waiting.Add(initial)
- return ret
-}
-
-// AddRunning Add()'s delta to the WaitGroup.
-func (lc *Closer) AddRunning(delta int) {
- lc.waiting.Add(delta)
-}
-
-// Signal signals the HasBeenClosed signal.
-func (lc *Closer) Signal() {
- close(lc.closed)
-}
-
-// HasBeenClosed gets signaled when Signal() is called.
-func (lc *Closer) HasBeenClosed() <-chan struct{} {
- if lc == nil {
- return dummyCloserChan
- }
- return lc.closed
-}
-
-// Done calls Done() on the WaitGroup.
-func (lc *Closer) Done() {
- if lc == nil {
- return
- }
- lc.waiting.Done()
-}
-
-// Wait waits on the WaitGroup. (It waits for NewCloser's initial value, AddRunning, and Done
-// calls to balance out.)
-func (lc *Closer) Wait() {
- lc.waiting.Wait()
-}
-
-// SignalAndWait calls Signal(), then Wait().
-func (lc *Closer) SignalAndWait() {
- lc.Signal()
- lc.Wait()
-}
-
-// Throttle allows a limited number of workers to run at a time. It also
-// provides a mechanism to check for errors encountered by workers and wait for
-// them to finish.
-type Throttle struct {
- once sync.Once
- wg sync.WaitGroup
- ch chan struct{}
- errCh chan error
- finishErr error
-}
-
-// NewThrottle creates a new throttle with a max number of workers.
-func NewThrottle(max int) *Throttle {
- return &Throttle{
- ch: make(chan struct{}, max),
- errCh: make(chan error, max),
- }
-}
-
-// Do should be called by workers before they start working. It blocks if there
-// are already maximum number of workers working. If it detects an error from
-// previously Done workers, it would return it.
-func (t *Throttle) Do() error {
- for {
- select {
- case t.ch <- struct{}{}:
- t.wg.Add(1)
- return nil
- case err := <-t.errCh:
- if err != nil {
- return err
- }
- }
- }
-}
-
-// Done should be called by workers when they finish working. They can also
-// pass the error status of work done.
-func (t *Throttle) Done(err error) {
- if err != nil {
- t.errCh <- err
- }
- select {
- case <-t.ch:
- default:
- panic("Throttle Do Done mismatch")
- }
- t.wg.Done()
-}
-
-// Finish waits until all workers have finished working. It would return any error passed by Done.
-// If Finish is called multiple time, it will wait for workers to finish only once(first time).
-// From next calls, it will return same error as found on first call.
-func (t *Throttle) Finish() error {
- t.once.Do(func() {
- t.wg.Wait()
- close(t.ch)
- close(t.errCh)
- for err := range t.errCh {
- if err != nil {
- t.finishErr = err
- return
- }
- }
- })
-
- return t.finishErr
-}
diff --git a/vendor/github.com/dgryski/go-farm/.gitignore b/vendor/github.com/dgryski/go-farm/.gitignore
deleted file mode 100644
index 36029ab5..00000000
--- a/vendor/github.com/dgryski/go-farm/.gitignore
+++ /dev/null
@@ -1,24 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-*.exe
-*.test
-*.prof
-
-target
diff --git a/vendor/github.com/dgryski/go-farm/.travis.yml b/vendor/github.com/dgryski/go-farm/.travis.yml
deleted file mode 100644
index bc89a55d..00000000
--- a/vendor/github.com/dgryski/go-farm/.travis.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-language: go
-
-sudo: false
-
-branches:
- except:
- - release
-
-branches:
- only:
- - master
- - develop
- - travis
-
-go:
- - 1.11.x
- - 1.12.x
- - tip
-
-matrix:
- allow_failures:
- - go: tip
-
-before_install:
- - if [ -n "$GH_USER" ]; then git config --global github.user ${GH_USER}; fi;
- - if [ -n "$GH_TOKEN" ]; then git config --global github.token ${GH_TOKEN}; fi;
- - go get github.com/mattn/goveralls
-
-before_script:
- - make deps
-
-script:
- - make qa
-
-after_failure:
- - cat ./target/test/report.xml
-
-after_success:
- - if [ "$TRAVIS_GO_VERSION" = "1.9" ]; then $HOME/gopath/bin/goveralls -covermode=count -coverprofile=target/report/coverage.out -service=travis-ci; fi;
diff --git a/vendor/github.com/dgryski/go-farm/LICENSE b/vendor/github.com/dgryski/go-farm/LICENSE
deleted file mode 100644
index 3d07f666..00000000
--- a/vendor/github.com/dgryski/go-farm/LICENSE
+++ /dev/null
@@ -1,23 +0,0 @@
-As this is a highly derivative work, I have placed it under the same license as the original implementation:
-
-Copyright (c) 2014-2017 Damian Gryski
-Copyright (c) 2016-2017 Nicola Asuni - Tecnick.com
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
diff --git a/vendor/github.com/dgryski/go-farm/Makefile b/vendor/github.com/dgryski/go-farm/Makefile
deleted file mode 100644
index c189c95d..00000000
--- a/vendor/github.com/dgryski/go-farm/Makefile
+++ /dev/null
@@ -1,203 +0,0 @@
-# MAKEFILE
-#
-# @author Nicola Asuni
-# @link https://github.com/dgryski/go-farm
-#
-# This file is intended to be executed in a Linux-compatible system.
-# It also assumes that the project has been cloned in the right path under GOPATH:
-# $GOPATH/src/github.com/dgryski/go-farm
-#
-# ------------------------------------------------------------------------------
-
-# List special make targets that are not associated with files
-.PHONY: help all test format fmtcheck vet lint coverage cyclo ineffassign misspell structcheck varcheck errcheck gosimple astscan qa deps clean nuke
-
-# Use bash as shell (Note: Ubuntu now uses dash which doesn't support PIPESTATUS).
-SHELL=/bin/bash
-
-# CVS path (path to the parent dir containing the project)
-CVSPATH=github.com/dgryski
-
-# Project owner
-OWNER=dgryski
-
-# Project vendor
-VENDOR=dgryski
-
-# Project name
-PROJECT=go-farm
-
-# Project version
-VERSION=$(shell cat VERSION)
-
-# Name of RPM or DEB package
-PKGNAME=${VENDOR}-${PROJECT}
-
-# Current directory
-CURRENTDIR=$(shell pwd)
-
-# GO lang path
-ifneq ($(GOPATH),)
- ifeq ($(findstring $(GOPATH),$(CURRENTDIR)),)
- # the defined GOPATH is not valid
- GOPATH=
- endif
-endif
-ifeq ($(GOPATH),)
- # extract the GOPATH
- GOPATH=$(firstword $(subst /src/, ,$(CURRENTDIR)))
-endif
-
-# --- MAKE TARGETS ---
-
-# Display general help about this command
-help:
- @echo ""
- @echo "$(PROJECT) Makefile."
- @echo "GOPATH=$(GOPATH)"
- @echo "The following commands are available:"
- @echo ""
- @echo " make qa : Run all the tests"
- @echo " make test : Run the unit tests"
- @echo ""
- @echo " make format : Format the source code"
- @echo " make fmtcheck : Check if the source code has been formatted"
- @echo " make vet : Check for suspicious constructs"
- @echo " make lint : Check for style errors"
- @echo " make coverage : Generate the coverage report"
- @echo " make cyclo : Generate the cyclomatic complexity report"
- @echo " make ineffassign : Detect ineffectual assignments"
- @echo " make misspell : Detect commonly misspelled words in source files"
- @echo " make structcheck : Find unused struct fields"
- @echo " make varcheck : Find unused global variables and constants"
- @echo " make errcheck : Check that error return values are used"
- @echo " make gosimple : Suggest code simplifications"
- @echo " make astscan : GO AST scanner"
- @echo ""
- @echo " make docs : Generate source code documentation"
- @echo ""
- @echo " make deps : Get the dependencies"
- @echo " make clean : Remove any build artifact"
- @echo " make nuke : Deletes any intermediate file"
- @echo ""
-
-
-# Alias for help target
-all: help
-
-# Run the unit tests
-test:
- @mkdir -p target/test
- @mkdir -p target/report
- GOPATH=$(GOPATH) \
- go test \
- -covermode=atomic \
- -bench=. \
- -race \
- -cpuprofile=target/report/cpu.out \
- -memprofile=target/report/mem.out \
- -mutexprofile=target/report/mutex.out \
- -coverprofile=target/report/coverage.out \
- -v ./... | \
- tee >(PATH=$(GOPATH)/bin:$(PATH) go-junit-report > target/test/report.xml); \
- test $${PIPESTATUS[0]} -eq 0
-
-# Format the source code
-format:
- @find . -type f -name "*.go" -exec gofmt -s -w {} \;
-
-# Check if the source code has been formatted
-fmtcheck:
- @mkdir -p target
- @find . -type f -name "*.go" -exec gofmt -s -d {} \; | tee target/format.diff
- @test ! -s target/format.diff || { echo "ERROR: the source code has not been formatted - please use 'make format' or 'gofmt'"; exit 1; }
-
-# Check for syntax errors
-vet:
- GOPATH=$(GOPATH) go vet .
-
-# Check for style errors
-lint:
- GOPATH=$(GOPATH) PATH=$(GOPATH)/bin:$(PATH) golint .
-
-# Generate the coverage report
-coverage:
- @mkdir -p target/report
- GOPATH=$(GOPATH) \
- go tool cover -html=target/report/coverage.out -o target/report/coverage.html
-
-# Report cyclomatic complexity
-cyclo:
- @mkdir -p target/report
- GOPATH=$(GOPATH) gocyclo -avg ./ | tee target/report/cyclo.txt ; test $${PIPESTATUS[0]} -eq 0
-
-# Detect ineffectual assignments
-ineffassign:
- @mkdir -p target/report
- GOPATH=$(GOPATH) ineffassign ./ | tee target/report/ineffassign.txt ; test $${PIPESTATUS[0]} -eq 0
-
-# Detect commonly misspelled words in source files
-misspell:
- @mkdir -p target/report
- GOPATH=$(GOPATH) misspell -error ./ | tee target/report/misspell.txt ; test $${PIPESTATUS[0]} -eq 0
-
-# Find unused struct fields
-structcheck:
- @mkdir -p target/report
- GOPATH=$(GOPATH) structcheck -a ./ | tee target/report/structcheck.txt
-
-# Find unused global variables and constants
-varcheck:
- @mkdir -p target/report
- GOPATH=$(GOPATH) varcheck -e ./ | tee target/report/varcheck.txt
-
-# Check that error return values are used
-errcheck:
- @mkdir -p target/report
- GOPATH=$(GOPATH) errcheck ./ | tee target/report/errcheck.txt
-
-# Suggest code simplifications
-gosimple:
- @mkdir -p target/report
- GOPATH=$(GOPATH) gosimple ./ | tee target/report/gosimple.txt
-
-# AST scanner
-astscan:
- @mkdir -p target/report
- GOPATH=$(GOPATH) gas .//*.go | tee target/report/astscan.txt
-
-# Generate source docs
-docs:
- @mkdir -p target/docs
- nohup sh -c 'GOPATH=$(GOPATH) godoc -http=127.0.0.1:6060' > target/godoc_server.log 2>&1 &
- wget --directory-prefix=target/docs/ --execute robots=off --retry-connrefused --recursive --no-parent --adjust-extension --page-requisites --convert-links http://127.0.0.1:6060/pkg/github.com/${VENDOR}/${PROJECT}/ ; kill -9 `lsof -ti :6060`
- @echo ''${PKGNAME}' Documentation ... ' > target/docs/index.html
-
-# Alias to run all quality-assurance checks
-qa: fmtcheck test vet lint coverage cyclo ineffassign misspell structcheck varcheck errcheck gosimple astscan
-
-# --- INSTALL ---
-
-# Get the dependencies
-deps:
- GOPATH=$(GOPATH) go get ./...
- GOPATH=$(GOPATH) go get golang.org/x/lint/golint
- GOPATH=$(GOPATH) go get github.com/jstemmer/go-junit-report
- GOPATH=$(GOPATH) go get github.com/axw/gocov/gocov
- GOPATH=$(GOPATH) go get github.com/fzipp/gocyclo
- GOPATH=$(GOPATH) go get github.com/gordonklaus/ineffassign
- GOPATH=$(GOPATH) go get github.com/client9/misspell/cmd/misspell
- GOPATH=$(GOPATH) go get github.com/opennota/check/cmd/structcheck
- GOPATH=$(GOPATH) go get github.com/opennota/check/cmd/varcheck
- GOPATH=$(GOPATH) go get github.com/kisielk/errcheck
- GOPATH=$(GOPATH) go get honnef.co/go/tools/cmd/gosimple
- GOPATH=$(GOPATH) go get github.com/GoASTScanner/gas
-
-# Remove any build artifact
-clean:
- GOPATH=$(GOPATH) go clean ./...
-
-# Deletes any intermediate file
-nuke:
- rm -rf ./target
- GOPATH=$(GOPATH) go clean -i ./...
diff --git a/vendor/github.com/dgryski/go-farm/README.md b/vendor/github.com/dgryski/go-farm/README.md
deleted file mode 100644
index dd07d6f9..00000000
--- a/vendor/github.com/dgryski/go-farm/README.md
+++ /dev/null
@@ -1,41 +0,0 @@
-# go-farm
-
-*Google's FarmHash hash functions implemented in Go*
-
-[![Master Branch](https://img.shields.io/badge/-master:-gray.svg)](https://github.com/dgryski/go-farm/tree/master)
-[![Master Build Status](https://secure.travis-ci.org/dgryski/go-farm.png?branch=master)](https://travis-ci.org/dgryski/go-farm?branch=master)
-[![Master Coverage Status](https://coveralls.io/repos/dgryski/go-farm/badge.svg?branch=master&service=github)](https://coveralls.io/github/dgryski/go-farm?branch=master)
-[![Go Report Card](https://goreportcard.com/badge/github.com/dgryski/go-farm)](https://goreportcard.com/report/github.com/dgryski/go-farm)
-[![GoDoc](https://godoc.org/github.com/dgryski/go-farm?status.svg)](http://godoc.org/github.com/dgryski/go-farm)
-
-## Description
-
-FarmHash, a family of hash functions.
-
-This is a (mechanical) translation of the non-SSE4/non-AESNI hash functions from Google's FarmHash (https://github.com/google/farmhash).
-
-
-FarmHash provides hash functions for strings and other data.
-The functions mix the input bits thoroughly but are not suitable for cryptography.
-
-All members of the FarmHash family were designed with heavy reliance on previous work by Jyrki Alakuijala, Austin Appleby, Bob Jenkins, and others.
-
-For more information please consult https://github.com/google/farmhash
-
-
-## Getting started
-
-This application is written in Go language, please refer to the guides in https://golang.org for getting started.
-
-This project include a Makefile that allows you to test and build the project with simple commands.
-To see all available options:
-```bash
-make help
-```
-
-## Running all tests
-
-Before committing the code, please check if it passes all tests using
-```bash
-make qa
-```
diff --git a/vendor/github.com/dgryski/go-farm/VERSION b/vendor/github.com/dgryski/go-farm/VERSION
deleted file mode 100644
index 38f77a65..00000000
--- a/vendor/github.com/dgryski/go-farm/VERSION
+++ /dev/null
@@ -1 +0,0 @@
-2.0.1
diff --git a/vendor/github.com/dgryski/go-farm/basics.go b/vendor/github.com/dgryski/go-farm/basics.go
deleted file mode 100644
index ec7076c0..00000000
--- a/vendor/github.com/dgryski/go-farm/basics.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package farm
-
-import "math/bits"
-
-// Some primes between 2^63 and 2^64 for various uses.
-const k0 uint64 = 0xc3a5c85c97cb3127
-const k1 uint64 = 0xb492b66fbe98f273
-const k2 uint64 = 0x9ae16a3b2f90404f
-
-// Magic numbers for 32-bit hashing. Copied from Murmur3.
-const c1 uint32 = 0xcc9e2d51
-const c2 uint32 = 0x1b873593
-
-// A 32-bit to 32-bit integer hash copied from Murmur3.
-func fmix(h uint32) uint32 {
- h ^= h >> 16
- h *= 0x85ebca6b
- h ^= h >> 13
- h *= 0xc2b2ae35
- h ^= h >> 16
- return h
-}
-
-func mur(a, h uint32) uint32 {
- // Helper from Murmur3 for combining two 32-bit values.
- a *= c1
- a = bits.RotateLeft32(a, -17)
- a *= c2
- h ^= a
- h = bits.RotateLeft32(h, -19)
- return h*5 + 0xe6546b64
-}
diff --git a/vendor/github.com/dgryski/go-farm/farmhashcc.go b/vendor/github.com/dgryski/go-farm/farmhashcc.go
deleted file mode 100644
index 3e68ae3a..00000000
--- a/vendor/github.com/dgryski/go-farm/farmhashcc.go
+++ /dev/null
@@ -1,194 +0,0 @@
-package farm
-
-import (
- "encoding/binary"
- "math/bits"
-)
-
-// This file provides a 32-bit hash equivalent to CityHash32 (v1.1.1)
-// and a 128-bit hash equivalent to CityHash128 (v1.1.1). It also provides
-// a seeded 32-bit hash function similar to CityHash32.
-
-func hash32Len13to24Seed(s []byte, seed uint32) uint32 {
- slen := len(s)
- a := binary.LittleEndian.Uint32(s[-4+(slen>>1) : -4+(slen>>1)+4])
- b := binary.LittleEndian.Uint32(s[4 : 4+4])
- c := binary.LittleEndian.Uint32(s[slen-8 : slen-8+4])
- d := binary.LittleEndian.Uint32(s[(slen >> 1) : (slen>>1)+4])
- e := binary.LittleEndian.Uint32(s[0 : 0+4])
- f := binary.LittleEndian.Uint32(s[slen-4 : slen-4+4])
- h := d*c1 + uint32(slen) + seed
- a = bits.RotateLeft32(a, -12) + f
- h = mur(c, h) + a
- a = bits.RotateLeft32(a, -3) + c
- h = mur(e, h) + a
- a = bits.RotateLeft32(a+f, -12) + d
- h = mur(b^seed, h) + a
- return fmix(h)
-}
-
-func hash32Len0to4(s []byte, seed uint32) uint32 {
- slen := len(s)
- b := seed
- c := uint32(9)
- for i := 0; i < slen; i++ {
- v := int8(s[i])
- b = (b * c1) + uint32(v)
- c ^= b
- }
- return fmix(mur(b, mur(uint32(slen), c)))
-}
-
-func hash128to64(x uint128) uint64 {
- // Murmur-inspired hashing.
- const mul uint64 = 0x9ddfea08eb382d69
- a := (x.lo ^ x.hi) * mul
- a ^= (a >> 47)
- b := (x.hi ^ a) * mul
- b ^= (b >> 47)
- b *= mul
- return b
-}
-
-type uint128 struct {
- lo uint64
- hi uint64
-}
-
-// A subroutine for CityHash128(). Returns a decent 128-bit hash for strings
-// of any length representable in signed long. Based on City and Murmur.
-func cityMurmur(s []byte, seed uint128) uint128 {
- slen := len(s)
- a := seed.lo
- b := seed.hi
- var c uint64
- var d uint64
- l := slen - 16
- if l <= 0 { // len <= 16
- a = shiftMix(a*k1) * k1
- c = b*k1 + hashLen0to16(s)
- if slen >= 8 {
- d = shiftMix(a + binary.LittleEndian.Uint64(s[0:0+8]))
- } else {
- d = shiftMix(a + c)
- }
- } else { // len > 16
- c = hashLen16(binary.LittleEndian.Uint64(s[slen-8:slen-8+8])+k1, a)
- d = hashLen16(b+uint64(slen), c+binary.LittleEndian.Uint64(s[slen-16:slen-16+8]))
- a += d
- for {
- a ^= shiftMix(binary.LittleEndian.Uint64(s[0:0+8])*k1) * k1
- a *= k1
- b ^= a
- c ^= shiftMix(binary.LittleEndian.Uint64(s[8:8+8])*k1) * k1
- c *= k1
- d ^= c
- s = s[16:]
- l -= 16
- if l <= 0 {
- break
- }
- }
- }
- a = hashLen16(a, c)
- b = hashLen16(d, b)
- return uint128{a ^ b, hashLen16(b, a)}
-}
-
-func cityHash128WithSeed(s []byte, seed uint128) uint128 {
- slen := len(s)
- if slen < 128 {
- return cityMurmur(s, seed)
- }
-
- endIdx := ((slen - 1) / 128) * 128
- lastBlockIdx := endIdx + ((slen - 1) & 127) - 127
- last := s[lastBlockIdx:]
-
- // We expect len >= 128 to be the common case. Keep 56 bytes of state:
- // v, w, x, y, and z.
- var v1, v2 uint64
- var w1, w2 uint64
- x := seed.lo
- y := seed.hi
- z := uint64(slen) * k1
- v1 = bits.RotateLeft64(y^k1, -49)*k1 + binary.LittleEndian.Uint64(s[0:0+8])
- v2 = bits.RotateLeft64(v1, -42)*k1 + binary.LittleEndian.Uint64(s[8:8+8])
- w1 = bits.RotateLeft64(y+z, -35)*k1 + x
- w2 = bits.RotateLeft64(x+binary.LittleEndian.Uint64(s[88:88+8]), -53) * k1
-
- // This is the same inner loop as CityHash64(), manually unrolled.
- for {
- x = bits.RotateLeft64(x+y+v1+binary.LittleEndian.Uint64(s[8:8+8]), -37) * k1
- y = bits.RotateLeft64(y+v2+binary.LittleEndian.Uint64(s[48:48+8]), -42) * k1
- x ^= w2
- y += v1 + binary.LittleEndian.Uint64(s[40:40+8])
- z = bits.RotateLeft64(z+w1, -33) * k1
- v1, v2 = weakHashLen32WithSeeds(s, v2*k1, x+w1)
- w1, w2 = weakHashLen32WithSeeds(s[32:], z+w2, y+binary.LittleEndian.Uint64(s[16:16+8]))
- z, x = x, z
- s = s[64:]
- x = bits.RotateLeft64(x+y+v1+binary.LittleEndian.Uint64(s[8:8+8]), -37) * k1
- y = bits.RotateLeft64(y+v2+binary.LittleEndian.Uint64(s[48:48+8]), -42) * k1
- x ^= w2
- y += v1 + binary.LittleEndian.Uint64(s[40:40+8])
- z = bits.RotateLeft64(z+w1, -33) * k1
- v1, v2 = weakHashLen32WithSeeds(s, v2*k1, x+w1)
- w1, w2 = weakHashLen32WithSeeds(s[32:], z+w2, y+binary.LittleEndian.Uint64(s[16:16+8]))
- z, x = x, z
- s = s[64:]
- slen -= 128
- if slen < 128 {
- break
- }
- }
- x += bits.RotateLeft64(v1+z, -49) * k0
- y = y*k0 + bits.RotateLeft64(w2, -37)
- z = z*k0 + bits.RotateLeft64(w1, -27)
- w1 *= 9
- v1 *= k0
- // If 0 < len < 128, hash up to 4 chunks of 32 bytes each from the end of s.
- for tailDone := 0; tailDone < slen; {
- tailDone += 32
- y = bits.RotateLeft64(x+y, -42)*k0 + v2
- w1 += binary.LittleEndian.Uint64(last[128-tailDone+16 : 128-tailDone+16+8])
- x = x*k0 + w1
- z += w2 + binary.LittleEndian.Uint64(last[128-tailDone:128-tailDone+8])
- w2 += v1
- v1, v2 = weakHashLen32WithSeeds(last[128-tailDone:], v1+z, v2)
- v1 *= k0
- }
-
- // At this point our 56 bytes of state should contain more than
- // enough information for a strong 128-bit hash. We use two
- // different 56-byte-to-8-byte hashes to get a 16-byte final result.
- x = hashLen16(x, v1)
- y = hashLen16(y+z, w1)
- return uint128{hashLen16(x+v2, w2) + y,
- hashLen16(x+w2, y+v2)}
-}
-
-func cityHash128(s []byte) uint128 {
- slen := len(s)
- if slen >= 16 {
- return cityHash128WithSeed(s[16:], uint128{binary.LittleEndian.Uint64(s[0 : 0+8]), binary.LittleEndian.Uint64(s[8:8+8]) + k0})
- }
- return cityHash128WithSeed(s, uint128{k0, k1})
-}
-
-// Fingerprint128 is a 128-bit fingerprint function for byte-slices
-func Fingerprint128(s []byte) (lo, hi uint64) {
- h := cityHash128(s)
- return h.lo, h.hi
-}
-
-// Hash128 is a 128-bit hash function for byte-slices
-func Hash128(s []byte) (lo, hi uint64) {
- return Fingerprint128(s)
-}
-
-// Hash128WithSeed is a 128-bit hash function for byte-slices and a 128-bit seed
-func Hash128WithSeed(s []byte, seed0, seed1 uint64) (lo, hi uint64) {
- h := cityHash128WithSeed(s, uint128{seed0, seed1})
- return h.lo, h.hi
-}
diff --git a/vendor/github.com/dgryski/go-farm/farmhashmk.go b/vendor/github.com/dgryski/go-farm/farmhashmk.go
deleted file mode 100644
index 8e4c7428..00000000
--- a/vendor/github.com/dgryski/go-farm/farmhashmk.go
+++ /dev/null
@@ -1,102 +0,0 @@
-package farm
-
-import (
- "encoding/binary"
- "math/bits"
-)
-
-func hash32Len5to12(s []byte, seed uint32) uint32 {
- slen := len(s)
- a := uint32(len(s))
- b := uint32(len(s) * 5)
- c := uint32(9)
- d := b + seed
- a += binary.LittleEndian.Uint32(s[0 : 0+4])
- b += binary.LittleEndian.Uint32(s[slen-4 : slen-4+4])
- c += binary.LittleEndian.Uint32(s[((slen >> 1) & 4) : ((slen>>1)&4)+4])
- return fmix(seed ^ mur(c, mur(b, mur(a, d))))
-}
-
-// Hash32 hashes a byte slice and returns a uint32 hash value
-func Hash32(s []byte) uint32 {
-
- slen := len(s)
-
- if slen <= 24 {
- if slen <= 12 {
- if slen <= 4 {
- return hash32Len0to4(s, 0)
- }
- return hash32Len5to12(s, 0)
- }
- return hash32Len13to24Seed(s, 0)
- }
-
- // len > 24
- h := uint32(slen)
- g := c1 * uint32(slen)
- f := g
- a0 := bits.RotateLeft32(binary.LittleEndian.Uint32(s[slen-4:slen-4+4])*c1, -17) * c2
- a1 := bits.RotateLeft32(binary.LittleEndian.Uint32(s[slen-8:slen-8+4])*c1, -17) * c2
- a2 := bits.RotateLeft32(binary.LittleEndian.Uint32(s[slen-16:slen-16+4])*c1, -17) * c2
- a3 := bits.RotateLeft32(binary.LittleEndian.Uint32(s[slen-12:slen-12+4])*c1, -17) * c2
- a4 := bits.RotateLeft32(binary.LittleEndian.Uint32(s[slen-20:slen-20+4])*c1, -17) * c2
- h ^= a0
- h = bits.RotateLeft32(h, -19)
- h = h*5 + 0xe6546b64
- h ^= a2
- h = bits.RotateLeft32(h, -19)
- h = h*5 + 0xe6546b64
- g ^= a1
- g = bits.RotateLeft32(g, -19)
- g = g*5 + 0xe6546b64
- g ^= a3
- g = bits.RotateLeft32(g, -19)
- g = g*5 + 0xe6546b64
- f += a4
- f = bits.RotateLeft32(f, -19) + 113
- for len(s) > 20 {
- a := binary.LittleEndian.Uint32(s[0 : 0+4])
- b := binary.LittleEndian.Uint32(s[4 : 4+4])
- c := binary.LittleEndian.Uint32(s[8 : 8+4])
- d := binary.LittleEndian.Uint32(s[12 : 12+4])
- e := binary.LittleEndian.Uint32(s[16 : 16+4])
- h += a
- g += b
- f += c
- h = mur(d, h) + e
- g = mur(c, g) + a
- f = mur(b+e*c1, f) + d
- f += g
- g += f
- s = s[20:]
- }
- g = bits.RotateLeft32(g, -11) * c1
- g = bits.RotateLeft32(g, -17) * c1
- f = bits.RotateLeft32(f, -11) * c1
- f = bits.RotateLeft32(f, -17) * c1
- h = bits.RotateLeft32(h+g, -19)
- h = h*5 + 0xe6546b64
- h = bits.RotateLeft32(h, -17) * c1
- h = bits.RotateLeft32(h+f, -19)
- h = h*5 + 0xe6546b64
- h = bits.RotateLeft32(h, -17) * c1
- return h
-}
-
-// Hash32WithSeed hashes a byte slice and a uint32 seed and returns a uint32 hash value
-func Hash32WithSeed(s []byte, seed uint32) uint32 {
- slen := len(s)
-
- if slen <= 24 {
- if slen >= 13 {
- return hash32Len13to24Seed(s, seed*c1)
- }
- if slen >= 5 {
- return hash32Len5to12(s, seed)
- }
- return hash32Len0to4(s, seed)
- }
- h := hash32Len13to24Seed(s[:24], seed^uint32(slen))
- return mur(Hash32(s[24:])+seed, h)
-}
diff --git a/vendor/github.com/dgryski/go-farm/farmhashna.go b/vendor/github.com/dgryski/go-farm/farmhashna.go
deleted file mode 100644
index ac62edd3..00000000
--- a/vendor/github.com/dgryski/go-farm/farmhashna.go
+++ /dev/null
@@ -1,161 +0,0 @@
-package farm
-
-import (
- "encoding/binary"
- "math/bits"
-)
-
-func shiftMix(val uint64) uint64 {
- return val ^ (val >> 47)
-}
-
-func hashLen16(u, v uint64) uint64 {
- return hash128to64(uint128{u, v})
-}
-
-func hashLen16Mul(u, v, mul uint64) uint64 {
- // Murmur-inspired hashing.
- a := (u ^ v) * mul
- a ^= (a >> 47)
- b := (v ^ a) * mul
- b ^= (b >> 47)
- b *= mul
- return b
-}
-
-func hashLen0to16(s []byte) uint64 {
- slen := uint64(len(s))
- if slen >= 8 {
- mul := k2 + slen*2
- a := binary.LittleEndian.Uint64(s[0:0+8]) + k2
- b := binary.LittleEndian.Uint64(s[int(slen-8) : int(slen-8)+8])
- c := bits.RotateLeft64(b, -37)*mul + a
- d := (bits.RotateLeft64(a, -25) + b) * mul
- return hashLen16Mul(c, d, mul)
- }
-
- if slen >= 4 {
- mul := k2 + slen*2
- a := binary.LittleEndian.Uint32(s[0 : 0+4])
- return hashLen16Mul(slen+(uint64(a)<<3), uint64(binary.LittleEndian.Uint32(s[int(slen-4):int(slen-4)+4])), mul)
- }
- if slen > 0 {
- a := s[0]
- b := s[slen>>1]
- c := s[slen-1]
- y := uint32(a) + (uint32(b) << 8)
- z := uint32(slen) + (uint32(c) << 2)
- return shiftMix(uint64(y)*k2^uint64(z)*k0) * k2
- }
- return k2
-}
-
-// This probably works well for 16-byte strings as well, but it may be overkill
-// in that case.
-func hashLen17to32(s []byte) uint64 {
- slen := len(s)
- mul := k2 + uint64(slen*2)
- a := binary.LittleEndian.Uint64(s[0:0+8]) * k1
- b := binary.LittleEndian.Uint64(s[8 : 8+8])
- c := binary.LittleEndian.Uint64(s[slen-8:slen-8+8]) * mul
- d := binary.LittleEndian.Uint64(s[slen-16:slen-16+8]) * k2
- return hashLen16Mul(bits.RotateLeft64(a+b, -43)+bits.RotateLeft64(c, -30)+d, a+bits.RotateLeft64(b+k2, -18)+c, mul)
-}
-
-// Return a 16-byte hash for 48 bytes. Quick and dirty.
-// Callers do best to use "random-looking" values for a and b.
-func weakHashLen32WithSeedsWords(w, x, y, z, a, b uint64) (uint64, uint64) {
- a += w
- b = bits.RotateLeft64(b+a+z, -21)
- c := a
- a += x
- a += y
- b += bits.RotateLeft64(a, -44)
- return a + z, b + c
-}
-
-// Return a 16-byte hash for s[0] ... s[31], a, and b. Quick and dirty.
-func weakHashLen32WithSeeds(s []byte, a, b uint64) (uint64, uint64) {
- return weakHashLen32WithSeedsWords(binary.LittleEndian.Uint64(s[0:0+8]),
- binary.LittleEndian.Uint64(s[8:8+8]),
- binary.LittleEndian.Uint64(s[16:16+8]),
- binary.LittleEndian.Uint64(s[24:24+8]),
- a,
- b)
-}
-
-// Return an 8-byte hash for 33 to 64 bytes.
-func hashLen33to64(s []byte) uint64 {
- slen := len(s)
- mul := k2 + uint64(slen)*2
- a := binary.LittleEndian.Uint64(s[0:0+8]) * k2
- b := binary.LittleEndian.Uint64(s[8 : 8+8])
- c := binary.LittleEndian.Uint64(s[slen-8:slen-8+8]) * mul
- d := binary.LittleEndian.Uint64(s[slen-16:slen-16+8]) * k2
- y := bits.RotateLeft64(a+b, -43) + bits.RotateLeft64(c, -30) + d
- z := hashLen16Mul(y, a+bits.RotateLeft64(b+k2, -18)+c, mul)
- e := binary.LittleEndian.Uint64(s[16:16+8]) * mul
- f := binary.LittleEndian.Uint64(s[24 : 24+8])
- g := (y + binary.LittleEndian.Uint64(s[slen-32:slen-32+8])) * mul
- h := (z + binary.LittleEndian.Uint64(s[slen-24:slen-24+8])) * mul
- return hashLen16Mul(bits.RotateLeft64(e+f, -43)+bits.RotateLeft64(g, -30)+h, e+bits.RotateLeft64(f+a, -18)+g, mul)
-}
-
-func naHash64(s []byte) uint64 {
- slen := len(s)
- var seed uint64 = 81
- if slen <= 32 {
- if slen <= 16 {
- return hashLen0to16(s)
- }
- return hashLen17to32(s)
- }
- if slen <= 64 {
- return hashLen33to64(s)
- }
- // For strings over 64 bytes we loop.
- // Internal state consists of 56 bytes: v, w, x, y, and z.
- v := uint128{0, 0}
- w := uint128{0, 0}
- x := seed*k2 + binary.LittleEndian.Uint64(s[0:0+8])
- y := seed*k1 + 113
- z := shiftMix(y*k2+113) * k2
- // Set end so that after the loop we have 1 to 64 bytes left to process.
- endIdx := ((slen - 1) / 64) * 64
- last64Idx := endIdx + ((slen - 1) & 63) - 63
- last64 := s[last64Idx:]
- for len(s) > 64 {
- x = bits.RotateLeft64(x+y+v.lo+binary.LittleEndian.Uint64(s[8:8+8]), -37) * k1
- y = bits.RotateLeft64(y+v.hi+binary.LittleEndian.Uint64(s[48:48+8]), -42) * k1
- x ^= w.hi
- y += v.lo + binary.LittleEndian.Uint64(s[40:40+8])
- z = bits.RotateLeft64(z+w.lo, -33) * k1
- v.lo, v.hi = weakHashLen32WithSeeds(s, v.hi*k1, x+w.lo)
- w.lo, w.hi = weakHashLen32WithSeeds(s[32:], z+w.hi, y+binary.LittleEndian.Uint64(s[16:16+8]))
- x, z = z, x
- s = s[64:]
- }
- mul := k1 + ((z & 0xff) << 1)
- // Make s point to the last 64 bytes of input.
- s = last64
- w.lo += (uint64(slen-1) & 63)
- v.lo += w.lo
- w.lo += v.lo
- x = bits.RotateLeft64(x+y+v.lo+binary.LittleEndian.Uint64(s[8:8+8]), -37) * mul
- y = bits.RotateLeft64(y+v.hi+binary.LittleEndian.Uint64(s[48:48+8]), -42) * mul
- x ^= w.hi * 9
- y += v.lo*9 + binary.LittleEndian.Uint64(s[40:40+8])
- z = bits.RotateLeft64(z+w.lo, -33) * mul
- v.lo, v.hi = weakHashLen32WithSeeds(s, v.hi*mul, x+w.lo)
- w.lo, w.hi = weakHashLen32WithSeeds(s[32:], z+w.hi, y+binary.LittleEndian.Uint64(s[16:16+8]))
- x, z = z, x
- return hashLen16Mul(hashLen16Mul(v.lo, w.lo, mul)+shiftMix(y)*k0+z, hashLen16Mul(v.hi, w.hi, mul)+x, mul)
-}
-
-func naHash64WithSeed(s []byte, seed uint64) uint64 {
- return naHash64WithSeeds(s, k2, seed)
-}
-
-func naHash64WithSeeds(s []byte, seed0, seed1 uint64) uint64 {
- return hashLen16(naHash64(s)-seed0, seed1)
-}
diff --git a/vendor/github.com/dgryski/go-farm/farmhashuo.go b/vendor/github.com/dgryski/go-farm/farmhashuo.go
deleted file mode 100644
index 474b74e0..00000000
--- a/vendor/github.com/dgryski/go-farm/farmhashuo.go
+++ /dev/null
@@ -1,122 +0,0 @@
-package farm
-
-import (
- "encoding/binary"
- "math/bits"
-)
-
-func uoH(x, y, mul uint64, r uint) uint64 {
- a := (x ^ y) * mul
- a ^= (a >> 47)
- b := (y ^ a) * mul
- return bits.RotateLeft64(b, -int(r)) * mul
-}
-
-// Hash64WithSeeds hashes a byte slice and two uint64 seeds and returns a uint64 hash value
-func Hash64WithSeeds(s []byte, seed0, seed1 uint64) uint64 {
- slen := len(s)
- if slen <= 64 {
- return naHash64WithSeeds(s, seed0, seed1)
- }
-
- // For strings over 64 bytes we loop.
- // Internal state consists of 64 bytes: u, v, w, x, y, and z.
- x := seed0
- y := seed1*k2 + 113
- z := shiftMix(y*k2) * k2
- v := uint128{seed0, seed1}
- var w uint128
- u := x - z
- x *= k2
- mul := k2 + (u & 0x82)
-
- // Set end so that after the loop we have 1 to 64 bytes left to process.
- endIdx := ((slen - 1) / 64) * 64
- last64Idx := endIdx + ((slen - 1) & 63) - 63
- last64 := s[last64Idx:]
-
- for len(s) > 64 {
- a0 := binary.LittleEndian.Uint64(s[0 : 0+8])
- a1 := binary.LittleEndian.Uint64(s[8 : 8+8])
- a2 := binary.LittleEndian.Uint64(s[16 : 16+8])
- a3 := binary.LittleEndian.Uint64(s[24 : 24+8])
- a4 := binary.LittleEndian.Uint64(s[32 : 32+8])
- a5 := binary.LittleEndian.Uint64(s[40 : 40+8])
- a6 := binary.LittleEndian.Uint64(s[48 : 48+8])
- a7 := binary.LittleEndian.Uint64(s[56 : 56+8])
- x += a0 + a1
- y += a2
- z += a3
- v.lo += a4
- v.hi += a5 + a1
- w.lo += a6
- w.hi += a7
-
- x = bits.RotateLeft64(x, -26)
- x *= 9
- y = bits.RotateLeft64(y, -29)
- z *= mul
- v.lo = bits.RotateLeft64(v.lo, -33)
- v.hi = bits.RotateLeft64(v.hi, -30)
- w.lo ^= x
- w.lo *= 9
- z = bits.RotateLeft64(z, -32)
- z += w.hi
- w.hi += z
- z *= 9
- u, y = y, u
-
- z += a0 + a6
- v.lo += a2
- v.hi += a3
- w.lo += a4
- w.hi += a5 + a6
- x += a1
- y += a7
-
- y += v.lo
- v.lo += x - y
- v.hi += w.lo
- w.lo += v.hi
- w.hi += x - y
- x += w.hi
- w.hi = bits.RotateLeft64(w.hi, -34)
- u, z = z, u
- s = s[64:]
- }
- // Make s point to the last 64 bytes of input.
- s = last64
- u *= 9
- v.hi = bits.RotateLeft64(v.hi, -28)
- v.lo = bits.RotateLeft64(v.lo, -20)
- w.lo += (uint64(slen-1) & 63)
- u += y
- y += u
- x = bits.RotateLeft64(y-x+v.lo+binary.LittleEndian.Uint64(s[8:8+8]), -37) * mul
- y = bits.RotateLeft64(y^v.hi^binary.LittleEndian.Uint64(s[48:48+8]), -42) * mul
- x ^= w.hi * 9
- y += v.lo + binary.LittleEndian.Uint64(s[40:40+8])
- z = bits.RotateLeft64(z+w.lo, -33) * mul
- v.lo, v.hi = weakHashLen32WithSeeds(s, v.hi*mul, x+w.lo)
- w.lo, w.hi = weakHashLen32WithSeeds(s[32:], z+w.hi, y+binary.LittleEndian.Uint64(s[16:16+8]))
- return uoH(hashLen16Mul(v.lo+x, w.lo^y, mul)+z-u,
- uoH(v.hi+y, w.hi+z, k2, 30)^x,
- k2,
- 31)
-}
-
-// Hash64WithSeed hashes a byte slice and a uint64 seed and returns a uint64 hash value
-func Hash64WithSeed(s []byte, seed uint64) uint64 {
- if len(s) <= 64 {
- return naHash64WithSeed(s, seed)
- }
- return Hash64WithSeeds(s, 0, seed)
-}
-
-// Hash64 hashes a byte slice and returns a uint64 hash value
-func Hash64(s []byte) uint64 {
- if len(s) <= 64 {
- return naHash64(s)
- }
- return Hash64WithSeeds(s, 81, 0)
-}
diff --git a/vendor/github.com/dgryski/go-farm/fp_amd64.s b/vendor/github.com/dgryski/go-farm/fp_amd64.s
deleted file mode 100644
index 2b8fa324..00000000
--- a/vendor/github.com/dgryski/go-farm/fp_amd64.s
+++ /dev/null
@@ -1,951 +0,0 @@
-// Code generated by command: go run asm.go -out=fp_amd64.s -go111=false. DO NOT EDIT.
-
-// +build amd64,!purego
-
-#include "textflag.h"
-
-// func Fingerprint64(s []byte) uint64
-TEXT ·Fingerprint64(SB), NOSPLIT, $0-32
- MOVQ s_base+0(FP), CX
- MOVQ s_len+8(FP), AX
- CMPQ AX, $0x10
- JG check32
- CMPQ AX, $0x08
- JL check4
- MOVQ (CX), DX
- MOVQ AX, BX
- SUBQ $0x08, BX
- ADDQ CX, BX
- MOVQ (BX), BX
- MOVQ $0x9ae16a3b2f90404f, BP
- ADDQ BP, DX
- SHLQ $0x01, AX
- ADDQ BP, AX
- MOVQ BX, BP
- RORQ $0x25, BP
- IMULQ AX, BP
- ADDQ DX, BP
- RORQ $0x19, DX
- ADDQ BX, DX
- IMULQ AX, DX
- XORQ DX, BP
- IMULQ AX, BP
- MOVQ BP, BX
- SHRQ $0x2f, BX
- XORQ BP, BX
- XORQ BX, DX
- IMULQ AX, DX
- MOVQ DX, BX
- SHRQ $0x2f, BX
- XORQ DX, BX
- IMULQ AX, BX
- MOVQ BX, ret+24(FP)
- RET
-
-check4:
- CMPQ AX, $0x04
- JL check0
- MOVQ $0x9ae16a3b2f90404f, DX
- MOVQ AX, BX
- SHLQ $0x01, BX
- ADDQ DX, BX
- MOVL (CX), SI
- SHLQ $0x03, SI
- ADDQ AX, SI
- SUBQ $0x04, AX
- ADDQ AX, CX
- MOVL (CX), DI
- XORQ DI, SI
- IMULQ BX, SI
- MOVQ SI, DX
- SHRQ $0x2f, DX
- XORQ SI, DX
- XORQ DX, DI
- IMULQ BX, DI
- MOVQ DI, DX
- SHRQ $0x2f, DX
- XORQ DI, DX
- IMULQ BX, DX
- MOVQ DX, ret+24(FP)
- RET
-
-check0:
- TESTQ AX, AX
- JZ empty
- MOVBQZX (CX), DX
- MOVQ AX, BX
- SHRQ $0x01, BX
- ADDQ CX, BX
- MOVBQZX (BX), BP
- MOVQ AX, BX
- SUBQ $0x01, BX
- ADDQ CX, BX
- MOVBQZX (BX), BX
- SHLQ $0x08, BP
- ADDQ BP, DX
- SHLQ $0x02, BX
- ADDQ BX, AX
- MOVQ $0xc3a5c85c97cb3127, BX
- IMULQ BX, AX
- MOVQ $0x9ae16a3b2f90404f, BX
- IMULQ BX, DX
- XORQ DX, AX
- MOVQ AX, DX
- SHRQ $0x2f, DX
- XORQ AX, DX
- IMULQ BX, DX
- MOVQ DX, ret+24(FP)
- RET
-
-empty:
- MOVQ $0x9ae16a3b2f90404f, DX
- MOVQ DX, ret+24(FP)
- RET
-
-check32:
- CMPQ AX, $0x20
- JG check64
- MOVQ AX, DX
- SHLQ $0x01, DX
- MOVQ $0x9ae16a3b2f90404f, BX
- ADDQ BX, DX
- MOVQ (CX), BP
- MOVQ $0xb492b66fbe98f273, SI
- IMULQ SI, BP
- MOVQ 8(CX), SI
- MOVQ AX, DI
- SUBQ $0x10, DI
- ADDQ CX, DI
- MOVQ 8(DI), R12
- IMULQ DX, R12
- MOVQ (DI), DI
- IMULQ BX, DI
- MOVQ BP, R13
- ADDQ SI, R13
- RORQ $0x2b, R13
- ADDQ DI, R13
- MOVQ R12, DI
- RORQ $0x1e, DI
- ADDQ DI, R13
- ADDQ R12, BP
- ADDQ BX, SI
- RORQ $0x12, SI
- ADDQ SI, BP
- XORQ BP, R13
- IMULQ DX, R13
- MOVQ R13, BX
- SHRQ $0x2f, BX
- XORQ R13, BX
- XORQ BX, BP
- IMULQ DX, BP
- MOVQ BP, BX
- SHRQ $0x2f, BX
- XORQ BP, BX
- IMULQ DX, BX
- MOVQ BX, ret+24(FP)
- RET
-
-check64:
- CMPQ AX, $0x40
- JG long
- MOVQ AX, DX
- SHLQ $0x01, DX
- MOVQ $0x9ae16a3b2f90404f, BX
- ADDQ BX, DX
- MOVQ (CX), BP
- IMULQ BX, BP
- MOVQ 8(CX), SI
- MOVQ AX, DI
- SUBQ $0x10, DI
- ADDQ CX, DI
- MOVQ 8(DI), R12
- IMULQ DX, R12
- MOVQ (DI), DI
- IMULQ BX, DI
- MOVQ BP, R13
- ADDQ SI, R13
- RORQ $0x2b, R13
- ADDQ DI, R13
- MOVQ R12, DI
- RORQ $0x1e, DI
- ADDQ DI, R13
- ADDQ BP, R12
- ADDQ BX, SI
- RORQ $0x12, SI
- ADDQ SI, R12
- MOVQ R13, BX
- XORQ R12, BX
- IMULQ DX, BX
- MOVQ BX, SI
- SHRQ $0x2f, SI
- XORQ BX, SI
- XORQ SI, R12
- IMULQ DX, R12
- MOVQ R12, BX
- SHRQ $0x2f, BX
- XORQ R12, BX
- IMULQ DX, BX
- MOVQ 16(CX), SI
- IMULQ DX, SI
- MOVQ 24(CX), DI
- MOVQ AX, R12
- SUBQ $0x20, R12
- ADDQ CX, R12
- MOVQ (R12), R14
- ADDQ R13, R14
- IMULQ DX, R14
- MOVQ 8(R12), R12
- ADDQ BX, R12
- IMULQ DX, R12
- MOVQ SI, BX
- ADDQ DI, BX
- RORQ $0x2b, BX
- ADDQ R12, BX
- MOVQ R14, R12
- RORQ $0x1e, R12
- ADDQ R12, BX
- ADDQ R14, SI
- ADDQ BP, DI
- RORQ $0x12, DI
- ADDQ DI, SI
- XORQ SI, BX
- IMULQ DX, BX
- MOVQ BX, BP
- SHRQ $0x2f, BP
- XORQ BX, BP
- XORQ BP, SI
- IMULQ DX, SI
- MOVQ SI, BX
- SHRQ $0x2f, BX
- XORQ SI, BX
- IMULQ DX, BX
- MOVQ BX, ret+24(FP)
- RET
-
-long:
- XORQ R8, R8
- XORQ R9, R9
- XORQ R10, R10
- XORQ R11, R11
- MOVQ $0x01529cba0ca458ff, DX
- ADDQ (CX), DX
- MOVQ $0x226bb95b4e64b6d4, BX
- MOVQ $0x134a747f856d0526, BP
- MOVQ AX, SI
- SUBQ $0x01, SI
- MOVQ $0xffffffffffffffc0, DI
- ANDQ DI, SI
- MOVQ AX, DI
- SUBQ $0x01, DI
- ANDQ $0x3f, DI
- SUBQ $0x3f, DI
- ADDQ SI, DI
- MOVQ DI, SI
- ADDQ CX, SI
- MOVQ AX, DI
-
-loop:
- MOVQ $0xb492b66fbe98f273, R12
- ADDQ BX, DX
- ADDQ R8, DX
- ADDQ 8(CX), DX
- RORQ $0x25, DX
- IMULQ R12, DX
- ADDQ R9, BX
- ADDQ 48(CX), BX
- RORQ $0x2a, BX
- IMULQ R12, BX
- XORQ R11, DX
- ADDQ R8, BX
- ADDQ 40(CX), BX
- ADDQ R10, BP
- RORQ $0x21, BP
- IMULQ R12, BP
- IMULQ R12, R9
- MOVQ DX, R8
- ADDQ R10, R8
- ADDQ (CX), R9
- ADDQ R9, R8
- ADDQ 24(CX), R8
- RORQ $0x15, R8
- MOVQ R9, R10
- ADDQ 8(CX), R9
- ADDQ 16(CX), R9
- MOVQ R9, R13
- RORQ $0x2c, R13
- ADDQ R13, R8
- ADDQ 24(CX), R9
- ADDQ R10, R8
- XCHGQ R9, R8
- ADDQ BP, R11
- MOVQ BX, R10
- ADDQ 16(CX), R10
- ADDQ 32(CX), R11
- ADDQ R11, R10
- ADDQ 56(CX), R10
- RORQ $0x15, R10
- MOVQ R11, R13
- ADDQ 40(CX), R11
- ADDQ 48(CX), R11
- MOVQ R11, R14
- RORQ $0x2c, R14
- ADDQ R14, R10
- ADDQ 56(CX), R11
- ADDQ R13, R10
- XCHGQ R11, R10
- XCHGQ BP, DX
- ADDQ $0x40, CX
- SUBQ $0x40, DI
- CMPQ DI, $0x40
- JG loop
- MOVQ SI, CX
- MOVQ BP, DI
- ANDQ $0xff, DI
- SHLQ $0x01, DI
- ADDQ R12, DI
- MOVQ SI, CX
- SUBQ $0x01, AX
- ANDQ $0x3f, AX
- ADDQ AX, R10
- ADDQ R10, R8
- ADDQ R8, R10
- ADDQ BX, DX
- ADDQ R8, DX
- ADDQ 8(CX), DX
- RORQ $0x25, DX
- IMULQ DI, DX
- ADDQ R9, BX
- ADDQ 48(CX), BX
- RORQ $0x2a, BX
- IMULQ DI, BX
- MOVQ $0x00000009, AX
- IMULQ R11, AX
- XORQ AX, DX
- MOVQ $0x00000009, AX
- IMULQ R8, AX
- ADDQ AX, BX
- ADDQ 40(CX), BX
- ADDQ R10, BP
- RORQ $0x21, BP
- IMULQ DI, BP
- IMULQ DI, R9
- MOVQ DX, R8
- ADDQ R10, R8
- ADDQ (CX), R9
- ADDQ R9, R8
- ADDQ 24(CX), R8
- RORQ $0x15, R8
- MOVQ R9, AX
- ADDQ 8(CX), R9
- ADDQ 16(CX), R9
- MOVQ R9, SI
- RORQ $0x2c, SI
- ADDQ SI, R8
- ADDQ 24(CX), R9
- ADDQ AX, R8
- XCHGQ R9, R8
- ADDQ BP, R11
- MOVQ BX, R10
- ADDQ 16(CX), R10
- ADDQ 32(CX), R11
- ADDQ R11, R10
- ADDQ 56(CX), R10
- RORQ $0x15, R10
- MOVQ R11, AX
- ADDQ 40(CX), R11
- ADDQ 48(CX), R11
- MOVQ R11, SI
- RORQ $0x2c, SI
- ADDQ SI, R10
- ADDQ 56(CX), R11
- ADDQ AX, R10
- XCHGQ R11, R10
- XCHGQ BP, DX
- XORQ R10, R8
- IMULQ DI, R8
- MOVQ R8, AX
- SHRQ $0x2f, AX
- XORQ R8, AX
- XORQ AX, R10
- IMULQ DI, R10
- MOVQ R10, AX
- SHRQ $0x2f, AX
- XORQ R10, AX
- IMULQ DI, AX
- ADDQ BP, AX
- MOVQ BX, CX
- SHRQ $0x2f, CX
- XORQ BX, CX
- MOVQ $0xc3a5c85c97cb3127, BX
- IMULQ BX, CX
- ADDQ CX, AX
- XORQ R11, R9
- IMULQ DI, R9
- MOVQ R9, CX
- SHRQ $0x2f, CX
- XORQ R9, CX
- XORQ CX, R11
- IMULQ DI, R11
- MOVQ R11, CX
- SHRQ $0x2f, CX
- XORQ R11, CX
- IMULQ DI, CX
- ADDQ DX, CX
- XORQ CX, AX
- IMULQ DI, AX
- MOVQ AX, DX
- SHRQ $0x2f, DX
- XORQ AX, DX
- XORQ DX, CX
- IMULQ DI, CX
- MOVQ CX, AX
- SHRQ $0x2f, AX
- XORQ CX, AX
- IMULQ DI, AX
- MOVQ AX, ret+24(FP)
- RET
-
-// func Fingerprint32(s []byte) uint32
-TEXT ·Fingerprint32(SB), NOSPLIT, $0-28
- MOVQ s_base+0(FP), AX
- MOVQ s_len+8(FP), CX
- CMPQ CX, $0x18
- JG long
- CMPQ CX, $0x0c
- JG hash_13_24
- CMPQ CX, $0x04
- JG hash_5_12
- XORL DX, DX
- MOVL $0x00000009, BX
- TESTQ CX, CX
- JZ done
- MOVQ CX, BP
- MOVL $0xcc9e2d51, DI
- IMULL DI, DX
- MOVBLSX (AX), SI
- ADDL SI, DX
- XORL DX, BX
- SUBQ $0x01, BP
- TESTQ BP, BP
- JZ done
- IMULL DI, DX
- MOVBLSX 1(AX), SI
- ADDL SI, DX
- XORL DX, BX
- SUBQ $0x01, BP
- TESTQ BP, BP
- JZ done
- IMULL DI, DX
- MOVBLSX 2(AX), SI
- ADDL SI, DX
- XORL DX, BX
- SUBQ $0x01, BP
- TESTQ BP, BP
- JZ done
- IMULL DI, DX
- MOVBLSX 3(AX), SI
- ADDL SI, DX
- XORL DX, BX
- SUBQ $0x01, BP
- TESTQ BP, BP
- JZ done
-
-done:
- MOVL CX, BP
- MOVL $0xcc9e2d51, SI
- IMULL SI, BP
- RORL $0x11, BP
- MOVL $0x1b873593, SI
- IMULL SI, BP
- XORL BP, BX
- RORL $0x13, BX
- LEAL (BX)(BX*4), BP
- LEAL 3864292196(BP), BX
- MOVL $0xcc9e2d51, BP
- IMULL BP, DX
- RORL $0x11, DX
- MOVL $0x1b873593, BP
- IMULL BP, DX
- XORL DX, BX
- RORL $0x13, BX
- LEAL (BX)(BX*4), DX
- LEAL 3864292196(DX), BX
- MOVL BX, DX
- SHRL $0x10, DX
- XORL DX, BX
- MOVL $0x85ebca6b, DX
- IMULL DX, BX
- MOVL BX, DX
- SHRL $0x0d, DX
- XORL DX, BX
- MOVL $0xc2b2ae35, DX
- IMULL DX, BX
- MOVL BX, DX
- SHRL $0x10, DX
- XORL DX, BX
- MOVL BX, ret+24(FP)
- RET
-
-hash_5_12:
- MOVL CX, DX
- MOVL DX, BX
- SHLL $0x02, BX
- ADDL DX, BX
- MOVL $0x00000009, BP
- MOVL BX, SI
- ADDL (AX), DX
- MOVQ CX, DI
- SUBQ $0x04, DI
- ADDQ AX, DI
- ADDL (DI), BX
- MOVQ CX, DI
- SHRQ $0x01, DI
- ANDQ $0x04, DI
- ADDQ AX, DI
- ADDL (DI), BP
- MOVL $0xcc9e2d51, DI
- IMULL DI, DX
- RORL $0x11, DX
- MOVL $0x1b873593, DI
- IMULL DI, DX
- XORL DX, SI
- RORL $0x13, SI
- LEAL (SI)(SI*4), DX
- LEAL 3864292196(DX), SI
- MOVL $0xcc9e2d51, DX
- IMULL DX, BX
- RORL $0x11, BX
- MOVL $0x1b873593, DX
- IMULL DX, BX
- XORL BX, SI
- RORL $0x13, SI
- LEAL (SI)(SI*4), BX
- LEAL 3864292196(BX), SI
- MOVL $0xcc9e2d51, DX
- IMULL DX, BP
- RORL $0x11, BP
- MOVL $0x1b873593, DX
- IMULL DX, BP
- XORL BP, SI
- RORL $0x13, SI
- LEAL (SI)(SI*4), BP
- LEAL 3864292196(BP), SI
- MOVL SI, DX
- SHRL $0x10, DX
- XORL DX, SI
- MOVL $0x85ebca6b, DX
- IMULL DX, SI
- MOVL SI, DX
- SHRL $0x0d, DX
- XORL DX, SI
- MOVL $0xc2b2ae35, DX
- IMULL DX, SI
- MOVL SI, DX
- SHRL $0x10, DX
- XORL DX, SI
- MOVL SI, ret+24(FP)
- RET
-
-hash_13_24:
- MOVQ CX, DX
- SHRQ $0x01, DX
- ADDQ AX, DX
- MOVL -4(DX), BX
- MOVL 4(AX), BP
- MOVQ CX, SI
- ADDQ AX, SI
- MOVL -8(SI), DI
- MOVL (DX), DX
- MOVL (AX), R8
- MOVL -4(SI), SI
- MOVL $0xcc9e2d51, R9
- IMULL DX, R9
- ADDL CX, R9
- RORL $0x0c, BX
- ADDL SI, BX
- MOVL DI, R10
- MOVL $0xcc9e2d51, R11
- IMULL R11, R10
- RORL $0x11, R10
- MOVL $0x1b873593, R11
- IMULL R11, R10
- XORL R10, R9
- RORL $0x13, R9
- LEAL (R9)(R9*4), R10
- LEAL 3864292196(R10), R9
- ADDL BX, R9
- RORL $0x03, BX
- ADDL DI, BX
- MOVL $0xcc9e2d51, DI
- IMULL DI, R8
- RORL $0x11, R8
- MOVL $0x1b873593, DI
- IMULL DI, R8
- XORL R8, R9
- RORL $0x13, R9
- LEAL (R9)(R9*4), R8
- LEAL 3864292196(R8), R9
- ADDL BX, R9
- ADDL SI, BX
- RORL $0x0c, BX
- ADDL DX, BX
- MOVL $0xcc9e2d51, DX
- IMULL DX, BP
- RORL $0x11, BP
- MOVL $0x1b873593, DX
- IMULL DX, BP
- XORL BP, R9
- RORL $0x13, R9
- LEAL (R9)(R9*4), BP
- LEAL 3864292196(BP), R9
- ADDL BX, R9
- MOVL R9, DX
- SHRL $0x10, DX
- XORL DX, R9
- MOVL $0x85ebca6b, DX
- IMULL DX, R9
- MOVL R9, DX
- SHRL $0x0d, DX
- XORL DX, R9
- MOVL $0xc2b2ae35, DX
- IMULL DX, R9
- MOVL R9, DX
- SHRL $0x10, DX
- XORL DX, R9
- MOVL R9, ret+24(FP)
- RET
-
-long:
- MOVL CX, DX
- MOVL $0xcc9e2d51, BX
- IMULL DX, BX
- MOVL BX, BP
- MOVQ CX, SI
- ADDQ AX, SI
- MOVL $0xcc9e2d51, DI
- MOVL $0x1b873593, R8
- MOVL -4(SI), R9
- IMULL DI, R9
- RORL $0x11, R9
- IMULL R8, R9
- XORL R9, DX
- RORL $0x13, DX
- MOVL DX, R9
- SHLL $0x02, R9
- ADDL R9, DX
- ADDL $0xe6546b64, DX
- MOVL -8(SI), R9
- IMULL DI, R9
- RORL $0x11, R9
- IMULL R8, R9
- XORL R9, BX
- RORL $0x13, BX
- MOVL BX, R9
- SHLL $0x02, R9
- ADDL R9, BX
- ADDL $0xe6546b64, BX
- MOVL -16(SI), R9
- IMULL DI, R9
- RORL $0x11, R9
- IMULL R8, R9
- XORL R9, DX
- RORL $0x13, DX
- MOVL DX, R9
- SHLL $0x02, R9
- ADDL R9, DX
- ADDL $0xe6546b64, DX
- MOVL -12(SI), R9
- IMULL DI, R9
- RORL $0x11, R9
- IMULL R8, R9
- XORL R9, BX
- RORL $0x13, BX
- MOVL BX, R9
- SHLL $0x02, R9
- ADDL R9, BX
- ADDL $0xe6546b64, BX
- PREFETCHT0 (AX)
- MOVL -20(SI), SI
- IMULL DI, SI
- RORL $0x11, SI
- IMULL R8, SI
- ADDL SI, BP
- RORL $0x13, BP
- ADDL $0x71, BP
-
-loop80:
- CMPQ CX, $0x64
- JL loop20
- PREFETCHT0 20(AX)
- MOVL (AX), SI
- ADDL SI, DX
- MOVL 4(AX), DI
- ADDL DI, BX
- MOVL 8(AX), R8
- ADDL R8, BP
- MOVL 12(AX), R9
- MOVL R9, R11
- MOVL $0xcc9e2d51, R10
- IMULL R10, R11
- RORL $0x11, R11
- MOVL $0x1b873593, R10
- IMULL R10, R11
- XORL R11, DX
- RORL $0x13, DX
- LEAL (DX)(DX*4), R11
- LEAL 3864292196(R11), DX
- MOVL 16(AX), R10
- ADDL R10, DX
- MOVL R8, R11
- MOVL $0xcc9e2d51, R8
- IMULL R8, R11
- RORL $0x11, R11
- MOVL $0x1b873593, R8
- IMULL R8, R11
- XORL R11, BX
- RORL $0x13, BX
- LEAL (BX)(BX*4), R11
- LEAL 3864292196(R11), BX
- ADDL SI, BX
- MOVL $0xcc9e2d51, SI
- IMULL SI, R10
- MOVL R10, R11
- ADDL DI, R11
- MOVL $0xcc9e2d51, SI
- IMULL SI, R11
- RORL $0x11, R11
- MOVL $0x1b873593, SI
- IMULL SI, R11
- XORL R11, BP
- RORL $0x13, BP
- LEAL (BP)(BP*4), R11
- LEAL 3864292196(R11), BP
- ADDL R9, BP
- ADDL BX, BP
- ADDL BP, BX
- PREFETCHT0 40(AX)
- MOVL 20(AX), SI
- ADDL SI, DX
- MOVL 24(AX), DI
- ADDL DI, BX
- MOVL 28(AX), R8
- ADDL R8, BP
- MOVL 32(AX), R9
- MOVL R9, R11
- MOVL $0xcc9e2d51, R10
- IMULL R10, R11
- RORL $0x11, R11
- MOVL $0x1b873593, R10
- IMULL R10, R11
- XORL R11, DX
- RORL $0x13, DX
- LEAL (DX)(DX*4), R11
- LEAL 3864292196(R11), DX
- MOVL 36(AX), R10
- ADDL R10, DX
- MOVL R8, R11
- MOVL $0xcc9e2d51, R8
- IMULL R8, R11
- RORL $0x11, R11
- MOVL $0x1b873593, R8
- IMULL R8, R11
- XORL R11, BX
- RORL $0x13, BX
- LEAL (BX)(BX*4), R11
- LEAL 3864292196(R11), BX
- ADDL SI, BX
- MOVL $0xcc9e2d51, SI
- IMULL SI, R10
- MOVL R10, R11
- ADDL DI, R11
- MOVL $0xcc9e2d51, SI
- IMULL SI, R11
- RORL $0x11, R11
- MOVL $0x1b873593, SI
- IMULL SI, R11
- XORL R11, BP
- RORL $0x13, BP
- LEAL (BP)(BP*4), R11
- LEAL 3864292196(R11), BP
- ADDL R9, BP
- ADDL BX, BP
- ADDL BP, BX
- PREFETCHT0 60(AX)
- MOVL 40(AX), SI
- ADDL SI, DX
- MOVL 44(AX), DI
- ADDL DI, BX
- MOVL 48(AX), R8
- ADDL R8, BP
- MOVL 52(AX), R9
- MOVL R9, R11
- MOVL $0xcc9e2d51, R10
- IMULL R10, R11
- RORL $0x11, R11
- MOVL $0x1b873593, R10
- IMULL R10, R11
- XORL R11, DX
- RORL $0x13, DX
- LEAL (DX)(DX*4), R11
- LEAL 3864292196(R11), DX
- MOVL 56(AX), R10
- ADDL R10, DX
- MOVL R8, R11
- MOVL $0xcc9e2d51, R8
- IMULL R8, R11
- RORL $0x11, R11
- MOVL $0x1b873593, R8
- IMULL R8, R11
- XORL R11, BX
- RORL $0x13, BX
- LEAL (BX)(BX*4), R11
- LEAL 3864292196(R11), BX
- ADDL SI, BX
- MOVL $0xcc9e2d51, SI
- IMULL SI, R10
- MOVL R10, R11
- ADDL DI, R11
- MOVL $0xcc9e2d51, SI
- IMULL SI, R11
- RORL $0x11, R11
- MOVL $0x1b873593, SI
- IMULL SI, R11
- XORL R11, BP
- RORL $0x13, BP
- LEAL (BP)(BP*4), R11
- LEAL 3864292196(R11), BP
- ADDL R9, BP
- ADDL BX, BP
- ADDL BP, BX
- PREFETCHT0 80(AX)
- MOVL 60(AX), SI
- ADDL SI, DX
- MOVL 64(AX), DI
- ADDL DI, BX
- MOVL 68(AX), R8
- ADDL R8, BP
- MOVL 72(AX), R9
- MOVL R9, R11
- MOVL $0xcc9e2d51, R10
- IMULL R10, R11
- RORL $0x11, R11
- MOVL $0x1b873593, R10
- IMULL R10, R11
- XORL R11, DX
- RORL $0x13, DX
- LEAL (DX)(DX*4), R11
- LEAL 3864292196(R11), DX
- MOVL 76(AX), R10
- ADDL R10, DX
- MOVL R8, R11
- MOVL $0xcc9e2d51, R8
- IMULL R8, R11
- RORL $0x11, R11
- MOVL $0x1b873593, R8
- IMULL R8, R11
- XORL R11, BX
- RORL $0x13, BX
- LEAL (BX)(BX*4), R11
- LEAL 3864292196(R11), BX
- ADDL SI, BX
- MOVL $0xcc9e2d51, SI
- IMULL SI, R10
- MOVL R10, R11
- ADDL DI, R11
- MOVL $0xcc9e2d51, SI
- IMULL SI, R11
- RORL $0x11, R11
- MOVL $0x1b873593, SI
- IMULL SI, R11
- XORL R11, BP
- RORL $0x13, BP
- LEAL (BP)(BP*4), R11
- LEAL 3864292196(R11), BP
- ADDL R9, BP
- ADDL BX, BP
- ADDL BP, BX
- ADDQ $0x50, AX
- SUBQ $0x50, CX
- JMP loop80
-
-loop20:
- CMPQ CX, $0x14
- JLE after
- MOVL (AX), SI
- ADDL SI, DX
- MOVL 4(AX), DI
- ADDL DI, BX
- MOVL 8(AX), R8
- ADDL R8, BP
- MOVL 12(AX), R9
- MOVL R9, R11
- MOVL $0xcc9e2d51, R10
- IMULL R10, R11
- RORL $0x11, R11
- MOVL $0x1b873593, R10
- IMULL R10, R11
- XORL R11, DX
- RORL $0x13, DX
- LEAL (DX)(DX*4), R11
- LEAL 3864292196(R11), DX
- MOVL 16(AX), R10
- ADDL R10, DX
- MOVL R8, R11
- MOVL $0xcc9e2d51, R8
- IMULL R8, R11
- RORL $0x11, R11
- MOVL $0x1b873593, R8
- IMULL R8, R11
- XORL R11, BX
- RORL $0x13, BX
- LEAL (BX)(BX*4), R11
- LEAL 3864292196(R11), BX
- ADDL SI, BX
- MOVL $0xcc9e2d51, SI
- IMULL SI, R10
- MOVL R10, R11
- ADDL DI, R11
- MOVL $0xcc9e2d51, SI
- IMULL SI, R11
- RORL $0x11, R11
- MOVL $0x1b873593, SI
- IMULL SI, R11
- XORL R11, BP
- RORL $0x13, BP
- LEAL (BP)(BP*4), R11
- LEAL 3864292196(R11), BP
- ADDL R9, BP
- ADDL BX, BP
- ADDL BP, BX
- ADDQ $0x14, AX
- SUBQ $0x14, CX
- JMP loop20
-
-after:
- MOVL $0xcc9e2d51, AX
- RORL $0x0b, BX
- IMULL AX, BX
- RORL $0x11, BX
- IMULL AX, BX
- RORL $0x0b, BP
- IMULL AX, BP
- RORL $0x11, BP
- IMULL AX, BP
- ADDL BX, DX
- RORL $0x13, DX
- MOVL DX, CX
- SHLL $0x02, CX
- ADDL CX, DX
- ADDL $0xe6546b64, DX
- RORL $0x11, DX
- IMULL AX, DX
- ADDL BP, DX
- RORL $0x13, DX
- MOVL DX, CX
- SHLL $0x02, CX
- ADDL CX, DX
- ADDL $0xe6546b64, DX
- RORL $0x11, DX
- IMULL AX, DX
- MOVL DX, ret+24(FP)
- RET
diff --git a/vendor/github.com/dgryski/go-farm/fp_generic.go b/vendor/github.com/dgryski/go-farm/fp_generic.go
deleted file mode 100644
index 2cfa1b9d..00000000
--- a/vendor/github.com/dgryski/go-farm/fp_generic.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// +build !amd64 purego
-
-package farm
-
-// Fingerprint64 is a 64-bit fingerprint function for byte-slices
-func Fingerprint64(s []byte) uint64 {
- return naHash64(s)
-}
-
-// Fingerprint32 is a 32-bit fingerprint function for byte-slices
-func Fingerprint32(s []byte) uint32 {
- return Hash32(s)
-}
diff --git a/vendor/github.com/dgryski/go-farm/fp_stub.go b/vendor/github.com/dgryski/go-farm/fp_stub.go
deleted file mode 100644
index 94fff8de..00000000
--- a/vendor/github.com/dgryski/go-farm/fp_stub.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Code generated by command: go run asm.go -out=fp_amd64.s -stubs=fp_stub.go. DO NOT EDIT.
-
-// +build amd64,!purego
-
-package farm
-
-func Fingerprint64(s []byte) uint64
-
-func Fingerprint32(s []byte) uint32
diff --git a/vendor/github.com/dgryski/go-rendezvous/LICENSE b/vendor/github.com/dgryski/go-rendezvous/LICENSE
deleted file mode 100644
index 22080f73..00000000
--- a/vendor/github.com/dgryski/go-rendezvous/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2017-2020 Damian Gryski
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/github.com/dgryski/go-rendezvous/rdv.go b/vendor/github.com/dgryski/go-rendezvous/rdv.go
deleted file mode 100644
index 7a6f8203..00000000
--- a/vendor/github.com/dgryski/go-rendezvous/rdv.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package rendezvous
-
-type Rendezvous struct {
- nodes map[string]int
- nstr []string
- nhash []uint64
- hash Hasher
-}
-
-type Hasher func(s string) uint64
-
-func New(nodes []string, hash Hasher) *Rendezvous {
- r := &Rendezvous{
- nodes: make(map[string]int, len(nodes)),
- nstr: make([]string, len(nodes)),
- nhash: make([]uint64, len(nodes)),
- hash: hash,
- }
-
- for i, n := range nodes {
- r.nodes[n] = i
- r.nstr[i] = n
- r.nhash[i] = hash(n)
- }
-
- return r
-}
-
-func (r *Rendezvous) Lookup(k string) string {
- // short-circuit if we're empty
- if len(r.nodes) == 0 {
- return ""
- }
-
- khash := r.hash(k)
-
- var midx int
- var mhash = xorshiftMult64(khash ^ r.nhash[0])
-
- for i, nhash := range r.nhash[1:] {
- if h := xorshiftMult64(khash ^ nhash); h > mhash {
- midx = i + 1
- mhash = h
- }
- }
-
- return r.nstr[midx]
-}
-
-func (r *Rendezvous) Add(node string) {
- r.nodes[node] = len(r.nstr)
- r.nstr = append(r.nstr, node)
- r.nhash = append(r.nhash, r.hash(node))
-}
-
-func (r *Rendezvous) Remove(node string) {
- // find index of node to remove
- nidx := r.nodes[node]
-
- // remove from the slices
- l := len(r.nstr)
- r.nstr[nidx] = r.nstr[l]
- r.nstr = r.nstr[:l]
-
- r.nhash[nidx] = r.nhash[l]
- r.nhash = r.nhash[:l]
-
- // update the map
- delete(r.nodes, node)
- moved := r.nstr[nidx]
- r.nodes[moved] = nidx
-}
-
-func xorshiftMult64(x uint64) uint64 {
- x ^= x >> 12 // a
- x ^= x << 25 // b
- x ^= x >> 27 // c
- return x * 2685821657736338717
-}
diff --git a/vendor/github.com/dustin/go-humanize/.travis.yml b/vendor/github.com/dustin/go-humanize/.travis.yml
deleted file mode 100644
index ba95cdd1..00000000
--- a/vendor/github.com/dustin/go-humanize/.travis.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-sudo: false
-language: go
-go:
- - 1.3.x
- - 1.5.x
- - 1.6.x
- - 1.7.x
- - 1.8.x
- - 1.9.x
- - master
-matrix:
- allow_failures:
- - go: master
- fast_finish: true
-install:
- - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
-script:
- - go get -t -v ./...
- - diff -u <(echo -n) <(gofmt -d -s .)
- - go tool vet .
- - go test -v -race ./...
diff --git a/vendor/github.com/dustin/go-humanize/LICENSE b/vendor/github.com/dustin/go-humanize/LICENSE
deleted file mode 100644
index 8d9a94a9..00000000
--- a/vendor/github.com/dustin/go-humanize/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-Copyright (c) 2005-2008 Dustin Sallings
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
diff --git a/vendor/github.com/dustin/go-humanize/README.markdown b/vendor/github.com/dustin/go-humanize/README.markdown
deleted file mode 100644
index 91b4ae56..00000000
--- a/vendor/github.com/dustin/go-humanize/README.markdown
+++ /dev/null
@@ -1,124 +0,0 @@
-# Humane Units [![Build Status](https://travis-ci.org/dustin/go-humanize.svg?branch=master)](https://travis-ci.org/dustin/go-humanize) [![GoDoc](https://godoc.org/github.com/dustin/go-humanize?status.svg)](https://godoc.org/github.com/dustin/go-humanize)
-
-Just a few functions for helping humanize times and sizes.
-
-`go get` it as `github.com/dustin/go-humanize`, import it as
-`"github.com/dustin/go-humanize"`, use it as `humanize`.
-
-See [godoc](https://godoc.org/github.com/dustin/go-humanize) for
-complete documentation.
-
-## Sizes
-
-This lets you take numbers like `82854982` and convert them to useful
-strings like, `83 MB` or `79 MiB` (whichever you prefer).
-
-Example:
-
-```go
-fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB.
-```
-
-## Times
-
-This lets you take a `time.Time` and spit it out in relative terms.
-For example, `12 seconds ago` or `3 days from now`.
-
-Example:
-
-```go
-fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago.
-```
-
-Thanks to Kyle Lemons for the time implementation from an IRC
-conversation one day. It's pretty neat.
-
-## Ordinals
-
-From a [mailing list discussion][odisc] where a user wanted to be able
-to label ordinals.
-
- 0 -> 0th
- 1 -> 1st
- 2 -> 2nd
- 3 -> 3rd
- 4 -> 4th
- [...]
-
-Example:
-
-```go
-fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You are my 193rd best friend.
-```
-
-## Commas
-
-Want to shove commas into numbers? Be my guest.
-
- 0 -> 0
- 100 -> 100
- 1000 -> 1,000
- 1000000000 -> 1,000,000,000
- -100000 -> -100,000
-
-Example:
-
-```go
-fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491.
-```
-
-## Ftoa
-
-Nicer float64 formatter that removes trailing zeros.
-
-```go
-fmt.Printf("%f", 2.24) // 2.240000
-fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24
-fmt.Printf("%f", 2.0) // 2.000000
-fmt.Printf("%s", humanize.Ftoa(2.0)) // 2
-```
-
-## SI notation
-
-Format numbers with [SI notation][sinotation].
-
-Example:
-
-```go
-humanize.SI(0.00000000223, "M") // 2.23 nM
-```
-
-## English-specific functions
-
-The following functions are in the `humanize/english` subpackage.
-
-### Plurals
-
-Simple English pluralization
-
-```go
-english.PluralWord(1, "object", "") // object
-english.PluralWord(42, "object", "") // objects
-english.PluralWord(2, "bus", "") // buses
-english.PluralWord(99, "locus", "loci") // loci
-
-english.Plural(1, "object", "") // 1 object
-english.Plural(42, "object", "") // 42 objects
-english.Plural(2, "bus", "") // 2 buses
-english.Plural(99, "locus", "loci") // 99 loci
-```
-
-### Word series
-
-Format comma-separated words lists with conjuctions:
-
-```go
-english.WordSeries([]string{"foo"}, "and") // foo
-english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar
-english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz
-
-english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz
-```
-
-[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion
-[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix
diff --git a/vendor/github.com/dustin/go-humanize/big.go b/vendor/github.com/dustin/go-humanize/big.go
deleted file mode 100644
index f49dc337..00000000
--- a/vendor/github.com/dustin/go-humanize/big.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package humanize
-
-import (
- "math/big"
-)
-
-// order of magnitude (to a max order)
-func oomm(n, b *big.Int, maxmag int) (float64, int) {
- mag := 0
- m := &big.Int{}
- for n.Cmp(b) >= 0 {
- n.DivMod(n, b, m)
- mag++
- if mag == maxmag && maxmag >= 0 {
- break
- }
- }
- return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
-}
-
-// total order of magnitude
-// (same as above, but with no upper limit)
-func oom(n, b *big.Int) (float64, int) {
- mag := 0
- m := &big.Int{}
- for n.Cmp(b) >= 0 {
- n.DivMod(n, b, m)
- mag++
- }
- return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
-}
diff --git a/vendor/github.com/dustin/go-humanize/bigbytes.go b/vendor/github.com/dustin/go-humanize/bigbytes.go
deleted file mode 100644
index 1a2bf617..00000000
--- a/vendor/github.com/dustin/go-humanize/bigbytes.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package humanize
-
-import (
- "fmt"
- "math/big"
- "strings"
- "unicode"
-)
-
-var (
- bigIECExp = big.NewInt(1024)
-
- // BigByte is one byte in bit.Ints
- BigByte = big.NewInt(1)
- // BigKiByte is 1,024 bytes in bit.Ints
- BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp)
- // BigMiByte is 1,024 k bytes in bit.Ints
- BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp)
- // BigGiByte is 1,024 m bytes in bit.Ints
- BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp)
- // BigTiByte is 1,024 g bytes in bit.Ints
- BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp)
- // BigPiByte is 1,024 t bytes in bit.Ints
- BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp)
- // BigEiByte is 1,024 p bytes in bit.Ints
- BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp)
- // BigZiByte is 1,024 e bytes in bit.Ints
- BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp)
- // BigYiByte is 1,024 z bytes in bit.Ints
- BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp)
-)
-
-var (
- bigSIExp = big.NewInt(1000)
-
- // BigSIByte is one SI byte in big.Ints
- BigSIByte = big.NewInt(1)
- // BigKByte is 1,000 SI bytes in big.Ints
- BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp)
- // BigMByte is 1,000 SI k bytes in big.Ints
- BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp)
- // BigGByte is 1,000 SI m bytes in big.Ints
- BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp)
- // BigTByte is 1,000 SI g bytes in big.Ints
- BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp)
- // BigPByte is 1,000 SI t bytes in big.Ints
- BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp)
- // BigEByte is 1,000 SI p bytes in big.Ints
- BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp)
- // BigZByte is 1,000 SI e bytes in big.Ints
- BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp)
- // BigYByte is 1,000 SI z bytes in big.Ints
- BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp)
-)
-
-var bigBytesSizeTable = map[string]*big.Int{
- "b": BigByte,
- "kib": BigKiByte,
- "kb": BigKByte,
- "mib": BigMiByte,
- "mb": BigMByte,
- "gib": BigGiByte,
- "gb": BigGByte,
- "tib": BigTiByte,
- "tb": BigTByte,
- "pib": BigPiByte,
- "pb": BigPByte,
- "eib": BigEiByte,
- "eb": BigEByte,
- "zib": BigZiByte,
- "zb": BigZByte,
- "yib": BigYiByte,
- "yb": BigYByte,
- // Without suffix
- "": BigByte,
- "ki": BigKiByte,
- "k": BigKByte,
- "mi": BigMiByte,
- "m": BigMByte,
- "gi": BigGiByte,
- "g": BigGByte,
- "ti": BigTiByte,
- "t": BigTByte,
- "pi": BigPiByte,
- "p": BigPByte,
- "ei": BigEiByte,
- "e": BigEByte,
- "z": BigZByte,
- "zi": BigZiByte,
- "y": BigYByte,
- "yi": BigYiByte,
-}
-
-var ten = big.NewInt(10)
-
-func humanateBigBytes(s, base *big.Int, sizes []string) string {
- if s.Cmp(ten) < 0 {
- return fmt.Sprintf("%d B", s)
- }
- c := (&big.Int{}).Set(s)
- val, mag := oomm(c, base, len(sizes)-1)
- suffix := sizes[mag]
- f := "%.0f %s"
- if val < 10 {
- f = "%.1f %s"
- }
-
- return fmt.Sprintf(f, val, suffix)
-
-}
-
-// BigBytes produces a human readable representation of an SI size.
-//
-// See also: ParseBigBytes.
-//
-// BigBytes(82854982) -> 83 MB
-func BigBytes(s *big.Int) string {
- sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
- return humanateBigBytes(s, bigSIExp, sizes)
-}
-
-// BigIBytes produces a human readable representation of an IEC size.
-//
-// See also: ParseBigBytes.
-//
-// BigIBytes(82854982) -> 79 MiB
-func BigIBytes(s *big.Int) string {
- sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
- return humanateBigBytes(s, bigIECExp, sizes)
-}
-
-// ParseBigBytes parses a string representation of bytes into the number
-// of bytes it represents.
-//
-// See also: BigBytes, BigIBytes.
-//
-// ParseBigBytes("42 MB") -> 42000000, nil
-// ParseBigBytes("42 mib") -> 44040192, nil
-func ParseBigBytes(s string) (*big.Int, error) {
- lastDigit := 0
- hasComma := false
- for _, r := range s {
- if !(unicode.IsDigit(r) || r == '.' || r == ',') {
- break
- }
- if r == ',' {
- hasComma = true
- }
- lastDigit++
- }
-
- num := s[:lastDigit]
- if hasComma {
- num = strings.Replace(num, ",", "", -1)
- }
-
- val := &big.Rat{}
- _, err := fmt.Sscanf(num, "%f", val)
- if err != nil {
- return nil, err
- }
-
- extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
- if m, ok := bigBytesSizeTable[extra]; ok {
- mv := (&big.Rat{}).SetInt(m)
- val.Mul(val, mv)
- rv := &big.Int{}
- rv.Div(val.Num(), val.Denom())
- return rv, nil
- }
-
- return nil, fmt.Errorf("unhandled size name: %v", extra)
-}
diff --git a/vendor/github.com/dustin/go-humanize/bytes.go b/vendor/github.com/dustin/go-humanize/bytes.go
deleted file mode 100644
index 0b498f48..00000000
--- a/vendor/github.com/dustin/go-humanize/bytes.go
+++ /dev/null
@@ -1,143 +0,0 @@
-package humanize
-
-import (
- "fmt"
- "math"
- "strconv"
- "strings"
- "unicode"
-)
-
-// IEC Sizes.
-// kibis of bits
-const (
- Byte = 1 << (iota * 10)
- KiByte
- MiByte
- GiByte
- TiByte
- PiByte
- EiByte
-)
-
-// SI Sizes.
-const (
- IByte = 1
- KByte = IByte * 1000
- MByte = KByte * 1000
- GByte = MByte * 1000
- TByte = GByte * 1000
- PByte = TByte * 1000
- EByte = PByte * 1000
-)
-
-var bytesSizeTable = map[string]uint64{
- "b": Byte,
- "kib": KiByte,
- "kb": KByte,
- "mib": MiByte,
- "mb": MByte,
- "gib": GiByte,
- "gb": GByte,
- "tib": TiByte,
- "tb": TByte,
- "pib": PiByte,
- "pb": PByte,
- "eib": EiByte,
- "eb": EByte,
- // Without suffix
- "": Byte,
- "ki": KiByte,
- "k": KByte,
- "mi": MiByte,
- "m": MByte,
- "gi": GiByte,
- "g": GByte,
- "ti": TiByte,
- "t": TByte,
- "pi": PiByte,
- "p": PByte,
- "ei": EiByte,
- "e": EByte,
-}
-
-func logn(n, b float64) float64 {
- return math.Log(n) / math.Log(b)
-}
-
-func humanateBytes(s uint64, base float64, sizes []string) string {
- if s < 10 {
- return fmt.Sprintf("%d B", s)
- }
- e := math.Floor(logn(float64(s), base))
- suffix := sizes[int(e)]
- val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10
- f := "%.0f %s"
- if val < 10 {
- f = "%.1f %s"
- }
-
- return fmt.Sprintf(f, val, suffix)
-}
-
-// Bytes produces a human readable representation of an SI size.
-//
-// See also: ParseBytes.
-//
-// Bytes(82854982) -> 83 MB
-func Bytes(s uint64) string {
- sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"}
- return humanateBytes(s, 1000, sizes)
-}
-
-// IBytes produces a human readable representation of an IEC size.
-//
-// See also: ParseBytes.
-//
-// IBytes(82854982) -> 79 MiB
-func IBytes(s uint64) string {
- sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"}
- return humanateBytes(s, 1024, sizes)
-}
-
-// ParseBytes parses a string representation of bytes into the number
-// of bytes it represents.
-//
-// See Also: Bytes, IBytes.
-//
-// ParseBytes("42 MB") -> 42000000, nil
-// ParseBytes("42 mib") -> 44040192, nil
-func ParseBytes(s string) (uint64, error) {
- lastDigit := 0
- hasComma := false
- for _, r := range s {
- if !(unicode.IsDigit(r) || r == '.' || r == ',') {
- break
- }
- if r == ',' {
- hasComma = true
- }
- lastDigit++
- }
-
- num := s[:lastDigit]
- if hasComma {
- num = strings.Replace(num, ",", "", -1)
- }
-
- f, err := strconv.ParseFloat(num, 64)
- if err != nil {
- return 0, err
- }
-
- extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
- if m, ok := bytesSizeTable[extra]; ok {
- f *= float64(m)
- if f >= math.MaxUint64 {
- return 0, fmt.Errorf("too large: %v", s)
- }
- return uint64(f), nil
- }
-
- return 0, fmt.Errorf("unhandled size name: %v", extra)
-}
diff --git a/vendor/github.com/dustin/go-humanize/comma.go b/vendor/github.com/dustin/go-humanize/comma.go
deleted file mode 100644
index 520ae3e5..00000000
--- a/vendor/github.com/dustin/go-humanize/comma.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package humanize
-
-import (
- "bytes"
- "math"
- "math/big"
- "strconv"
- "strings"
-)
-
-// Comma produces a string form of the given number in base 10 with
-// commas after every three orders of magnitude.
-//
-// e.g. Comma(834142) -> 834,142
-func Comma(v int64) string {
- sign := ""
-
- // Min int64 can't be negated to a usable value, so it has to be special cased.
- if v == math.MinInt64 {
- return "-9,223,372,036,854,775,808"
- }
-
- if v < 0 {
- sign = "-"
- v = 0 - v
- }
-
- parts := []string{"", "", "", "", "", "", ""}
- j := len(parts) - 1
-
- for v > 999 {
- parts[j] = strconv.FormatInt(v%1000, 10)
- switch len(parts[j]) {
- case 2:
- parts[j] = "0" + parts[j]
- case 1:
- parts[j] = "00" + parts[j]
- }
- v = v / 1000
- j--
- }
- parts[j] = strconv.Itoa(int(v))
- return sign + strings.Join(parts[j:], ",")
-}
-
-// Commaf produces a string form of the given number in base 10 with
-// commas after every three orders of magnitude.
-//
-// e.g. Commaf(834142.32) -> 834,142.32
-func Commaf(v float64) string {
- buf := &bytes.Buffer{}
- if v < 0 {
- buf.Write([]byte{'-'})
- v = 0 - v
- }
-
- comma := []byte{','}
-
- parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".")
- pos := 0
- if len(parts[0])%3 != 0 {
- pos += len(parts[0]) % 3
- buf.WriteString(parts[0][:pos])
- buf.Write(comma)
- }
- for ; pos < len(parts[0]); pos += 3 {
- buf.WriteString(parts[0][pos : pos+3])
- buf.Write(comma)
- }
- buf.Truncate(buf.Len() - 1)
-
- if len(parts) > 1 {
- buf.Write([]byte{'.'})
- buf.WriteString(parts[1])
- }
- return buf.String()
-}
-
-// CommafWithDigits works like the Commaf but limits the resulting
-// string to the given number of decimal places.
-//
-// e.g. CommafWithDigits(834142.32, 1) -> 834,142.3
-func CommafWithDigits(f float64, decimals int) string {
- return stripTrailingDigits(Commaf(f), decimals)
-}
-
-// BigComma produces a string form of the given big.Int in base 10
-// with commas after every three orders of magnitude.
-func BigComma(b *big.Int) string {
- sign := ""
- if b.Sign() < 0 {
- sign = "-"
- b.Abs(b)
- }
-
- athousand := big.NewInt(1000)
- c := (&big.Int{}).Set(b)
- _, m := oom(c, athousand)
- parts := make([]string, m+1)
- j := len(parts) - 1
-
- mod := &big.Int{}
- for b.Cmp(athousand) >= 0 {
- b.DivMod(b, athousand, mod)
- parts[j] = strconv.FormatInt(mod.Int64(), 10)
- switch len(parts[j]) {
- case 2:
- parts[j] = "0" + parts[j]
- case 1:
- parts[j] = "00" + parts[j]
- }
- j--
- }
- parts[j] = strconv.Itoa(int(b.Int64()))
- return sign + strings.Join(parts[j:], ",")
-}
diff --git a/vendor/github.com/dustin/go-humanize/commaf.go b/vendor/github.com/dustin/go-humanize/commaf.go
deleted file mode 100644
index 620690de..00000000
--- a/vendor/github.com/dustin/go-humanize/commaf.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// +build go1.6
-
-package humanize
-
-import (
- "bytes"
- "math/big"
- "strings"
-)
-
-// BigCommaf produces a string form of the given big.Float in base 10
-// with commas after every three orders of magnitude.
-func BigCommaf(v *big.Float) string {
- buf := &bytes.Buffer{}
- if v.Sign() < 0 {
- buf.Write([]byte{'-'})
- v.Abs(v)
- }
-
- comma := []byte{','}
-
- parts := strings.Split(v.Text('f', -1), ".")
- pos := 0
- if len(parts[0])%3 != 0 {
- pos += len(parts[0]) % 3
- buf.WriteString(parts[0][:pos])
- buf.Write(comma)
- }
- for ; pos < len(parts[0]); pos += 3 {
- buf.WriteString(parts[0][pos : pos+3])
- buf.Write(comma)
- }
- buf.Truncate(buf.Len() - 1)
-
- if len(parts) > 1 {
- buf.Write([]byte{'.'})
- buf.WriteString(parts[1])
- }
- return buf.String()
-}
diff --git a/vendor/github.com/dustin/go-humanize/ftoa.go b/vendor/github.com/dustin/go-humanize/ftoa.go
deleted file mode 100644
index 1c62b640..00000000
--- a/vendor/github.com/dustin/go-humanize/ftoa.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package humanize
-
-import (
- "strconv"
- "strings"
-)
-
-func stripTrailingZeros(s string) string {
- offset := len(s) - 1
- for offset > 0 {
- if s[offset] == '.' {
- offset--
- break
- }
- if s[offset] != '0' {
- break
- }
- offset--
- }
- return s[:offset+1]
-}
-
-func stripTrailingDigits(s string, digits int) string {
- if i := strings.Index(s, "."); i >= 0 {
- if digits <= 0 {
- return s[:i]
- }
- i++
- if i+digits >= len(s) {
- return s
- }
- return s[:i+digits]
- }
- return s
-}
-
-// Ftoa converts a float to a string with no trailing zeros.
-func Ftoa(num float64) string {
- return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64))
-}
-
-// FtoaWithDigits converts a float to a string but limits the resulting string
-// to the given number of decimal places, and no trailing zeros.
-func FtoaWithDigits(num float64, digits int) string {
- return stripTrailingZeros(stripTrailingDigits(strconv.FormatFloat(num, 'f', 6, 64), digits))
-}
diff --git a/vendor/github.com/dustin/go-humanize/humanize.go b/vendor/github.com/dustin/go-humanize/humanize.go
deleted file mode 100644
index a2c2da31..00000000
--- a/vendor/github.com/dustin/go-humanize/humanize.go
+++ /dev/null
@@ -1,8 +0,0 @@
-/*
-Package humanize converts boring ugly numbers to human-friendly strings and back.
-
-Durations can be turned into strings such as "3 days ago", numbers
-representing sizes like 82854982 into useful strings like, "83 MB" or
-"79 MiB" (whichever you prefer).
-*/
-package humanize
diff --git a/vendor/github.com/dustin/go-humanize/number.go b/vendor/github.com/dustin/go-humanize/number.go
deleted file mode 100644
index dec61865..00000000
--- a/vendor/github.com/dustin/go-humanize/number.go
+++ /dev/null
@@ -1,192 +0,0 @@
-package humanize
-
-/*
-Slightly adapted from the source to fit go-humanize.
-
-Author: https://github.com/gorhill
-Source: https://gist.github.com/gorhill/5285193
-
-*/
-
-import (
- "math"
- "strconv"
-)
-
-var (
- renderFloatPrecisionMultipliers = [...]float64{
- 1,
- 10,
- 100,
- 1000,
- 10000,
- 100000,
- 1000000,
- 10000000,
- 100000000,
- 1000000000,
- }
-
- renderFloatPrecisionRounders = [...]float64{
- 0.5,
- 0.05,
- 0.005,
- 0.0005,
- 0.00005,
- 0.000005,
- 0.0000005,
- 0.00000005,
- 0.000000005,
- 0.0000000005,
- }
-)
-
-// FormatFloat produces a formatted number as string based on the following user-specified criteria:
-// * thousands separator
-// * decimal separator
-// * decimal precision
-//
-// Usage: s := RenderFloat(format, n)
-// The format parameter tells how to render the number n.
-//
-// See examples: http://play.golang.org/p/LXc1Ddm1lJ
-//
-// Examples of format strings, given n = 12345.6789:
-// "#,###.##" => "12,345.67"
-// "#,###." => "12,345"
-// "#,###" => "12345,678"
-// "#\u202F###,##" => "12 345,68"
-// "#.###,###### => 12.345,678900
-// "" (aka default format) => 12,345.67
-//
-// The highest precision allowed is 9 digits after the decimal symbol.
-// There is also a version for integer number, FormatInteger(),
-// which is convenient for calls within template.
-func FormatFloat(format string, n float64) string {
- // Special cases:
- // NaN = "NaN"
- // +Inf = "+Infinity"
- // -Inf = "-Infinity"
- if math.IsNaN(n) {
- return "NaN"
- }
- if n > math.MaxFloat64 {
- return "Infinity"
- }
- if n < -math.MaxFloat64 {
- return "-Infinity"
- }
-
- // default format
- precision := 2
- decimalStr := "."
- thousandStr := ","
- positiveStr := ""
- negativeStr := "-"
-
- if len(format) > 0 {
- format := []rune(format)
-
- // If there is an explicit format directive,
- // then default values are these:
- precision = 9
- thousandStr = ""
-
- // collect indices of meaningful formatting directives
- formatIndx := []int{}
- for i, char := range format {
- if char != '#' && char != '0' {
- formatIndx = append(formatIndx, i)
- }
- }
-
- if len(formatIndx) > 0 {
- // Directive at index 0:
- // Must be a '+'
- // Raise an error if not the case
- // index: 0123456789
- // +0.000,000
- // +000,000.0
- // +0000.00
- // +0000
- if formatIndx[0] == 0 {
- if format[formatIndx[0]] != '+' {
- panic("RenderFloat(): invalid positive sign directive")
- }
- positiveStr = "+"
- formatIndx = formatIndx[1:]
- }
-
- // Two directives:
- // First is thousands separator
- // Raise an error if not followed by 3-digit
- // 0123456789
- // 0.000,000
- // 000,000.00
- if len(formatIndx) == 2 {
- if (formatIndx[1] - formatIndx[0]) != 4 {
- panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers")
- }
- thousandStr = string(format[formatIndx[0]])
- formatIndx = formatIndx[1:]
- }
-
- // One directive:
- // Directive is decimal separator
- // The number of digit-specifier following the separator indicates wanted precision
- // 0123456789
- // 0.00
- // 000,0000
- if len(formatIndx) == 1 {
- decimalStr = string(format[formatIndx[0]])
- precision = len(format) - formatIndx[0] - 1
- }
- }
- }
-
- // generate sign part
- var signStr string
- if n >= 0.000000001 {
- signStr = positiveStr
- } else if n <= -0.000000001 {
- signStr = negativeStr
- n = -n
- } else {
- signStr = ""
- n = 0.0
- }
-
- // split number into integer and fractional parts
- intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision])
-
- // generate integer part string
- intStr := strconv.FormatInt(int64(intf), 10)
-
- // add thousand separator if required
- if len(thousandStr) > 0 {
- for i := len(intStr); i > 3; {
- i -= 3
- intStr = intStr[:i] + thousandStr + intStr[i:]
- }
- }
-
- // no fractional part, we can leave now
- if precision == 0 {
- return signStr + intStr
- }
-
- // generate fractional part
- fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision]))
- // may need padding
- if len(fracStr) < precision {
- fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr
- }
-
- return signStr + intStr + decimalStr + fracStr
-}
-
-// FormatInteger produces a formatted number as string.
-// See FormatFloat.
-func FormatInteger(format string, n int) string {
- return FormatFloat(format, float64(n))
-}
diff --git a/vendor/github.com/dustin/go-humanize/ordinals.go b/vendor/github.com/dustin/go-humanize/ordinals.go
deleted file mode 100644
index 43d88a86..00000000
--- a/vendor/github.com/dustin/go-humanize/ordinals.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package humanize
-
-import "strconv"
-
-// Ordinal gives you the input number in a rank/ordinal format.
-//
-// Ordinal(3) -> 3rd
-func Ordinal(x int) string {
- suffix := "th"
- switch x % 10 {
- case 1:
- if x%100 != 11 {
- suffix = "st"
- }
- case 2:
- if x%100 != 12 {
- suffix = "nd"
- }
- case 3:
- if x%100 != 13 {
- suffix = "rd"
- }
- }
- return strconv.Itoa(x) + suffix
-}
diff --git a/vendor/github.com/dustin/go-humanize/si.go b/vendor/github.com/dustin/go-humanize/si.go
deleted file mode 100644
index ae659e0e..00000000
--- a/vendor/github.com/dustin/go-humanize/si.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package humanize
-
-import (
- "errors"
- "math"
- "regexp"
- "strconv"
-)
-
-var siPrefixTable = map[float64]string{
- -24: "y", // yocto
- -21: "z", // zepto
- -18: "a", // atto
- -15: "f", // femto
- -12: "p", // pico
- -9: "n", // nano
- -6: "µ", // micro
- -3: "m", // milli
- 0: "",
- 3: "k", // kilo
- 6: "M", // mega
- 9: "G", // giga
- 12: "T", // tera
- 15: "P", // peta
- 18: "E", // exa
- 21: "Z", // zetta
- 24: "Y", // yotta
-}
-
-var revSIPrefixTable = revfmap(siPrefixTable)
-
-// revfmap reverses the map and precomputes the power multiplier
-func revfmap(in map[float64]string) map[string]float64 {
- rv := map[string]float64{}
- for k, v := range in {
- rv[v] = math.Pow(10, k)
- }
- return rv
-}
-
-var riParseRegex *regexp.Regexp
-
-func init() {
- ri := `^([\-0-9.]+)\s?([`
- for _, v := range siPrefixTable {
- ri += v
- }
- ri += `]?)(.*)`
-
- riParseRegex = regexp.MustCompile(ri)
-}
-
-// ComputeSI finds the most appropriate SI prefix for the given number
-// and returns the prefix along with the value adjusted to be within
-// that prefix.
-//
-// See also: SI, ParseSI.
-//
-// e.g. ComputeSI(2.2345e-12) -> (2.2345, "p")
-func ComputeSI(input float64) (float64, string) {
- if input == 0 {
- return 0, ""
- }
- mag := math.Abs(input)
- exponent := math.Floor(logn(mag, 10))
- exponent = math.Floor(exponent/3) * 3
-
- value := mag / math.Pow(10, exponent)
-
- // Handle special case where value is exactly 1000.0
- // Should return 1 M instead of 1000 k
- if value == 1000.0 {
- exponent += 3
- value = mag / math.Pow(10, exponent)
- }
-
- value = math.Copysign(value, input)
-
- prefix := siPrefixTable[exponent]
- return value, prefix
-}
-
-// SI returns a string with default formatting.
-//
-// SI uses Ftoa to format float value, removing trailing zeros.
-//
-// See also: ComputeSI, ParseSI.
-//
-// e.g. SI(1000000, "B") -> 1 MB
-// e.g. SI(2.2345e-12, "F") -> 2.2345 pF
-func SI(input float64, unit string) string {
- value, prefix := ComputeSI(input)
- return Ftoa(value) + " " + prefix + unit
-}
-
-// SIWithDigits works like SI but limits the resulting string to the
-// given number of decimal places.
-//
-// e.g. SIWithDigits(1000000, 0, "B") -> 1 MB
-// e.g. SIWithDigits(2.2345e-12, 2, "F") -> 2.23 pF
-func SIWithDigits(input float64, decimals int, unit string) string {
- value, prefix := ComputeSI(input)
- return FtoaWithDigits(value, decimals) + " " + prefix + unit
-}
-
-var errInvalid = errors.New("invalid input")
-
-// ParseSI parses an SI string back into the number and unit.
-//
-// See also: SI, ComputeSI.
-//
-// e.g. ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil)
-func ParseSI(input string) (float64, string, error) {
- found := riParseRegex.FindStringSubmatch(input)
- if len(found) != 4 {
- return 0, "", errInvalid
- }
- mag := revSIPrefixTable[found[2]]
- unit := found[3]
-
- base, err := strconv.ParseFloat(found[1], 64)
- return base * mag, unit, err
-}
diff --git a/vendor/github.com/dustin/go-humanize/times.go b/vendor/github.com/dustin/go-humanize/times.go
deleted file mode 100644
index dd3fbf5e..00000000
--- a/vendor/github.com/dustin/go-humanize/times.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package humanize
-
-import (
- "fmt"
- "math"
- "sort"
- "time"
-)
-
-// Seconds-based time units
-const (
- Day = 24 * time.Hour
- Week = 7 * Day
- Month = 30 * Day
- Year = 12 * Month
- LongTime = 37 * Year
-)
-
-// Time formats a time into a relative string.
-//
-// Time(someT) -> "3 weeks ago"
-func Time(then time.Time) string {
- return RelTime(then, time.Now(), "ago", "from now")
-}
-
-// A RelTimeMagnitude struct contains a relative time point at which
-// the relative format of time will switch to a new format string. A
-// slice of these in ascending order by their "D" field is passed to
-// CustomRelTime to format durations.
-//
-// The Format field is a string that may contain a "%s" which will be
-// replaced with the appropriate signed label (e.g. "ago" or "from
-// now") and a "%d" that will be replaced by the quantity.
-//
-// The DivBy field is the amount of time the time difference must be
-// divided by in order to display correctly.
-//
-// e.g. if D is 2*time.Minute and you want to display "%d minutes %s"
-// DivBy should be time.Minute so whatever the duration is will be
-// expressed in minutes.
-type RelTimeMagnitude struct {
- D time.Duration
- Format string
- DivBy time.Duration
-}
-
-var defaultMagnitudes = []RelTimeMagnitude{
- {time.Second, "now", time.Second},
- {2 * time.Second, "1 second %s", 1},
- {time.Minute, "%d seconds %s", time.Second},
- {2 * time.Minute, "1 minute %s", 1},
- {time.Hour, "%d minutes %s", time.Minute},
- {2 * time.Hour, "1 hour %s", 1},
- {Day, "%d hours %s", time.Hour},
- {2 * Day, "1 day %s", 1},
- {Week, "%d days %s", Day},
- {2 * Week, "1 week %s", 1},
- {Month, "%d weeks %s", Week},
- {2 * Month, "1 month %s", 1},
- {Year, "%d months %s", Month},
- {18 * Month, "1 year %s", 1},
- {2 * Year, "2 years %s", 1},
- {LongTime, "%d years %s", Year},
- {math.MaxInt64, "a long while %s", 1},
-}
-
-// RelTime formats a time into a relative string.
-//
-// It takes two times and two labels. In addition to the generic time
-// delta string (e.g. 5 minutes), the labels are used applied so that
-// the label corresponding to the smaller time is applied.
-//
-// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier"
-func RelTime(a, b time.Time, albl, blbl string) string {
- return CustomRelTime(a, b, albl, blbl, defaultMagnitudes)
-}
-
-// CustomRelTime formats a time into a relative string.
-//
-// It takes two times two labels and a table of relative time formats.
-// In addition to the generic time delta string (e.g. 5 minutes), the
-// labels are used applied so that the label corresponding to the
-// smaller time is applied.
-func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string {
- lbl := albl
- diff := b.Sub(a)
-
- if a.After(b) {
- lbl = blbl
- diff = a.Sub(b)
- }
-
- n := sort.Search(len(magnitudes), func(i int) bool {
- return magnitudes[i].D > diff
- })
-
- if n >= len(magnitudes) {
- n = len(magnitudes) - 1
- }
- mag := magnitudes[n]
- args := []interface{}{}
- escaped := false
- for _, ch := range mag.Format {
- if escaped {
- switch ch {
- case 's':
- args = append(args, lbl)
- case 'd':
- args = append(args, diff/mag.DivBy)
- }
- escaped = false
- } else {
- escaped = ch == '%'
- }
- }
- return fmt.Sprintf(mag.Format, args...)
-}
diff --git a/vendor/github.com/go-redis/redis/v8/.gitignore b/vendor/github.com/go-redis/redis/v8/.gitignore
deleted file mode 100644
index b975a7b4..00000000
--- a/vendor/github.com/go-redis/redis/v8/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-*.rdb
-testdata/*/
-.idea/
diff --git a/vendor/github.com/go-redis/redis/v8/.golangci.yml b/vendor/github.com/go-redis/redis/v8/.golangci.yml
deleted file mode 100644
index de514554..00000000
--- a/vendor/github.com/go-redis/redis/v8/.golangci.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-run:
- concurrency: 8
- deadline: 5m
- tests: false
diff --git a/vendor/github.com/go-redis/redis/v8/.prettierrc.yml b/vendor/github.com/go-redis/redis/v8/.prettierrc.yml
deleted file mode 100644
index 8b7f044a..00000000
--- a/vendor/github.com/go-redis/redis/v8/.prettierrc.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-semi: false
-singleQuote: true
-proseWrap: always
-printWidth: 100
diff --git a/vendor/github.com/go-redis/redis/v8/CHANGELOG.md b/vendor/github.com/go-redis/redis/v8/CHANGELOG.md
deleted file mode 100644
index 195e5193..00000000
--- a/vendor/github.com/go-redis/redis/v8/CHANGELOG.md
+++ /dev/null
@@ -1,177 +0,0 @@
-## [8.11.5](https://github.com/go-redis/redis/compare/v8.11.4...v8.11.5) (2022-03-17)
-
-
-### Bug Fixes
-
-* add missing Expire methods to Cmdable ([17e3b43](https://github.com/go-redis/redis/commit/17e3b43879d516437ada71cf9c0deac6a382ed9a))
-* add whitespace for avoid unlikely colisions ([7f7c181](https://github.com/go-redis/redis/commit/7f7c1817617cfec909efb13d14ad22ef05a6ad4c))
-* example/otel compile error ([#2028](https://github.com/go-redis/redis/issues/2028)) ([187c07c](https://github.com/go-redis/redis/commit/187c07c41bf68dc3ab280bc3a925e960bbef6475))
-* **extra/redisotel:** set span.kind attribute to client ([065b200](https://github.com/go-redis/redis/commit/065b200070b41e6e949710b4f9e01b50ccc60ab2))
-* format ([96f53a0](https://github.com/go-redis/redis/commit/96f53a0159a28affa94beec1543a62234e7f8b32))
-* invalid type assert in stringArg ([de6c131](https://github.com/go-redis/redis/commit/de6c131865b8263400c8491777b295035f2408e4))
-* rename Golang to Go ([#2030](https://github.com/go-redis/redis/issues/2030)) ([b82a2d9](https://github.com/go-redis/redis/commit/b82a2d9d4d2de7b7cbe8fcd4895be62dbcacacbc))
-* set timeout for WAIT command. Fixes [#1963](https://github.com/go-redis/redis/issues/1963) ([333fee1](https://github.com/go-redis/redis/commit/333fee1a8fd98a2fbff1ab187c1b03246a7eb01f))
-* update some argument counts in pre-allocs ([f6974eb](https://github.com/go-redis/redis/commit/f6974ebb5c40a8adf90d2cacab6dc297f4eba4c2))
-
-
-### Features
-
-* Add redis v7's NX, XX, GT, LT expire variants ([e19bbb2](https://github.com/go-redis/redis/commit/e19bbb26e2e395c6e077b48d80d79e99f729a8b8))
-* add support for acl sentinel auth in universal client ([ab0ccc4](https://github.com/go-redis/redis/commit/ab0ccc47413f9b2a6eabc852fed5005a3ee1af6e))
-* add support for COPY command ([#2016](https://github.com/go-redis/redis/issues/2016)) ([730afbc](https://github.com/go-redis/redis/commit/730afbcffb93760e8a36cc06cfe55ab102b693a7))
-* add support for passing extra attributes added to spans ([39faaa1](https://github.com/go-redis/redis/commit/39faaa171523834ba527c9789710c4fde87f5a2e))
-* add support for time.Duration write and scan ([2f1b74e](https://github.com/go-redis/redis/commit/2f1b74e20cdd7719b2aecf0768d3e3ae7c3e781b))
-* **redisotel:** ability to override TracerProvider ([#1998](https://github.com/go-redis/redis/issues/1998)) ([bf8d4aa](https://github.com/go-redis/redis/commit/bf8d4aa60c00366cda2e98c3ddddc8cf68507417))
-* set net.peer.name and net.peer.port in otel example ([69bf454](https://github.com/go-redis/redis/commit/69bf454f706204211cd34835f76b2e8192d3766d))
-
-
-
-## [8.11.4](https://github.com/go-redis/redis/compare/v8.11.3...v8.11.4) (2021-10-04)
-
-
-### Features
-
-* add acl auth support for sentinels ([f66582f](https://github.com/go-redis/redis/commit/f66582f44f3dc3a4705a5260f982043fde4aa634))
-* add Cmd.{String,Int,Float,Bool}Slice helpers and an example ([5d3d293](https://github.com/go-redis/redis/commit/5d3d293cc9c60b90871e2420602001463708ce24))
-* add SetVal method for each command ([168981d](https://github.com/go-redis/redis/commit/168981da2d84ee9e07d15d3e74d738c162e264c4))
-
-
-
-## v8.11
-
-- Remove OpenTelemetry metrics.
-- Supports more redis commands and options.
-
-## v8.10
-
-- Removed extra OpenTelemetry spans from go-redis core. Now go-redis instrumentation only adds a
- single span with a Redis command (instead of 4 spans). There are multiple reasons behind this
- decision:
-
- - Traces become smaller and less noisy.
- - It may be costly to process those 3 extra spans for each query.
- - go-redis no longer depends on OpenTelemetry.
-
- Eventually we hope to replace the information that we no longer collect with OpenTelemetry
- Metrics.
-
-## v8.9
-
-- Changed `PubSub.Channel` to only rely on `Ping` result. You can now use `WithChannelSize`,
- `WithChannelHealthCheckInterval`, and `WithChannelSendTimeout` to override default settings.
-
-## v8.8
-
-- To make updating easier, extra modules now have the same version as go-redis does. That means that
- you need to update your imports:
-
-```
-github.com/go-redis/redis/extra/redisotel -> github.com/go-redis/redis/extra/redisotel/v8
-github.com/go-redis/redis/extra/rediscensus -> github.com/go-redis/redis/extra/rediscensus/v8
-```
-
-## v8.5
-
-- [knadh](https://github.com/knadh) contributed long-awaited ability to scan Redis Hash into a
- struct:
-
-```go
-err := rdb.HGetAll(ctx, "hash").Scan(&data)
-
-err := rdb.MGet(ctx, "key1", "key2").Scan(&data)
-```
-
-- Please check [redismock](https://github.com/go-redis/redismock) by
- [monkey92t](https://github.com/monkey92t) if you are looking for mocking Redis Client.
-
-## v8
-
-- All commands require `context.Context` as a first argument, e.g. `rdb.Ping(ctx)`. If you are not
- using `context.Context` yet, the simplest option is to define global package variable
- `var ctx = context.TODO()` and use it when `ctx` is required.
-
-- Full support for `context.Context` canceling.
-
-- Added `redis.NewFailoverClusterClient` that supports routing read-only commands to a slave node.
-
-- Added `redisext.OpenTemetryHook` that adds
- [Redis OpenTelemetry instrumentation](https://redis.uptrace.dev/tracing/).
-
-- Redis slow log support.
-
-- Ring uses Rendezvous Hashing by default which provides better distribution. You need to move
- existing keys to a new location or keys will be inaccessible / lost. To use old hashing scheme:
-
-```go
-import "github.com/golang/groupcache/consistenthash"
-
-ring := redis.NewRing(&redis.RingOptions{
- NewConsistentHash: func() {
- return consistenthash.New(100, crc32.ChecksumIEEE)
- },
-})
-```
-
-- `ClusterOptions.MaxRedirects` default value is changed from 8 to 3.
-- `Options.MaxRetries` default value is changed from 0 to 3.
-
-- `Cluster.ForEachNode` is renamed to `ForEachShard` for consistency with `Ring`.
-
-## v7.3
-
-- New option `Options.Username` which causes client to use `AuthACL`. Be aware if your connection
- URL contains username.
-
-## v7.2
-
-- Existing `HMSet` is renamed to `HSet` and old deprecated `HMSet` is restored for Redis 3 users.
-
-## v7.1
-
-- Existing `Cmd.String` is renamed to `Cmd.Text`. New `Cmd.String` implements `fmt.Stringer`
- interface.
-
-## v7
-
-- _Important_. Tx.Pipeline now returns a non-transactional pipeline. Use Tx.TxPipeline for a
- transactional pipeline.
-- WrapProcess is replaced with more convenient AddHook that has access to context.Context.
-- WithContext now can not be used to create a shallow copy of the client.
-- New methods ProcessContext, DoContext, and ExecContext.
-- Client respects Context.Deadline when setting net.Conn deadline.
-- Client listens on Context.Done while waiting for a connection from the pool and returns an error
- when context context is cancelled.
-- Add PubSub.ChannelWithSubscriptions that sends `*Subscription` in addition to `*Message` to allow
- detecting reconnections.
-- `time.Time` is now marshalled in RFC3339 format. `rdb.Get("foo").Time()` helper is added to parse
- the time.
-- `SetLimiter` is removed and added `Options.Limiter` instead.
-- `HMSet` is deprecated as of Redis v4.
-
-## v6.15
-
-- Cluster and Ring pipelines process commands for each node in its own goroutine.
-
-## 6.14
-
-- Added Options.MinIdleConns.
-- Added Options.MaxConnAge.
-- PoolStats.FreeConns is renamed to PoolStats.IdleConns.
-- Add Client.Do to simplify creating custom commands.
-- Add Cmd.String, Cmd.Int, Cmd.Int64, Cmd.Uint64, Cmd.Float64, and Cmd.Bool helpers.
-- Lower memory usage.
-
-## v6.13
-
-- Ring got new options called `HashReplicas` and `Hash`. It is recommended to set
- `HashReplicas = 1000` for better keys distribution between shards.
-- Cluster client was optimized to use much less memory when reloading cluster state.
-- PubSub.ReceiveMessage is re-worked to not use ReceiveTimeout so it does not lose data when timeout
- occurres. In most cases it is recommended to use PubSub.Channel instead.
-- Dialer.KeepAlive is set to 5 minutes by default.
-
-## v6.12
-
-- ClusterClient got new option called `ClusterSlots` which allows to build cluster of normal Redis
- Servers that don't have cluster mode enabled. See
- https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup
diff --git a/vendor/github.com/go-redis/redis/v8/LICENSE b/vendor/github.com/go-redis/redis/v8/LICENSE
deleted file mode 100644
index 298bed9b..00000000
--- a/vendor/github.com/go-redis/redis/v8/LICENSE
+++ /dev/null
@@ -1,25 +0,0 @@
-Copyright (c) 2013 The github.com/go-redis/redis Authors.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/go-redis/redis/v8/Makefile b/vendor/github.com/go-redis/redis/v8/Makefile
deleted file mode 100644
index a4cfe057..00000000
--- a/vendor/github.com/go-redis/redis/v8/Makefile
+++ /dev/null
@@ -1,35 +0,0 @@
-PACKAGE_DIRS := $(shell find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | sort)
-
-test: testdeps
- go test ./...
- go test ./... -short -race
- go test ./... -run=NONE -bench=. -benchmem
- env GOOS=linux GOARCH=386 go test ./...
- go vet
-
-testdeps: testdata/redis/src/redis-server
-
-bench: testdeps
- go test ./... -test.run=NONE -test.bench=. -test.benchmem
-
-.PHONY: all test testdeps bench
-
-testdata/redis:
- mkdir -p $@
- wget -qO- https://download.redis.io/releases/redis-6.2.5.tar.gz | tar xvz --strip-components=1 -C $@
-
-testdata/redis/src/redis-server: testdata/redis
- cd $< && make all
-
-fmt:
- gofmt -w -s ./
- goimports -w -local github.com/go-redis/redis ./
-
-go_mod_tidy:
- go get -u && go mod tidy
- set -e; for dir in $(PACKAGE_DIRS); do \
- echo "go mod tidy in $${dir}"; \
- (cd "$${dir}" && \
- go get -u && \
- go mod tidy); \
- done
diff --git a/vendor/github.com/go-redis/redis/v8/README.md b/vendor/github.com/go-redis/redis/v8/README.md
deleted file mode 100644
index f3b6a018..00000000
--- a/vendor/github.com/go-redis/redis/v8/README.md
+++ /dev/null
@@ -1,175 +0,0 @@
-# Redis client for Go
-
-![build workflow](https://github.com/go-redis/redis/actions/workflows/build.yml/badge.svg)
-[![PkgGoDev](https://pkg.go.dev/badge/github.com/go-redis/redis/v8)](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
-[![Documentation](https://img.shields.io/badge/redis-documentation-informational)](https://redis.uptrace.dev/)
-
-go-redis is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace).
-Uptrace is an open source and blazingly fast **distributed tracing** backend powered by
-OpenTelemetry and ClickHouse. Give it a star as well!
-
-## Resources
-
-- [Discussions](https://github.com/go-redis/redis/discussions)
-- [Documentation](https://redis.uptrace.dev)
-- [Reference](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
-- [Examples](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#pkg-examples)
-- [RealWorld example app](https://github.com/uptrace/go-treemux-realworld-example-app)
-
-Other projects you may like:
-
-- [Bun](https://bun.uptrace.dev) - fast and simple SQL client for PostgreSQL, MySQL, and SQLite.
-- [BunRouter](https://bunrouter.uptrace.dev/) - fast and flexible HTTP router for Go.
-
-## Ecosystem
-
-- [Redis Mock](https://github.com/go-redis/redismock)
-- [Distributed Locks](https://github.com/bsm/redislock)
-- [Redis Cache](https://github.com/go-redis/cache)
-- [Rate limiting](https://github.com/go-redis/redis_rate)
-
-## Features
-
-- Redis 3 commands except QUIT, MONITOR, and SYNC.
-- Automatic connection pooling with
- [circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support.
-- [Pub/Sub](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#PubSub).
-- [Transactions](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline).
-- [Pipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.Pipeline) and
- [TxPipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.TxPipeline).
-- [Scripting](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Script).
-- [Timeouts](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Options).
-- [Redis Sentinel](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewFailoverClient).
-- [Redis Cluster](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewClusterClient).
-- [Cluster of Redis Servers](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-NewClusterClient-ManualSetup)
- without using cluster mode and Redis Sentinel.
-- [Ring](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewRing).
-- [Instrumentation](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-package-Instrumentation).
-
-## Installation
-
-go-redis supports 2 last Go versions and requires a Go version with
-[modules](https://github.com/golang/go/wiki/Modules) support. So make sure to initialize a Go
-module:
-
-```shell
-go mod init github.com/my/repo
-```
-
-And then install go-redis/v8 (note _v8_ in the import; omitting it is a popular mistake):
-
-```shell
-go get github.com/go-redis/redis/v8
-```
-
-## Quickstart
-
-```go
-import (
- "context"
- "github.com/go-redis/redis/v8"
- "fmt"
-)
-
-var ctx = context.Background()
-
-func ExampleClient() {
- rdb := redis.NewClient(&redis.Options{
- Addr: "localhost:6379",
- Password: "", // no password set
- DB: 0, // use default DB
- })
-
- err := rdb.Set(ctx, "key", "value", 0).Err()
- if err != nil {
- panic(err)
- }
-
- val, err := rdb.Get(ctx, "key").Result()
- if err != nil {
- panic(err)
- }
- fmt.Println("key", val)
-
- val2, err := rdb.Get(ctx, "key2").Result()
- if err == redis.Nil {
- fmt.Println("key2 does not exist")
- } else if err != nil {
- panic(err)
- } else {
- fmt.Println("key2", val2)
- }
- // Output: key value
- // key2 does not exist
-}
-```
-
-## Look and feel
-
-Some corner cases:
-
-```go
-// SET key value EX 10 NX
-set, err := rdb.SetNX(ctx, "key", "value", 10*time.Second).Result()
-
-// SET key value keepttl NX
-set, err := rdb.SetNX(ctx, "key", "value", redis.KeepTTL).Result()
-
-// SORT list LIMIT 0 2 ASC
-vals, err := rdb.Sort(ctx, "list", &redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()
-
-// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
-vals, err := rdb.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{
- Min: "-inf",
- Max: "+inf",
- Offset: 0,
- Count: 2,
-}).Result()
-
-// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
-vals, err := rdb.ZInterStore(ctx, "out", &redis.ZStore{
- Keys: []string{"zset1", "zset2"},
- Weights: []int64{2, 3}
-}).Result()
-
-// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
-vals, err := rdb.Eval(ctx, "return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result()
-
-// custom command
-res, err := rdb.Do(ctx, "set", "key", "value").Result()
-```
-
-## Run the test
-
-go-redis will start a redis-server and run the test cases.
-
-The paths of redis-server bin file and redis config file are defined in `main_test.go`:
-
-```
-var (
- redisServerBin, _ = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server"))
- redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf"))
-)
-```
-
-For local testing, you can change the variables to refer to your local files, or create a soft link
-to the corresponding folder for redis-server and copy the config file to `testdata/redis/`:
-
-```
-ln -s /usr/bin/redis-server ./go-redis/testdata/redis/src
-cp ./go-redis/testdata/redis.conf ./go-redis/testdata/redis/
-```
-
-Lastly, run:
-
-```
-go test
-```
-
-## Contributors
-
-Thanks to all the people who already contributed!
-
-
-
-
diff --git a/vendor/github.com/go-redis/redis/v8/RELEASING.md b/vendor/github.com/go-redis/redis/v8/RELEASING.md
deleted file mode 100644
index 1115db4e..00000000
--- a/vendor/github.com/go-redis/redis/v8/RELEASING.md
+++ /dev/null
@@ -1,15 +0,0 @@
-# Releasing
-
-1. Run `release.sh` script which updates versions in go.mod files and pushes a new branch to GitHub:
-
-```shell
-TAG=v1.0.0 ./scripts/release.sh
-```
-
-2. Open a pull request and wait for the build to finish.
-
-3. Merge the pull request and run `tag.sh` to create tags for packages:
-
-```shell
-TAG=v1.0.0 ./scripts/tag.sh
-```
diff --git a/vendor/github.com/go-redis/redis/v8/cluster.go b/vendor/github.com/go-redis/redis/v8/cluster.go
deleted file mode 100644
index a54f2f37..00000000
--- a/vendor/github.com/go-redis/redis/v8/cluster.go
+++ /dev/null
@@ -1,1750 +0,0 @@
-package redis
-
-import (
- "context"
- "crypto/tls"
- "fmt"
- "math"
- "net"
- "runtime"
- "sort"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/go-redis/redis/v8/internal"
- "github.com/go-redis/redis/v8/internal/hashtag"
- "github.com/go-redis/redis/v8/internal/pool"
- "github.com/go-redis/redis/v8/internal/proto"
- "github.com/go-redis/redis/v8/internal/rand"
-)
-
-var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes")
-
-// ClusterOptions are used to configure a cluster client and should be
-// passed to NewClusterClient.
-type ClusterOptions struct {
- // A seed list of host:port addresses of cluster nodes.
- Addrs []string
-
- // NewClient creates a cluster node client with provided name and options.
- NewClient func(opt *Options) *Client
-
- // The maximum number of retries before giving up. Command is retried
- // on network errors and MOVED/ASK redirects.
- // Default is 3 retries.
- MaxRedirects int
-
- // Enables read-only commands on slave nodes.
- ReadOnly bool
- // Allows routing read-only commands to the closest master or slave node.
- // It automatically enables ReadOnly.
- RouteByLatency bool
- // Allows routing read-only commands to the random master or slave node.
- // It automatically enables ReadOnly.
- RouteRandomly bool
-
- // Optional function that returns cluster slots information.
- // It is useful to manually create cluster of standalone Redis servers
- // and load-balance read/write operations between master and slaves.
- // It can use service like ZooKeeper to maintain configuration information
- // and Cluster.ReloadState to manually trigger state reloading.
- ClusterSlots func(context.Context) ([]ClusterSlot, error)
-
- // Following options are copied from Options struct.
-
- Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
-
- OnConnect func(ctx context.Context, cn *Conn) error
-
- Username string
- Password string
-
- MaxRetries int
- MinRetryBackoff time.Duration
- MaxRetryBackoff time.Duration
-
- DialTimeout time.Duration
- ReadTimeout time.Duration
- WriteTimeout time.Duration
-
- // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
- PoolFIFO bool
-
- // PoolSize applies per cluster node and not for the whole cluster.
- PoolSize int
- MinIdleConns int
- MaxConnAge time.Duration
- PoolTimeout time.Duration
- IdleTimeout time.Duration
- IdleCheckFrequency time.Duration
-
- TLSConfig *tls.Config
-}
-
-func (opt *ClusterOptions) init() {
- if opt.MaxRedirects == -1 {
- opt.MaxRedirects = 0
- } else if opt.MaxRedirects == 0 {
- opt.MaxRedirects = 3
- }
-
- if opt.RouteByLatency || opt.RouteRandomly {
- opt.ReadOnly = true
- }
-
- if opt.PoolSize == 0 {
- opt.PoolSize = 5 * runtime.GOMAXPROCS(0)
- }
-
- switch opt.ReadTimeout {
- case -1:
- opt.ReadTimeout = 0
- case 0:
- opt.ReadTimeout = 3 * time.Second
- }
- switch opt.WriteTimeout {
- case -1:
- opt.WriteTimeout = 0
- case 0:
- opt.WriteTimeout = opt.ReadTimeout
- }
-
- if opt.MaxRetries == 0 {
- opt.MaxRetries = -1
- }
- switch opt.MinRetryBackoff {
- case -1:
- opt.MinRetryBackoff = 0
- case 0:
- opt.MinRetryBackoff = 8 * time.Millisecond
- }
- switch opt.MaxRetryBackoff {
- case -1:
- opt.MaxRetryBackoff = 0
- case 0:
- opt.MaxRetryBackoff = 512 * time.Millisecond
- }
-
- if opt.NewClient == nil {
- opt.NewClient = NewClient
- }
-}
-
-func (opt *ClusterOptions) clientOptions() *Options {
- const disableIdleCheck = -1
-
- return &Options{
- Dialer: opt.Dialer,
- OnConnect: opt.OnConnect,
-
- Username: opt.Username,
- Password: opt.Password,
-
- MaxRetries: opt.MaxRetries,
- MinRetryBackoff: opt.MinRetryBackoff,
- MaxRetryBackoff: opt.MaxRetryBackoff,
-
- DialTimeout: opt.DialTimeout,
- ReadTimeout: opt.ReadTimeout,
- WriteTimeout: opt.WriteTimeout,
-
- PoolFIFO: opt.PoolFIFO,
- PoolSize: opt.PoolSize,
- MinIdleConns: opt.MinIdleConns,
- MaxConnAge: opt.MaxConnAge,
- PoolTimeout: opt.PoolTimeout,
- IdleTimeout: opt.IdleTimeout,
- IdleCheckFrequency: disableIdleCheck,
-
- TLSConfig: opt.TLSConfig,
- // If ClusterSlots is populated, then we probably have an artificial
- // cluster whose nodes are not in clustering mode (otherwise there isn't
- // much use for ClusterSlots config). This means we cannot execute the
- // READONLY command against that node -- setting readOnly to false in such
- // situations in the options below will prevent that from happening.
- readOnly: opt.ReadOnly && opt.ClusterSlots == nil,
- }
-}
-
-//------------------------------------------------------------------------------
-
-type clusterNode struct {
- Client *Client
-
- latency uint32 // atomic
- generation uint32 // atomic
- failing uint32 // atomic
-}
-
-func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode {
- opt := clOpt.clientOptions()
- opt.Addr = addr
- node := clusterNode{
- Client: clOpt.NewClient(opt),
- }
-
- node.latency = math.MaxUint32
- if clOpt.RouteByLatency {
- go node.updateLatency()
- }
-
- return &node
-}
-
-func (n *clusterNode) String() string {
- return n.Client.String()
-}
-
-func (n *clusterNode) Close() error {
- return n.Client.Close()
-}
-
-func (n *clusterNode) updateLatency() {
- const numProbe = 10
- var dur uint64
-
- for i := 0; i < numProbe; i++ {
- time.Sleep(time.Duration(10+rand.Intn(10)) * time.Millisecond)
-
- start := time.Now()
- n.Client.Ping(context.TODO())
- dur += uint64(time.Since(start) / time.Microsecond)
- }
-
- latency := float64(dur) / float64(numProbe)
- atomic.StoreUint32(&n.latency, uint32(latency+0.5))
-}
-
-func (n *clusterNode) Latency() time.Duration {
- latency := atomic.LoadUint32(&n.latency)
- return time.Duration(latency) * time.Microsecond
-}
-
-func (n *clusterNode) MarkAsFailing() {
- atomic.StoreUint32(&n.failing, uint32(time.Now().Unix()))
-}
-
-func (n *clusterNode) Failing() bool {
- const timeout = 15 // 15 seconds
-
- failing := atomic.LoadUint32(&n.failing)
- if failing == 0 {
- return false
- }
- if time.Now().Unix()-int64(failing) < timeout {
- return true
- }
- atomic.StoreUint32(&n.failing, 0)
- return false
-}
-
-func (n *clusterNode) Generation() uint32 {
- return atomic.LoadUint32(&n.generation)
-}
-
-func (n *clusterNode) SetGeneration(gen uint32) {
- for {
- v := atomic.LoadUint32(&n.generation)
- if gen < v || atomic.CompareAndSwapUint32(&n.generation, v, gen) {
- break
- }
- }
-}
-
-//------------------------------------------------------------------------------
-
-type clusterNodes struct {
- opt *ClusterOptions
-
- mu sync.RWMutex
- addrs []string
- nodes map[string]*clusterNode
- activeAddrs []string
- closed bool
-
- _generation uint32 // atomic
-}
-
-func newClusterNodes(opt *ClusterOptions) *clusterNodes {
- return &clusterNodes{
- opt: opt,
-
- addrs: opt.Addrs,
- nodes: make(map[string]*clusterNode),
- }
-}
-
-func (c *clusterNodes) Close() error {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- if c.closed {
- return nil
- }
- c.closed = true
-
- var firstErr error
- for _, node := range c.nodes {
- if err := node.Client.Close(); err != nil && firstErr == nil {
- firstErr = err
- }
- }
-
- c.nodes = nil
- c.activeAddrs = nil
-
- return firstErr
-}
-
-func (c *clusterNodes) Addrs() ([]string, error) {
- var addrs []string
-
- c.mu.RLock()
- closed := c.closed //nolint:ifshort
- if !closed {
- if len(c.activeAddrs) > 0 {
- addrs = c.activeAddrs
- } else {
- addrs = c.addrs
- }
- }
- c.mu.RUnlock()
-
- if closed {
- return nil, pool.ErrClosed
- }
- if len(addrs) == 0 {
- return nil, errClusterNoNodes
- }
- return addrs, nil
-}
-
-func (c *clusterNodes) NextGeneration() uint32 {
- return atomic.AddUint32(&c._generation, 1)
-}
-
-// GC removes unused nodes.
-func (c *clusterNodes) GC(generation uint32) {
- //nolint:prealloc
- var collected []*clusterNode
-
- c.mu.Lock()
-
- c.activeAddrs = c.activeAddrs[:0]
- for addr, node := range c.nodes {
- if node.Generation() >= generation {
- c.activeAddrs = append(c.activeAddrs, addr)
- if c.opt.RouteByLatency {
- go node.updateLatency()
- }
- continue
- }
-
- delete(c.nodes, addr)
- collected = append(collected, node)
- }
-
- c.mu.Unlock()
-
- for _, node := range collected {
- _ = node.Client.Close()
- }
-}
-
-func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) {
- node, err := c.get(addr)
- if err != nil {
- return nil, err
- }
- if node != nil {
- return node, nil
- }
-
- c.mu.Lock()
- defer c.mu.Unlock()
-
- if c.closed {
- return nil, pool.ErrClosed
- }
-
- node, ok := c.nodes[addr]
- if ok {
- return node, nil
- }
-
- node = newClusterNode(c.opt, addr)
-
- c.addrs = appendIfNotExists(c.addrs, addr)
- c.nodes[addr] = node
-
- return node, nil
-}
-
-func (c *clusterNodes) get(addr string) (*clusterNode, error) {
- var node *clusterNode
- var err error
- c.mu.RLock()
- if c.closed {
- err = pool.ErrClosed
- } else {
- node = c.nodes[addr]
- }
- c.mu.RUnlock()
- return node, err
-}
-
-func (c *clusterNodes) All() ([]*clusterNode, error) {
- c.mu.RLock()
- defer c.mu.RUnlock()
-
- if c.closed {
- return nil, pool.ErrClosed
- }
-
- cp := make([]*clusterNode, 0, len(c.nodes))
- for _, node := range c.nodes {
- cp = append(cp, node)
- }
- return cp, nil
-}
-
-func (c *clusterNodes) Random() (*clusterNode, error) {
- addrs, err := c.Addrs()
- if err != nil {
- return nil, err
- }
-
- n := rand.Intn(len(addrs))
- return c.GetOrCreate(addrs[n])
-}
-
-//------------------------------------------------------------------------------
-
-type clusterSlot struct {
- start, end int
- nodes []*clusterNode
-}
-
-type clusterSlotSlice []*clusterSlot
-
-func (p clusterSlotSlice) Len() int {
- return len(p)
-}
-
-func (p clusterSlotSlice) Less(i, j int) bool {
- return p[i].start < p[j].start
-}
-
-func (p clusterSlotSlice) Swap(i, j int) {
- p[i], p[j] = p[j], p[i]
-}
-
-type clusterState struct {
- nodes *clusterNodes
- Masters []*clusterNode
- Slaves []*clusterNode
-
- slots []*clusterSlot
-
- generation uint32
- createdAt time.Time
-}
-
-func newClusterState(
- nodes *clusterNodes, slots []ClusterSlot, origin string,
-) (*clusterState, error) {
- c := clusterState{
- nodes: nodes,
-
- slots: make([]*clusterSlot, 0, len(slots)),
-
- generation: nodes.NextGeneration(),
- createdAt: time.Now(),
- }
-
- originHost, _, _ := net.SplitHostPort(origin)
- isLoopbackOrigin := isLoopback(originHost)
-
- for _, slot := range slots {
- var nodes []*clusterNode
- for i, slotNode := range slot.Nodes {
- addr := slotNode.Addr
- if !isLoopbackOrigin {
- addr = replaceLoopbackHost(addr, originHost)
- }
-
- node, err := c.nodes.GetOrCreate(addr)
- if err != nil {
- return nil, err
- }
-
- node.SetGeneration(c.generation)
- nodes = append(nodes, node)
-
- if i == 0 {
- c.Masters = appendUniqueNode(c.Masters, node)
- } else {
- c.Slaves = appendUniqueNode(c.Slaves, node)
- }
- }
-
- c.slots = append(c.slots, &clusterSlot{
- start: slot.Start,
- end: slot.End,
- nodes: nodes,
- })
- }
-
- sort.Sort(clusterSlotSlice(c.slots))
-
- time.AfterFunc(time.Minute, func() {
- nodes.GC(c.generation)
- })
-
- return &c, nil
-}
-
-func replaceLoopbackHost(nodeAddr, originHost string) string {
- nodeHost, nodePort, err := net.SplitHostPort(nodeAddr)
- if err != nil {
- return nodeAddr
- }
-
- nodeIP := net.ParseIP(nodeHost)
- if nodeIP == nil {
- return nodeAddr
- }
-
- if !nodeIP.IsLoopback() {
- return nodeAddr
- }
-
- // Use origin host which is not loopback and node port.
- return net.JoinHostPort(originHost, nodePort)
-}
-
-func isLoopback(host string) bool {
- ip := net.ParseIP(host)
- if ip == nil {
- return true
- }
- return ip.IsLoopback()
-}
-
-func (c *clusterState) slotMasterNode(slot int) (*clusterNode, error) {
- nodes := c.slotNodes(slot)
- if len(nodes) > 0 {
- return nodes[0], nil
- }
- return c.nodes.Random()
-}
-
-func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) {
- nodes := c.slotNodes(slot)
- switch len(nodes) {
- case 0:
- return c.nodes.Random()
- case 1:
- return nodes[0], nil
- case 2:
- if slave := nodes[1]; !slave.Failing() {
- return slave, nil
- }
- return nodes[0], nil
- default:
- var slave *clusterNode
- for i := 0; i < 10; i++ {
- n := rand.Intn(len(nodes)-1) + 1
- slave = nodes[n]
- if !slave.Failing() {
- return slave, nil
- }
- }
-
- // All slaves are loading - use master.
- return nodes[0], nil
- }
-}
-
-func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) {
- nodes := c.slotNodes(slot)
- if len(nodes) == 0 {
- return c.nodes.Random()
- }
-
- var node *clusterNode
- for _, n := range nodes {
- if n.Failing() {
- continue
- }
- if node == nil || n.Latency() < node.Latency() {
- node = n
- }
- }
- if node != nil {
- return node, nil
- }
-
- // If all nodes are failing - return random node
- return c.nodes.Random()
-}
-
-func (c *clusterState) slotRandomNode(slot int) (*clusterNode, error) {
- nodes := c.slotNodes(slot)
- if len(nodes) == 0 {
- return c.nodes.Random()
- }
- if len(nodes) == 1 {
- return nodes[0], nil
- }
- randomNodes := rand.Perm(len(nodes))
- for _, idx := range randomNodes {
- if node := nodes[idx]; !node.Failing() {
- return node, nil
- }
- }
- return nodes[randomNodes[0]], nil
-}
-
-func (c *clusterState) slotNodes(slot int) []*clusterNode {
- i := sort.Search(len(c.slots), func(i int) bool {
- return c.slots[i].end >= slot
- })
- if i >= len(c.slots) {
- return nil
- }
- x := c.slots[i]
- if slot >= x.start && slot <= x.end {
- return x.nodes
- }
- return nil
-}
-
-//------------------------------------------------------------------------------
-
-type clusterStateHolder struct {
- load func(ctx context.Context) (*clusterState, error)
-
- state atomic.Value
- reloading uint32 // atomic
-}
-
-func newClusterStateHolder(fn func(ctx context.Context) (*clusterState, error)) *clusterStateHolder {
- return &clusterStateHolder{
- load: fn,
- }
-}
-
-func (c *clusterStateHolder) Reload(ctx context.Context) (*clusterState, error) {
- state, err := c.load(ctx)
- if err != nil {
- return nil, err
- }
- c.state.Store(state)
- return state, nil
-}
-
-func (c *clusterStateHolder) LazyReload() {
- if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) {
- return
- }
- go func() {
- defer atomic.StoreUint32(&c.reloading, 0)
-
- _, err := c.Reload(context.Background())
- if err != nil {
- return
- }
- time.Sleep(200 * time.Millisecond)
- }()
-}
-
-func (c *clusterStateHolder) Get(ctx context.Context) (*clusterState, error) {
- v := c.state.Load()
- if v == nil {
- return c.Reload(ctx)
- }
-
- state := v.(*clusterState)
- if time.Since(state.createdAt) > 10*time.Second {
- c.LazyReload()
- }
- return state, nil
-}
-
-func (c *clusterStateHolder) ReloadOrGet(ctx context.Context) (*clusterState, error) {
- state, err := c.Reload(ctx)
- if err == nil {
- return state, nil
- }
- return c.Get(ctx)
-}
-
-//------------------------------------------------------------------------------
-
-type clusterClient struct {
- opt *ClusterOptions
- nodes *clusterNodes
- state *clusterStateHolder //nolint:structcheck
- cmdsInfoCache *cmdsInfoCache //nolint:structcheck
-}
-
-// ClusterClient is a Redis Cluster client representing a pool of zero
-// or more underlying connections. It's safe for concurrent use by
-// multiple goroutines.
-type ClusterClient struct {
- *clusterClient
- cmdable
- hooks
- ctx context.Context
-}
-
-// NewClusterClient returns a Redis Cluster client as described in
-// http://redis.io/topics/cluster-spec.
-func NewClusterClient(opt *ClusterOptions) *ClusterClient {
- opt.init()
-
- c := &ClusterClient{
- clusterClient: &clusterClient{
- opt: opt,
- nodes: newClusterNodes(opt),
- },
- ctx: context.Background(),
- }
- c.state = newClusterStateHolder(c.loadState)
- c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo)
- c.cmdable = c.Process
-
- if opt.IdleCheckFrequency > 0 {
- go c.reaper(opt.IdleCheckFrequency)
- }
-
- return c
-}
-
-func (c *ClusterClient) Context() context.Context {
- return c.ctx
-}
-
-func (c *ClusterClient) WithContext(ctx context.Context) *ClusterClient {
- if ctx == nil {
- panic("nil context")
- }
- clone := *c
- clone.cmdable = clone.Process
- clone.hooks.lock()
- clone.ctx = ctx
- return &clone
-}
-
-// Options returns read-only Options that were used to create the client.
-func (c *ClusterClient) Options() *ClusterOptions {
- return c.opt
-}
-
-// ReloadState reloads cluster state. If available it calls ClusterSlots func
-// to get cluster slots information.
-func (c *ClusterClient) ReloadState(ctx context.Context) {
- c.state.LazyReload()
-}
-
-// Close closes the cluster client, releasing any open resources.
-//
-// It is rare to Close a ClusterClient, as the ClusterClient is meant
-// to be long-lived and shared between many goroutines.
-func (c *ClusterClient) Close() error {
- return c.nodes.Close()
-}
-
-// Do creates a Cmd from the args and processes the cmd.
-func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd {
- cmd := NewCmd(ctx, args...)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error {
- return c.hooks.process(ctx, cmd, c.process)
-}
-
-func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error {
- cmdInfo := c.cmdInfo(cmd.Name())
- slot := c.cmdSlot(cmd)
-
- var node *clusterNode
- var ask bool
- var lastErr error
- for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
- if attempt > 0 {
- if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
- return err
- }
- }
-
- if node == nil {
- var err error
- node, err = c.cmdNode(ctx, cmdInfo, slot)
- if err != nil {
- return err
- }
- }
-
- if ask {
- pipe := node.Client.Pipeline()
- _ = pipe.Process(ctx, NewCmd(ctx, "asking"))
- _ = pipe.Process(ctx, cmd)
- _, lastErr = pipe.Exec(ctx)
- _ = pipe.Close()
- ask = false
- } else {
- lastErr = node.Client.Process(ctx, cmd)
- }
-
- // If there is no error - we are done.
- if lastErr == nil {
- return nil
- }
- if isReadOnly := isReadOnlyError(lastErr); isReadOnly || lastErr == pool.ErrClosed {
- if isReadOnly {
- c.state.LazyReload()
- }
- node = nil
- continue
- }
-
- // If slave is loading - pick another node.
- if c.opt.ReadOnly && isLoadingError(lastErr) {
- node.MarkAsFailing()
- node = nil
- continue
- }
-
- var moved bool
- var addr string
- moved, ask, addr = isMovedError(lastErr)
- if moved || ask {
- c.state.LazyReload()
-
- var err error
- node, err = c.nodes.GetOrCreate(addr)
- if err != nil {
- return err
- }
- continue
- }
-
- if shouldRetry(lastErr, cmd.readTimeout() == nil) {
- // First retry the same node.
- if attempt == 0 {
- continue
- }
-
- // Second try another node.
- node.MarkAsFailing()
- node = nil
- continue
- }
-
- return lastErr
- }
- return lastErr
-}
-
-// ForEachMaster concurrently calls the fn on each master node in the cluster.
-// It returns the first error if any.
-func (c *ClusterClient) ForEachMaster(
- ctx context.Context,
- fn func(ctx context.Context, client *Client) error,
-) error {
- state, err := c.state.ReloadOrGet(ctx)
- if err != nil {
- return err
- }
-
- var wg sync.WaitGroup
- errCh := make(chan error, 1)
-
- for _, master := range state.Masters {
- wg.Add(1)
- go func(node *clusterNode) {
- defer wg.Done()
- err := fn(ctx, node.Client)
- if err != nil {
- select {
- case errCh <- err:
- default:
- }
- }
- }(master)
- }
-
- wg.Wait()
-
- select {
- case err := <-errCh:
- return err
- default:
- return nil
- }
-}
-
-// ForEachSlave concurrently calls the fn on each slave node in the cluster.
-// It returns the first error if any.
-func (c *ClusterClient) ForEachSlave(
- ctx context.Context,
- fn func(ctx context.Context, client *Client) error,
-) error {
- state, err := c.state.ReloadOrGet(ctx)
- if err != nil {
- return err
- }
-
- var wg sync.WaitGroup
- errCh := make(chan error, 1)
-
- for _, slave := range state.Slaves {
- wg.Add(1)
- go func(node *clusterNode) {
- defer wg.Done()
- err := fn(ctx, node.Client)
- if err != nil {
- select {
- case errCh <- err:
- default:
- }
- }
- }(slave)
- }
-
- wg.Wait()
-
- select {
- case err := <-errCh:
- return err
- default:
- return nil
- }
-}
-
-// ForEachShard concurrently calls the fn on each known node in the cluster.
-// It returns the first error if any.
-func (c *ClusterClient) ForEachShard(
- ctx context.Context,
- fn func(ctx context.Context, client *Client) error,
-) error {
- state, err := c.state.ReloadOrGet(ctx)
- if err != nil {
- return err
- }
-
- var wg sync.WaitGroup
- errCh := make(chan error, 1)
-
- worker := func(node *clusterNode) {
- defer wg.Done()
- err := fn(ctx, node.Client)
- if err != nil {
- select {
- case errCh <- err:
- default:
- }
- }
- }
-
- for _, node := range state.Masters {
- wg.Add(1)
- go worker(node)
- }
- for _, node := range state.Slaves {
- wg.Add(1)
- go worker(node)
- }
-
- wg.Wait()
-
- select {
- case err := <-errCh:
- return err
- default:
- return nil
- }
-}
-
-// PoolStats returns accumulated connection pool stats.
-func (c *ClusterClient) PoolStats() *PoolStats {
- var acc PoolStats
-
- state, _ := c.state.Get(context.TODO())
- if state == nil {
- return &acc
- }
-
- for _, node := range state.Masters {
- s := node.Client.connPool.Stats()
- acc.Hits += s.Hits
- acc.Misses += s.Misses
- acc.Timeouts += s.Timeouts
-
- acc.TotalConns += s.TotalConns
- acc.IdleConns += s.IdleConns
- acc.StaleConns += s.StaleConns
- }
-
- for _, node := range state.Slaves {
- s := node.Client.connPool.Stats()
- acc.Hits += s.Hits
- acc.Misses += s.Misses
- acc.Timeouts += s.Timeouts
-
- acc.TotalConns += s.TotalConns
- acc.IdleConns += s.IdleConns
- acc.StaleConns += s.StaleConns
- }
-
- return &acc
-}
-
-func (c *ClusterClient) loadState(ctx context.Context) (*clusterState, error) {
- if c.opt.ClusterSlots != nil {
- slots, err := c.opt.ClusterSlots(ctx)
- if err != nil {
- return nil, err
- }
- return newClusterState(c.nodes, slots, "")
- }
-
- addrs, err := c.nodes.Addrs()
- if err != nil {
- return nil, err
- }
-
- var firstErr error
-
- for _, idx := range rand.Perm(len(addrs)) {
- addr := addrs[idx]
-
- node, err := c.nodes.GetOrCreate(addr)
- if err != nil {
- if firstErr == nil {
- firstErr = err
- }
- continue
- }
-
- slots, err := node.Client.ClusterSlots(ctx).Result()
- if err != nil {
- if firstErr == nil {
- firstErr = err
- }
- continue
- }
-
- return newClusterState(c.nodes, slots, node.Client.opt.Addr)
- }
-
- /*
- * No node is connectable. It's possible that all nodes' IP has changed.
- * Clear activeAddrs to let client be able to re-connect using the initial
- * setting of the addresses (e.g. [redis-cluster-0:6379, redis-cluster-1:6379]),
- * which might have chance to resolve domain name and get updated IP address.
- */
- c.nodes.mu.Lock()
- c.nodes.activeAddrs = nil
- c.nodes.mu.Unlock()
-
- return nil, firstErr
-}
-
-// reaper closes idle connections to the cluster.
-func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) {
- ticker := time.NewTicker(idleCheckFrequency)
- defer ticker.Stop()
-
- for range ticker.C {
- nodes, err := c.nodes.All()
- if err != nil {
- break
- }
-
- for _, node := range nodes {
- _, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns()
- if err != nil {
- internal.Logger.Printf(c.Context(), "ReapStaleConns failed: %s", err)
- }
- }
- }
-}
-
-func (c *ClusterClient) Pipeline() Pipeliner {
- pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processPipeline,
- }
- pipe.init()
- return &pipe
-}
-
-func (c *ClusterClient) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.Pipeline().Pipelined(ctx, fn)
-}
-
-func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processPipeline(ctx, cmds, c._processPipeline)
-}
-
-func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) error {
- cmdsMap := newCmdsMap()
- err := c.mapCmdsByNode(ctx, cmdsMap, cmds)
- if err != nil {
- setCmdsErr(cmds, err)
- return err
- }
-
- for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
- if attempt > 0 {
- if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
- setCmdsErr(cmds, err)
- return err
- }
- }
-
- failedCmds := newCmdsMap()
- var wg sync.WaitGroup
-
- for node, cmds := range cmdsMap.m {
- wg.Add(1)
- go func(node *clusterNode, cmds []Cmder) {
- defer wg.Done()
-
- err := c._processPipelineNode(ctx, node, cmds, failedCmds)
- if err == nil {
- return
- }
- if attempt < c.opt.MaxRedirects {
- if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil {
- setCmdsErr(cmds, err)
- }
- } else {
- setCmdsErr(cmds, err)
- }
- }(node, cmds)
- }
-
- wg.Wait()
- if len(failedCmds.m) == 0 {
- break
- }
- cmdsMap = failedCmds
- }
-
- return cmdsFirstErr(cmds)
-}
-
-func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmds []Cmder) error {
- state, err := c.state.Get(ctx)
- if err != nil {
- return err
- }
-
- if c.opt.ReadOnly && c.cmdsAreReadOnly(cmds) {
- for _, cmd := range cmds {
- slot := c.cmdSlot(cmd)
- node, err := c.slotReadOnlyNode(state, slot)
- if err != nil {
- return err
- }
- cmdsMap.Add(node, cmd)
- }
- return nil
- }
-
- for _, cmd := range cmds {
- slot := c.cmdSlot(cmd)
- node, err := state.slotMasterNode(slot)
- if err != nil {
- return err
- }
- cmdsMap.Add(node, cmd)
- }
- return nil
-}
-
-func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool {
- for _, cmd := range cmds {
- cmdInfo := c.cmdInfo(cmd.Name())
- if cmdInfo == nil || !cmdInfo.ReadOnly {
- return false
- }
- }
- return true
-}
-
-func (c *ClusterClient) _processPipelineNode(
- ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
-) error {
- return node.Client.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
- return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
- err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
- return writeCmds(wr, cmds)
- })
- if err != nil {
- return err
- }
-
- return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
- return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds)
- })
- })
- })
-}
-
-func (c *ClusterClient) pipelineReadCmds(
- ctx context.Context,
- node *clusterNode,
- rd *proto.Reader,
- cmds []Cmder,
- failedCmds *cmdsMap,
-) error {
- for _, cmd := range cmds {
- err := cmd.readReply(rd)
- cmd.SetErr(err)
-
- if err == nil {
- continue
- }
-
- if c.checkMovedErr(ctx, cmd, err, failedCmds) {
- continue
- }
-
- if c.opt.ReadOnly && isLoadingError(err) {
- node.MarkAsFailing()
- return err
- }
- if isRedisError(err) {
- continue
- }
- return err
- }
- return nil
-}
-
-func (c *ClusterClient) checkMovedErr(
- ctx context.Context, cmd Cmder, err error, failedCmds *cmdsMap,
-) bool {
- moved, ask, addr := isMovedError(err)
- if !moved && !ask {
- return false
- }
-
- node, err := c.nodes.GetOrCreate(addr)
- if err != nil {
- return false
- }
-
- if moved {
- c.state.LazyReload()
- failedCmds.Add(node, cmd)
- return true
- }
-
- if ask {
- failedCmds.Add(node, NewCmd(ctx, "asking"), cmd)
- return true
- }
-
- panic("not reached")
-}
-
-// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
-func (c *ClusterClient) TxPipeline() Pipeliner {
- pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processTxPipeline,
- }
- pipe.init()
- return &pipe
-}
-
-func (c *ClusterClient) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.TxPipeline().Pipelined(ctx, fn)
-}
-
-func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processTxPipeline(ctx, cmds, c._processTxPipeline)
-}
-
-func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error {
- // Trim multi .. exec.
- cmds = cmds[1 : len(cmds)-1]
-
- state, err := c.state.Get(ctx)
- if err != nil {
- setCmdsErr(cmds, err)
- return err
- }
-
- cmdsMap := c.mapCmdsBySlot(cmds)
- for slot, cmds := range cmdsMap {
- node, err := state.slotMasterNode(slot)
- if err != nil {
- setCmdsErr(cmds, err)
- continue
- }
-
- cmdsMap := map[*clusterNode][]Cmder{node: cmds}
- for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
- if attempt > 0 {
- if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
- setCmdsErr(cmds, err)
- return err
- }
- }
-
- failedCmds := newCmdsMap()
- var wg sync.WaitGroup
-
- for node, cmds := range cmdsMap {
- wg.Add(1)
- go func(node *clusterNode, cmds []Cmder) {
- defer wg.Done()
-
- err := c._processTxPipelineNode(ctx, node, cmds, failedCmds)
- if err == nil {
- return
- }
-
- if attempt < c.opt.MaxRedirects {
- if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil {
- setCmdsErr(cmds, err)
- }
- } else {
- setCmdsErr(cmds, err)
- }
- }(node, cmds)
- }
-
- wg.Wait()
- if len(failedCmds.m) == 0 {
- break
- }
- cmdsMap = failedCmds.m
- }
- }
-
- return cmdsFirstErr(cmds)
-}
-
-func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder {
- cmdsMap := make(map[int][]Cmder)
- for _, cmd := range cmds {
- slot := c.cmdSlot(cmd)
- cmdsMap[slot] = append(cmdsMap[slot], cmd)
- }
- return cmdsMap
-}
-
-func (c *ClusterClient) _processTxPipelineNode(
- ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
-) error {
- return node.Client.hooks.processTxPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
- return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
- err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
- return writeCmds(wr, cmds)
- })
- if err != nil {
- return err
- }
-
- return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
- statusCmd := cmds[0].(*StatusCmd)
- // Trim multi and exec.
- cmds = cmds[1 : len(cmds)-1]
-
- err := c.txPipelineReadQueued(ctx, rd, statusCmd, cmds, failedCmds)
- if err != nil {
- moved, ask, addr := isMovedError(err)
- if moved || ask {
- return c.cmdsMoved(ctx, cmds, moved, ask, addr, failedCmds)
- }
- return err
- }
-
- return pipelineReadCmds(rd, cmds)
- })
- })
- })
-}
-
-func (c *ClusterClient) txPipelineReadQueued(
- ctx context.Context,
- rd *proto.Reader,
- statusCmd *StatusCmd,
- cmds []Cmder,
- failedCmds *cmdsMap,
-) error {
- // Parse queued replies.
- if err := statusCmd.readReply(rd); err != nil {
- return err
- }
-
- for _, cmd := range cmds {
- err := statusCmd.readReply(rd)
- if err == nil || c.checkMovedErr(ctx, cmd, err, failedCmds) || isRedisError(err) {
- continue
- }
- return err
- }
-
- // Parse number of replies.
- line, err := rd.ReadLine()
- if err != nil {
- if err == Nil {
- err = TxFailedErr
- }
- return err
- }
-
- switch line[0] {
- case proto.ErrorReply:
- return proto.ParseErrorReply(line)
- case proto.ArrayReply:
- // ok
- default:
- return fmt.Errorf("redis: expected '*', but got line %q", line)
- }
-
- return nil
-}
-
-func (c *ClusterClient) cmdsMoved(
- ctx context.Context, cmds []Cmder,
- moved, ask bool,
- addr string,
- failedCmds *cmdsMap,
-) error {
- node, err := c.nodes.GetOrCreate(addr)
- if err != nil {
- return err
- }
-
- if moved {
- c.state.LazyReload()
- for _, cmd := range cmds {
- failedCmds.Add(node, cmd)
- }
- return nil
- }
-
- if ask {
- for _, cmd := range cmds {
- failedCmds.Add(node, NewCmd(ctx, "asking"), cmd)
- }
- return nil
- }
-
- return nil
-}
-
-func (c *ClusterClient) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
- if len(keys) == 0 {
- return fmt.Errorf("redis: Watch requires at least one key")
- }
-
- slot := hashtag.Slot(keys[0])
- for _, key := range keys[1:] {
- if hashtag.Slot(key) != slot {
- err := fmt.Errorf("redis: Watch requires all keys to be in the same slot")
- return err
- }
- }
-
- node, err := c.slotMasterNode(ctx, slot)
- if err != nil {
- return err
- }
-
- for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
- if attempt > 0 {
- if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
- return err
- }
- }
-
- err = node.Client.Watch(ctx, fn, keys...)
- if err == nil {
- break
- }
-
- moved, ask, addr := isMovedError(err)
- if moved || ask {
- node, err = c.nodes.GetOrCreate(addr)
- if err != nil {
- return err
- }
- continue
- }
-
- if isReadOnly := isReadOnlyError(err); isReadOnly || err == pool.ErrClosed {
- if isReadOnly {
- c.state.LazyReload()
- }
- node, err = c.slotMasterNode(ctx, slot)
- if err != nil {
- return err
- }
- continue
- }
-
- if shouldRetry(err, true) {
- continue
- }
-
- return err
- }
-
- return err
-}
-
-func (c *ClusterClient) pubSub() *PubSub {
- var node *clusterNode
- pubsub := &PubSub{
- opt: c.opt.clientOptions(),
-
- newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
- if node != nil {
- panic("node != nil")
- }
-
- var err error
- if len(channels) > 0 {
- slot := hashtag.Slot(channels[0])
- node, err = c.slotMasterNode(ctx, slot)
- } else {
- node, err = c.nodes.Random()
- }
- if err != nil {
- return nil, err
- }
-
- cn, err := node.Client.newConn(context.TODO())
- if err != nil {
- node = nil
-
- return nil, err
- }
-
- return cn, nil
- },
- closeConn: func(cn *pool.Conn) error {
- err := node.Client.connPool.CloseConn(cn)
- node = nil
- return err
- },
- }
- pubsub.init()
-
- return pubsub
-}
-
-// Subscribe subscribes the client to the specified channels.
-// Channels can be omitted to create empty subscription.
-func (c *ClusterClient) Subscribe(ctx context.Context, channels ...string) *PubSub {
- pubsub := c.pubSub()
- if len(channels) > 0 {
- _ = pubsub.Subscribe(ctx, channels...)
- }
- return pubsub
-}
-
-// PSubscribe subscribes the client to the given patterns.
-// Patterns can be omitted to create empty subscription.
-func (c *ClusterClient) PSubscribe(ctx context.Context, channels ...string) *PubSub {
- pubsub := c.pubSub()
- if len(channels) > 0 {
- _ = pubsub.PSubscribe(ctx, channels...)
- }
- return pubsub
-}
-
-func (c *ClusterClient) retryBackoff(attempt int) time.Duration {
- return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
-}
-
-func (c *ClusterClient) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) {
- // Try 3 random nodes.
- const nodeLimit = 3
-
- addrs, err := c.nodes.Addrs()
- if err != nil {
- return nil, err
- }
-
- var firstErr error
-
- perm := rand.Perm(len(addrs))
- if len(perm) > nodeLimit {
- perm = perm[:nodeLimit]
- }
-
- for _, idx := range perm {
- addr := addrs[idx]
-
- node, err := c.nodes.GetOrCreate(addr)
- if err != nil {
- if firstErr == nil {
- firstErr = err
- }
- continue
- }
-
- info, err := node.Client.Command(ctx).Result()
- if err == nil {
- return info, nil
- }
- if firstErr == nil {
- firstErr = err
- }
- }
-
- if firstErr == nil {
- panic("not reached")
- }
- return nil, firstErr
-}
-
-func (c *ClusterClient) cmdInfo(name string) *CommandInfo {
- cmdsInfo, err := c.cmdsInfoCache.Get(c.ctx)
- if err != nil {
- return nil
- }
-
- info := cmdsInfo[name]
- if info == nil {
- internal.Logger.Printf(c.Context(), "info for cmd=%s not found", name)
- }
- return info
-}
-
-func (c *ClusterClient) cmdSlot(cmd Cmder) int {
- args := cmd.Args()
- if args[0] == "cluster" && args[1] == "getkeysinslot" {
- return args[2].(int)
- }
-
- cmdInfo := c.cmdInfo(cmd.Name())
- return cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo))
-}
-
-func cmdSlot(cmd Cmder, pos int) int {
- if pos == 0 {
- return hashtag.RandomSlot()
- }
- firstKey := cmd.stringArg(pos)
- return hashtag.Slot(firstKey)
-}
-
-func (c *ClusterClient) cmdNode(
- ctx context.Context,
- cmdInfo *CommandInfo,
- slot int,
-) (*clusterNode, error) {
- state, err := c.state.Get(ctx)
- if err != nil {
- return nil, err
- }
-
- if c.opt.ReadOnly && cmdInfo != nil && cmdInfo.ReadOnly {
- return c.slotReadOnlyNode(state, slot)
- }
- return state.slotMasterNode(slot)
-}
-
-func (c *clusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) {
- if c.opt.RouteByLatency {
- return state.slotClosestNode(slot)
- }
- if c.opt.RouteRandomly {
- return state.slotRandomNode(slot)
- }
- return state.slotSlaveNode(slot)
-}
-
-func (c *ClusterClient) slotMasterNode(ctx context.Context, slot int) (*clusterNode, error) {
- state, err := c.state.Get(ctx)
- if err != nil {
- return nil, err
- }
- return state.slotMasterNode(slot)
-}
-
-// SlaveForKey gets a client for a replica node to run any command on it.
-// This is especially useful if we want to run a particular lua script which has
-// only read only commands on the replica.
-// This is because other redis commands generally have a flag that points that
-// they are read only and automatically run on the replica nodes
-// if ClusterOptions.ReadOnly flag is set to true.
-func (c *ClusterClient) SlaveForKey(ctx context.Context, key string) (*Client, error) {
- state, err := c.state.Get(ctx)
- if err != nil {
- return nil, err
- }
- slot := hashtag.Slot(key)
- node, err := c.slotReadOnlyNode(state, slot)
- if err != nil {
- return nil, err
- }
- return node.Client, err
-}
-
-// MasterForKey return a client to the master node for a particular key.
-func (c *ClusterClient) MasterForKey(ctx context.Context, key string) (*Client, error) {
- slot := hashtag.Slot(key)
- node, err := c.slotMasterNode(ctx, slot)
- if err != nil {
- return nil, err
- }
- return node.Client, err
-}
-
-func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode {
- for _, n := range nodes {
- if n == node {
- return nodes
- }
- }
- return append(nodes, node)
-}
-
-func appendIfNotExists(ss []string, es ...string) []string {
-loop:
- for _, e := range es {
- for _, s := range ss {
- if s == e {
- continue loop
- }
- }
- ss = append(ss, e)
- }
- return ss
-}
-
-//------------------------------------------------------------------------------
-
-type cmdsMap struct {
- mu sync.Mutex
- m map[*clusterNode][]Cmder
-}
-
-func newCmdsMap() *cmdsMap {
- return &cmdsMap{
- m: make(map[*clusterNode][]Cmder),
- }
-}
-
-func (m *cmdsMap) Add(node *clusterNode, cmds ...Cmder) {
- m.mu.Lock()
- m.m[node] = append(m.m[node], cmds...)
- m.mu.Unlock()
-}
diff --git a/vendor/github.com/go-redis/redis/v8/cluster_commands.go b/vendor/github.com/go-redis/redis/v8/cluster_commands.go
deleted file mode 100644
index 085bce83..00000000
--- a/vendor/github.com/go-redis/redis/v8/cluster_commands.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package redis
-
-import (
- "context"
- "sync"
- "sync/atomic"
-)
-
-func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd {
- cmd := NewIntCmd(ctx, "dbsize")
- _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
- var size int64
- err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error {
- n, err := master.DBSize(ctx).Result()
- if err != nil {
- return err
- }
- atomic.AddInt64(&size, n)
- return nil
- })
- if err != nil {
- cmd.SetErr(err)
- } else {
- cmd.val = size
- }
- return nil
- })
- return cmd
-}
-
-func (c *ClusterClient) ScriptLoad(ctx context.Context, script string) *StringCmd {
- cmd := NewStringCmd(ctx, "script", "load", script)
- _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
- mu := &sync.Mutex{}
- err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
- val, err := shard.ScriptLoad(ctx, script).Result()
- if err != nil {
- return err
- }
-
- mu.Lock()
- if cmd.Val() == "" {
- cmd.val = val
- }
- mu.Unlock()
-
- return nil
- })
- if err != nil {
- cmd.SetErr(err)
- }
- return nil
- })
- return cmd
-}
-
-func (c *ClusterClient) ScriptFlush(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "script", "flush")
- _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
- err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
- return shard.ScriptFlush(ctx).Err()
- })
- if err != nil {
- cmd.SetErr(err)
- }
- return nil
- })
- return cmd
-}
-
-func (c *ClusterClient) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd {
- args := make([]interface{}, 2+len(hashes))
- args[0] = "script"
- args[1] = "exists"
- for i, hash := range hashes {
- args[2+i] = hash
- }
- cmd := NewBoolSliceCmd(ctx, args...)
-
- result := make([]bool, len(hashes))
- for i := range result {
- result[i] = true
- }
-
- _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
- mu := &sync.Mutex{}
- err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
- val, err := shard.ScriptExists(ctx, hashes...).Result()
- if err != nil {
- return err
- }
-
- mu.Lock()
- for i, v := range val {
- result[i] = result[i] && v
- }
- mu.Unlock()
-
- return nil
- })
- if err != nil {
- cmd.SetErr(err)
- } else {
- cmd.val = result
- }
- return nil
- })
- return cmd
-}
diff --git a/vendor/github.com/go-redis/redis/v8/command.go b/vendor/github.com/go-redis/redis/v8/command.go
deleted file mode 100644
index 4bb12a85..00000000
--- a/vendor/github.com/go-redis/redis/v8/command.go
+++ /dev/null
@@ -1,3478 +0,0 @@
-package redis
-
-import (
- "context"
- "fmt"
- "net"
- "strconv"
- "time"
-
- "github.com/go-redis/redis/v8/internal"
- "github.com/go-redis/redis/v8/internal/hscan"
- "github.com/go-redis/redis/v8/internal/proto"
- "github.com/go-redis/redis/v8/internal/util"
-)
-
-type Cmder interface {
- Name() string
- FullName() string
- Args() []interface{}
- String() string
- stringArg(int) string
- firstKeyPos() int8
- SetFirstKeyPos(int8)
-
- readTimeout() *time.Duration
- readReply(rd *proto.Reader) error
-
- SetErr(error)
- Err() error
-}
-
-func setCmdsErr(cmds []Cmder, e error) {
- for _, cmd := range cmds {
- if cmd.Err() == nil {
- cmd.SetErr(e)
- }
- }
-}
-
-func cmdsFirstErr(cmds []Cmder) error {
- for _, cmd := range cmds {
- if err := cmd.Err(); err != nil {
- return err
- }
- }
- return nil
-}
-
-func writeCmds(wr *proto.Writer, cmds []Cmder) error {
- for _, cmd := range cmds {
- if err := writeCmd(wr, cmd); err != nil {
- return err
- }
- }
- return nil
-}
-
-func writeCmd(wr *proto.Writer, cmd Cmder) error {
- return wr.WriteArgs(cmd.Args())
-}
-
-func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int {
- if pos := cmd.firstKeyPos(); pos != 0 {
- return int(pos)
- }
-
- switch cmd.Name() {
- case "eval", "evalsha":
- if cmd.stringArg(2) != "0" {
- return 3
- }
-
- return 0
- case "publish":
- return 1
- case "memory":
- // https://github.com/redis/redis/issues/7493
- if cmd.stringArg(1) == "usage" {
- return 2
- }
- }
-
- if info != nil {
- return int(info.FirstKeyPos)
- }
- return 0
-}
-
-func cmdString(cmd Cmder, val interface{}) string {
- b := make([]byte, 0, 64)
-
- for i, arg := range cmd.Args() {
- if i > 0 {
- b = append(b, ' ')
- }
- b = internal.AppendArg(b, arg)
- }
-
- if err := cmd.Err(); err != nil {
- b = append(b, ": "...)
- b = append(b, err.Error()...)
- } else if val != nil {
- b = append(b, ": "...)
- b = internal.AppendArg(b, val)
- }
-
- return internal.String(b)
-}
-
-//------------------------------------------------------------------------------
-
-type baseCmd struct {
- ctx context.Context
- args []interface{}
- err error
- keyPos int8
-
- _readTimeout *time.Duration
-}
-
-var _ Cmder = (*Cmd)(nil)
-
-func (cmd *baseCmd) Name() string {
- if len(cmd.args) == 0 {
- return ""
- }
- // Cmd name must be lower cased.
- return internal.ToLower(cmd.stringArg(0))
-}
-
-func (cmd *baseCmd) FullName() string {
- switch name := cmd.Name(); name {
- case "cluster", "command":
- if len(cmd.args) == 1 {
- return name
- }
- if s2, ok := cmd.args[1].(string); ok {
- return name + " " + s2
- }
- return name
- default:
- return name
- }
-}
-
-func (cmd *baseCmd) Args() []interface{} {
- return cmd.args
-}
-
-func (cmd *baseCmd) stringArg(pos int) string {
- if pos < 0 || pos >= len(cmd.args) {
- return ""
- }
- arg := cmd.args[pos]
- switch v := arg.(type) {
- case string:
- return v
- default:
- // TODO: consider using appendArg
- return fmt.Sprint(v)
- }
-}
-
-func (cmd *baseCmd) firstKeyPos() int8 {
- return cmd.keyPos
-}
-
-func (cmd *baseCmd) SetFirstKeyPos(keyPos int8) {
- cmd.keyPos = keyPos
-}
-
-func (cmd *baseCmd) SetErr(e error) {
- cmd.err = e
-}
-
-func (cmd *baseCmd) Err() error {
- return cmd.err
-}
-
-func (cmd *baseCmd) readTimeout() *time.Duration {
- return cmd._readTimeout
-}
-
-func (cmd *baseCmd) setReadTimeout(d time.Duration) {
- cmd._readTimeout = &d
-}
-
-//------------------------------------------------------------------------------
-
-type Cmd struct {
- baseCmd
-
- val interface{}
-}
-
-func NewCmd(ctx context.Context, args ...interface{}) *Cmd {
- return &Cmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *Cmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *Cmd) SetVal(val interface{}) {
- cmd.val = val
-}
-
-func (cmd *Cmd) Val() interface{} {
- return cmd.val
-}
-
-func (cmd *Cmd) Result() (interface{}, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *Cmd) Text() (string, error) {
- if cmd.err != nil {
- return "", cmd.err
- }
- return toString(cmd.val)
-}
-
-func toString(val interface{}) (string, error) {
- switch val := val.(type) {
- case string:
- return val, nil
- default:
- err := fmt.Errorf("redis: unexpected type=%T for String", val)
- return "", err
- }
-}
-
-func (cmd *Cmd) Int() (int, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- switch val := cmd.val.(type) {
- case int64:
- return int(val), nil
- case string:
- return strconv.Atoi(val)
- default:
- err := fmt.Errorf("redis: unexpected type=%T for Int", val)
- return 0, err
- }
-}
-
-func (cmd *Cmd) Int64() (int64, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return toInt64(cmd.val)
-}
-
-func toInt64(val interface{}) (int64, error) {
- switch val := val.(type) {
- case int64:
- return val, nil
- case string:
- return strconv.ParseInt(val, 10, 64)
- default:
- err := fmt.Errorf("redis: unexpected type=%T for Int64", val)
- return 0, err
- }
-}
-
-func (cmd *Cmd) Uint64() (uint64, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return toUint64(cmd.val)
-}
-
-func toUint64(val interface{}) (uint64, error) {
- switch val := val.(type) {
- case int64:
- return uint64(val), nil
- case string:
- return strconv.ParseUint(val, 10, 64)
- default:
- err := fmt.Errorf("redis: unexpected type=%T for Uint64", val)
- return 0, err
- }
-}
-
-func (cmd *Cmd) Float32() (float32, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return toFloat32(cmd.val)
-}
-
-func toFloat32(val interface{}) (float32, error) {
- switch val := val.(type) {
- case int64:
- return float32(val), nil
- case string:
- f, err := strconv.ParseFloat(val, 32)
- if err != nil {
- return 0, err
- }
- return float32(f), nil
- default:
- err := fmt.Errorf("redis: unexpected type=%T for Float32", val)
- return 0, err
- }
-}
-
-func (cmd *Cmd) Float64() (float64, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return toFloat64(cmd.val)
-}
-
-func toFloat64(val interface{}) (float64, error) {
- switch val := val.(type) {
- case int64:
- return float64(val), nil
- case string:
- return strconv.ParseFloat(val, 64)
- default:
- err := fmt.Errorf("redis: unexpected type=%T for Float64", val)
- return 0, err
- }
-}
-
-func (cmd *Cmd) Bool() (bool, error) {
- if cmd.err != nil {
- return false, cmd.err
- }
- return toBool(cmd.val)
-}
-
-func toBool(val interface{}) (bool, error) {
- switch val := val.(type) {
- case int64:
- return val != 0, nil
- case string:
- return strconv.ParseBool(val)
- default:
- err := fmt.Errorf("redis: unexpected type=%T for Bool", val)
- return false, err
- }
-}
-
-func (cmd *Cmd) Slice() ([]interface{}, error) {
- if cmd.err != nil {
- return nil, cmd.err
- }
- switch val := cmd.val.(type) {
- case []interface{}:
- return val, nil
- default:
- return nil, fmt.Errorf("redis: unexpected type=%T for Slice", val)
- }
-}
-
-func (cmd *Cmd) StringSlice() ([]string, error) {
- slice, err := cmd.Slice()
- if err != nil {
- return nil, err
- }
-
- ss := make([]string, len(slice))
- for i, iface := range slice {
- val, err := toString(iface)
- if err != nil {
- return nil, err
- }
- ss[i] = val
- }
- return ss, nil
-}
-
-func (cmd *Cmd) Int64Slice() ([]int64, error) {
- slice, err := cmd.Slice()
- if err != nil {
- return nil, err
- }
-
- nums := make([]int64, len(slice))
- for i, iface := range slice {
- val, err := toInt64(iface)
- if err != nil {
- return nil, err
- }
- nums[i] = val
- }
- return nums, nil
-}
-
-func (cmd *Cmd) Uint64Slice() ([]uint64, error) {
- slice, err := cmd.Slice()
- if err != nil {
- return nil, err
- }
-
- nums := make([]uint64, len(slice))
- for i, iface := range slice {
- val, err := toUint64(iface)
- if err != nil {
- return nil, err
- }
- nums[i] = val
- }
- return nums, nil
-}
-
-func (cmd *Cmd) Float32Slice() ([]float32, error) {
- slice, err := cmd.Slice()
- if err != nil {
- return nil, err
- }
-
- floats := make([]float32, len(slice))
- for i, iface := range slice {
- val, err := toFloat32(iface)
- if err != nil {
- return nil, err
- }
- floats[i] = val
- }
- return floats, nil
-}
-
-func (cmd *Cmd) Float64Slice() ([]float64, error) {
- slice, err := cmd.Slice()
- if err != nil {
- return nil, err
- }
-
- floats := make([]float64, len(slice))
- for i, iface := range slice {
- val, err := toFloat64(iface)
- if err != nil {
- return nil, err
- }
- floats[i] = val
- }
- return floats, nil
-}
-
-func (cmd *Cmd) BoolSlice() ([]bool, error) {
- slice, err := cmd.Slice()
- if err != nil {
- return nil, err
- }
-
- bools := make([]bool, len(slice))
- for i, iface := range slice {
- val, err := toBool(iface)
- if err != nil {
- return nil, err
- }
- bools[i] = val
- }
- return bools, nil
-}
-
-func (cmd *Cmd) readReply(rd *proto.Reader) (err error) {
- cmd.val, err = rd.ReadReply(sliceParser)
- return err
-}
-
-// sliceParser implements proto.MultiBulkParse.
-func sliceParser(rd *proto.Reader, n int64) (interface{}, error) {
- vals := make([]interface{}, n)
- for i := 0; i < len(vals); i++ {
- v, err := rd.ReadReply(sliceParser)
- if err != nil {
- if err == Nil {
- vals[i] = nil
- continue
- }
- if err, ok := err.(proto.RedisError); ok {
- vals[i] = err
- continue
- }
- return nil, err
- }
- vals[i] = v
- }
- return vals, nil
-}
-
-//------------------------------------------------------------------------------
-
-type SliceCmd struct {
- baseCmd
-
- val []interface{}
-}
-
-var _ Cmder = (*SliceCmd)(nil)
-
-func NewSliceCmd(ctx context.Context, args ...interface{}) *SliceCmd {
- return &SliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *SliceCmd) SetVal(val []interface{}) {
- cmd.val = val
-}
-
-func (cmd *SliceCmd) Val() []interface{} {
- return cmd.val
-}
-
-func (cmd *SliceCmd) Result() ([]interface{}, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *SliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-// Scan scans the results from the map into a destination struct. The map keys
-// are matched in the Redis struct fields by the `redis:"field"` tag.
-func (cmd *SliceCmd) Scan(dst interface{}) error {
- if cmd.err != nil {
- return cmd.err
- }
-
- // Pass the list of keys and values.
- // Skip the first two args for: HMGET key
- var args []interface{}
- if cmd.args[0] == "hmget" {
- args = cmd.args[2:]
- } else {
- // Otherwise, it's: MGET field field ...
- args = cmd.args[1:]
- }
-
- return hscan.Scan(dst, args, cmd.val)
-}
-
-func (cmd *SliceCmd) readReply(rd *proto.Reader) error {
- v, err := rd.ReadArrayReply(sliceParser)
- if err != nil {
- return err
- }
- cmd.val = v.([]interface{})
- return nil
-}
-
-//------------------------------------------------------------------------------
-
-type StatusCmd struct {
- baseCmd
-
- val string
-}
-
-var _ Cmder = (*StatusCmd)(nil)
-
-func NewStatusCmd(ctx context.Context, args ...interface{}) *StatusCmd {
- return &StatusCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *StatusCmd) SetVal(val string) {
- cmd.val = val
-}
-
-func (cmd *StatusCmd) Val() string {
- return cmd.val
-}
-
-func (cmd *StatusCmd) Result() (string, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *StatusCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *StatusCmd) readReply(rd *proto.Reader) (err error) {
- cmd.val, err = rd.ReadString()
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type IntCmd struct {
- baseCmd
-
- val int64
-}
-
-var _ Cmder = (*IntCmd)(nil)
-
-func NewIntCmd(ctx context.Context, args ...interface{}) *IntCmd {
- return &IntCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *IntCmd) SetVal(val int64) {
- cmd.val = val
-}
-
-func (cmd *IntCmd) Val() int64 {
- return cmd.val
-}
-
-func (cmd *IntCmd) Result() (int64, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *IntCmd) Uint64() (uint64, error) {
- return uint64(cmd.val), cmd.err
-}
-
-func (cmd *IntCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *IntCmd) readReply(rd *proto.Reader) (err error) {
- cmd.val, err = rd.ReadIntReply()
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type IntSliceCmd struct {
- baseCmd
-
- val []int64
-}
-
-var _ Cmder = (*IntSliceCmd)(nil)
-
-func NewIntSliceCmd(ctx context.Context, args ...interface{}) *IntSliceCmd {
- return &IntSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *IntSliceCmd) SetVal(val []int64) {
- cmd.val = val
-}
-
-func (cmd *IntSliceCmd) Val() []int64 {
- return cmd.val
-}
-
-func (cmd *IntSliceCmd) Result() ([]int64, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *IntSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *IntSliceCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]int64, n)
- for i := 0; i < len(cmd.val); i++ {
- num, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.val[i] = num
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type DurationCmd struct {
- baseCmd
-
- val time.Duration
- precision time.Duration
-}
-
-var _ Cmder = (*DurationCmd)(nil)
-
-func NewDurationCmd(ctx context.Context, precision time.Duration, args ...interface{}) *DurationCmd {
- return &DurationCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- precision: precision,
- }
-}
-
-func (cmd *DurationCmd) SetVal(val time.Duration) {
- cmd.val = val
-}
-
-func (cmd *DurationCmd) Val() time.Duration {
- return cmd.val
-}
-
-func (cmd *DurationCmd) Result() (time.Duration, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *DurationCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *DurationCmd) readReply(rd *proto.Reader) error {
- n, err := rd.ReadIntReply()
- if err != nil {
- return err
- }
- switch n {
- // -2 if the key does not exist
- // -1 if the key exists but has no associated expire
- case -2, -1:
- cmd.val = time.Duration(n)
- default:
- cmd.val = time.Duration(n) * cmd.precision
- }
- return nil
-}
-
-//------------------------------------------------------------------------------
-
-type TimeCmd struct {
- baseCmd
-
- val time.Time
-}
-
-var _ Cmder = (*TimeCmd)(nil)
-
-func NewTimeCmd(ctx context.Context, args ...interface{}) *TimeCmd {
- return &TimeCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *TimeCmd) SetVal(val time.Time) {
- cmd.val = val
-}
-
-func (cmd *TimeCmd) Val() time.Time {
- return cmd.val
-}
-
-func (cmd *TimeCmd) Result() (time.Time, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *TimeCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *TimeCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 2 {
- return nil, fmt.Errorf("got %d elements, expected 2", n)
- }
-
- sec, err := rd.ReadInt()
- if err != nil {
- return nil, err
- }
-
- microsec, err := rd.ReadInt()
- if err != nil {
- return nil, err
- }
-
- cmd.val = time.Unix(sec, microsec*1000)
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type BoolCmd struct {
- baseCmd
-
- val bool
-}
-
-var _ Cmder = (*BoolCmd)(nil)
-
-func NewBoolCmd(ctx context.Context, args ...interface{}) *BoolCmd {
- return &BoolCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *BoolCmd) SetVal(val bool) {
- cmd.val = val
-}
-
-func (cmd *BoolCmd) Val() bool {
- return cmd.val
-}
-
-func (cmd *BoolCmd) Result() (bool, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *BoolCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *BoolCmd) readReply(rd *proto.Reader) error {
- v, err := rd.ReadReply(nil)
- // `SET key value NX` returns nil when key already exists. But
- // `SETNX key value` returns bool (0/1). So convert nil to bool.
- if err == Nil {
- cmd.val = false
- return nil
- }
- if err != nil {
- return err
- }
- switch v := v.(type) {
- case int64:
- cmd.val = v == 1
- return nil
- case string:
- cmd.val = v == "OK"
- return nil
- default:
- return fmt.Errorf("got %T, wanted int64 or string", v)
- }
-}
-
-//------------------------------------------------------------------------------
-
-type StringCmd struct {
- baseCmd
-
- val string
-}
-
-var _ Cmder = (*StringCmd)(nil)
-
-func NewStringCmd(ctx context.Context, args ...interface{}) *StringCmd {
- return &StringCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *StringCmd) SetVal(val string) {
- cmd.val = val
-}
-
-func (cmd *StringCmd) Val() string {
- return cmd.val
-}
-
-func (cmd *StringCmd) Result() (string, error) {
- return cmd.Val(), cmd.err
-}
-
-func (cmd *StringCmd) Bytes() ([]byte, error) {
- return util.StringToBytes(cmd.val), cmd.err
-}
-
-func (cmd *StringCmd) Bool() (bool, error) {
- if cmd.err != nil {
- return false, cmd.err
- }
- return strconv.ParseBool(cmd.val)
-}
-
-func (cmd *StringCmd) Int() (int, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return strconv.Atoi(cmd.Val())
-}
-
-func (cmd *StringCmd) Int64() (int64, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return strconv.ParseInt(cmd.Val(), 10, 64)
-}
-
-func (cmd *StringCmd) Uint64() (uint64, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return strconv.ParseUint(cmd.Val(), 10, 64)
-}
-
-func (cmd *StringCmd) Float32() (float32, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- f, err := strconv.ParseFloat(cmd.Val(), 32)
- if err != nil {
- return 0, err
- }
- return float32(f), nil
-}
-
-func (cmd *StringCmd) Float64() (float64, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return strconv.ParseFloat(cmd.Val(), 64)
-}
-
-func (cmd *StringCmd) Time() (time.Time, error) {
- if cmd.err != nil {
- return time.Time{}, cmd.err
- }
- return time.Parse(time.RFC3339Nano, cmd.Val())
-}
-
-func (cmd *StringCmd) Scan(val interface{}) error {
- if cmd.err != nil {
- return cmd.err
- }
- return proto.Scan([]byte(cmd.val), val)
-}
-
-func (cmd *StringCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *StringCmd) readReply(rd *proto.Reader) (err error) {
- cmd.val, err = rd.ReadString()
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type FloatCmd struct {
- baseCmd
-
- val float64
-}
-
-var _ Cmder = (*FloatCmd)(nil)
-
-func NewFloatCmd(ctx context.Context, args ...interface{}) *FloatCmd {
- return &FloatCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *FloatCmd) SetVal(val float64) {
- cmd.val = val
-}
-
-func (cmd *FloatCmd) Val() float64 {
- return cmd.val
-}
-
-func (cmd *FloatCmd) Result() (float64, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *FloatCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *FloatCmd) readReply(rd *proto.Reader) (err error) {
- cmd.val, err = rd.ReadFloatReply()
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type FloatSliceCmd struct {
- baseCmd
-
- val []float64
-}
-
-var _ Cmder = (*FloatSliceCmd)(nil)
-
-func NewFloatSliceCmd(ctx context.Context, args ...interface{}) *FloatSliceCmd {
- return &FloatSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *FloatSliceCmd) SetVal(val []float64) {
- cmd.val = val
-}
-
-func (cmd *FloatSliceCmd) Val() []float64 {
- return cmd.val
-}
-
-func (cmd *FloatSliceCmd) Result() ([]float64, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *FloatSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *FloatSliceCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]float64, n)
- for i := 0; i < len(cmd.val); i++ {
- switch num, err := rd.ReadFloatReply(); {
- case err == Nil:
- cmd.val[i] = 0
- case err != nil:
- return nil, err
- default:
- cmd.val[i] = num
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type StringSliceCmd struct {
- baseCmd
-
- val []string
-}
-
-var _ Cmder = (*StringSliceCmd)(nil)
-
-func NewStringSliceCmd(ctx context.Context, args ...interface{}) *StringSliceCmd {
- return &StringSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *StringSliceCmd) SetVal(val []string) {
- cmd.val = val
-}
-
-func (cmd *StringSliceCmd) Val() []string {
- return cmd.val
-}
-
-func (cmd *StringSliceCmd) Result() ([]string, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *StringSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *StringSliceCmd) ScanSlice(container interface{}) error {
- return proto.ScanSlice(cmd.Val(), container)
-}
-
-func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]string, n)
- for i := 0; i < len(cmd.val); i++ {
- switch s, err := rd.ReadString(); {
- case err == Nil:
- cmd.val[i] = ""
- case err != nil:
- return nil, err
- default:
- cmd.val[i] = s
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type BoolSliceCmd struct {
- baseCmd
-
- val []bool
-}
-
-var _ Cmder = (*BoolSliceCmd)(nil)
-
-func NewBoolSliceCmd(ctx context.Context, args ...interface{}) *BoolSliceCmd {
- return &BoolSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *BoolSliceCmd) SetVal(val []bool) {
- cmd.val = val
-}
-
-func (cmd *BoolSliceCmd) Val() []bool {
- return cmd.val
-}
-
-func (cmd *BoolSliceCmd) Result() ([]bool, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *BoolSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]bool, n)
- for i := 0; i < len(cmd.val); i++ {
- n, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.val[i] = n == 1
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type StringStringMapCmd struct {
- baseCmd
-
- val map[string]string
-}
-
-var _ Cmder = (*StringStringMapCmd)(nil)
-
-func NewStringStringMapCmd(ctx context.Context, args ...interface{}) *StringStringMapCmd {
- return &StringStringMapCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *StringStringMapCmd) SetVal(val map[string]string) {
- cmd.val = val
-}
-
-func (cmd *StringStringMapCmd) Val() map[string]string {
- return cmd.val
-}
-
-func (cmd *StringStringMapCmd) Result() (map[string]string, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *StringStringMapCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-// Scan scans the results from the map into a destination struct. The map keys
-// are matched in the Redis struct fields by the `redis:"field"` tag.
-func (cmd *StringStringMapCmd) Scan(dest interface{}) error {
- if cmd.err != nil {
- return cmd.err
- }
-
- strct, err := hscan.Struct(dest)
- if err != nil {
- return err
- }
-
- for k, v := range cmd.val {
- if err := strct.Scan(k, v); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (cmd *StringStringMapCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make(map[string]string, n/2)
- for i := int64(0); i < n; i += 2 {
- key, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- value, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- cmd.val[key] = value
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type StringIntMapCmd struct {
- baseCmd
-
- val map[string]int64
-}
-
-var _ Cmder = (*StringIntMapCmd)(nil)
-
-func NewStringIntMapCmd(ctx context.Context, args ...interface{}) *StringIntMapCmd {
- return &StringIntMapCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *StringIntMapCmd) SetVal(val map[string]int64) {
- cmd.val = val
-}
-
-func (cmd *StringIntMapCmd) Val() map[string]int64 {
- return cmd.val
-}
-
-func (cmd *StringIntMapCmd) Result() (map[string]int64, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *StringIntMapCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *StringIntMapCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make(map[string]int64, n/2)
- for i := int64(0); i < n; i += 2 {
- key, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- n, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- cmd.val[key] = n
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type StringStructMapCmd struct {
- baseCmd
-
- val map[string]struct{}
-}
-
-var _ Cmder = (*StringStructMapCmd)(nil)
-
-func NewStringStructMapCmd(ctx context.Context, args ...interface{}) *StringStructMapCmd {
- return &StringStructMapCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *StringStructMapCmd) SetVal(val map[string]struct{}) {
- cmd.val = val
-}
-
-func (cmd *StringStructMapCmd) Val() map[string]struct{} {
- return cmd.val
-}
-
-func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *StringStructMapCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make(map[string]struct{}, n)
- for i := int64(0); i < n; i++ {
- key, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
- cmd.val[key] = struct{}{}
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type XMessage struct {
- ID string
- Values map[string]interface{}
-}
-
-type XMessageSliceCmd struct {
- baseCmd
-
- val []XMessage
-}
-
-var _ Cmder = (*XMessageSliceCmd)(nil)
-
-func NewXMessageSliceCmd(ctx context.Context, args ...interface{}) *XMessageSliceCmd {
- return &XMessageSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XMessageSliceCmd) SetVal(val []XMessage) {
- cmd.val = val
-}
-
-func (cmd *XMessageSliceCmd) Val() []XMessage {
- return cmd.val
-}
-
-func (cmd *XMessageSliceCmd) Result() ([]XMessage, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XMessageSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) error {
- var err error
- cmd.val, err = readXMessageSlice(rd)
- return err
-}
-
-func readXMessageSlice(rd *proto.Reader) ([]XMessage, error) {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
-
- msgs := make([]XMessage, n)
- for i := 0; i < n; i++ {
- var err error
- msgs[i], err = readXMessage(rd)
- if err != nil {
- return nil, err
- }
- }
- return msgs, nil
-}
-
-func readXMessage(rd *proto.Reader) (XMessage, error) {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return XMessage{}, err
- }
- if n != 2 {
- return XMessage{}, fmt.Errorf("got %d, wanted 2", n)
- }
-
- id, err := rd.ReadString()
- if err != nil {
- return XMessage{}, err
- }
-
- var values map[string]interface{}
-
- v, err := rd.ReadArrayReply(stringInterfaceMapParser)
- if err != nil {
- if err != proto.Nil {
- return XMessage{}, err
- }
- } else {
- values = v.(map[string]interface{})
- }
-
- return XMessage{
- ID: id,
- Values: values,
- }, nil
-}
-
-// stringInterfaceMapParser implements proto.MultiBulkParse.
-func stringInterfaceMapParser(rd *proto.Reader, n int64) (interface{}, error) {
- m := make(map[string]interface{}, n/2)
- for i := int64(0); i < n; i += 2 {
- key, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- value, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- m[key] = value
- }
- return m, nil
-}
-
-//------------------------------------------------------------------------------
-
-type XStream struct {
- Stream string
- Messages []XMessage
-}
-
-type XStreamSliceCmd struct {
- baseCmd
-
- val []XStream
-}
-
-var _ Cmder = (*XStreamSliceCmd)(nil)
-
-func NewXStreamSliceCmd(ctx context.Context, args ...interface{}) *XStreamSliceCmd {
- return &XStreamSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XStreamSliceCmd) SetVal(val []XStream) {
- cmd.val = val
-}
-
-func (cmd *XStreamSliceCmd) Val() []XStream {
- return cmd.val
-}
-
-func (cmd *XStreamSliceCmd) Result() ([]XStream, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XStreamSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]XStream, n)
- for i := 0; i < len(cmd.val); i++ {
- i := i
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 2 {
- return nil, fmt.Errorf("got %d, wanted 2", n)
- }
-
- stream, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- msgs, err := readXMessageSlice(rd)
- if err != nil {
- return nil, err
- }
-
- cmd.val[i] = XStream{
- Stream: stream,
- Messages: msgs,
- }
- return nil, nil
- })
- if err != nil {
- return nil, err
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type XPending struct {
- Count int64
- Lower string
- Higher string
- Consumers map[string]int64
-}
-
-type XPendingCmd struct {
- baseCmd
- val *XPending
-}
-
-var _ Cmder = (*XPendingCmd)(nil)
-
-func NewXPendingCmd(ctx context.Context, args ...interface{}) *XPendingCmd {
- return &XPendingCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XPendingCmd) SetVal(val *XPending) {
- cmd.val = val
-}
-
-func (cmd *XPendingCmd) Val() *XPending {
- return cmd.val
-}
-
-func (cmd *XPendingCmd) Result() (*XPending, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XPendingCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XPendingCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 4 {
- return nil, fmt.Errorf("got %d, wanted 4", n)
- }
-
- count, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- lower, err := rd.ReadString()
- if err != nil && err != Nil {
- return nil, err
- }
-
- higher, err := rd.ReadString()
- if err != nil && err != Nil {
- return nil, err
- }
-
- cmd.val = &XPending{
- Count: count,
- Lower: lower,
- Higher: higher,
- }
- _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- for i := int64(0); i < n; i++ {
- _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 2 {
- return nil, fmt.Errorf("got %d, wanted 2", n)
- }
-
- consumerName, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- consumerPending, err := rd.ReadInt()
- if err != nil {
- return nil, err
- }
-
- if cmd.val.Consumers == nil {
- cmd.val.Consumers = make(map[string]int64)
- }
- cmd.val.Consumers[consumerName] = consumerPending
-
- return nil, nil
- })
- if err != nil {
- return nil, err
- }
- }
- return nil, nil
- })
- if err != nil && err != Nil {
- return nil, err
- }
-
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type XPendingExt struct {
- ID string
- Consumer string
- Idle time.Duration
- RetryCount int64
-}
-
-type XPendingExtCmd struct {
- baseCmd
- val []XPendingExt
-}
-
-var _ Cmder = (*XPendingExtCmd)(nil)
-
-func NewXPendingExtCmd(ctx context.Context, args ...interface{}) *XPendingExtCmd {
- return &XPendingExtCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XPendingExtCmd) SetVal(val []XPendingExt) {
- cmd.val = val
-}
-
-func (cmd *XPendingExtCmd) Val() []XPendingExt {
- return cmd.val
-}
-
-func (cmd *XPendingExtCmd) Result() ([]XPendingExt, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XPendingExtCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]XPendingExt, 0, n)
- for i := int64(0); i < n; i++ {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 4 {
- return nil, fmt.Errorf("got %d, wanted 4", n)
- }
-
- id, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- consumer, err := rd.ReadString()
- if err != nil && err != Nil {
- return nil, err
- }
-
- idle, err := rd.ReadIntReply()
- if err != nil && err != Nil {
- return nil, err
- }
-
- retryCount, err := rd.ReadIntReply()
- if err != nil && err != Nil {
- return nil, err
- }
-
- cmd.val = append(cmd.val, XPendingExt{
- ID: id,
- Consumer: consumer,
- Idle: time.Duration(idle) * time.Millisecond,
- RetryCount: retryCount,
- })
- return nil, nil
- })
- if err != nil {
- return nil, err
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type XAutoClaimCmd struct {
- baseCmd
-
- start string
- val []XMessage
-}
-
-var _ Cmder = (*XAutoClaimCmd)(nil)
-
-func NewXAutoClaimCmd(ctx context.Context, args ...interface{}) *XAutoClaimCmd {
- return &XAutoClaimCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XAutoClaimCmd) SetVal(val []XMessage, start string) {
- cmd.val = val
- cmd.start = start
-}
-
-func (cmd *XAutoClaimCmd) Val() (messages []XMessage, start string) {
- return cmd.val, cmd.start
-}
-
-func (cmd *XAutoClaimCmd) Result() (messages []XMessage, start string, err error) {
- return cmd.val, cmd.start, cmd.err
-}
-
-func (cmd *XAutoClaimCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XAutoClaimCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 2 {
- return nil, fmt.Errorf("got %d, wanted 2", n)
- }
- var err error
-
- cmd.start, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- cmd.val, err = readXMessageSlice(rd)
- if err != nil {
- return nil, err
- }
-
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type XAutoClaimJustIDCmd struct {
- baseCmd
-
- start string
- val []string
-}
-
-var _ Cmder = (*XAutoClaimJustIDCmd)(nil)
-
-func NewXAutoClaimJustIDCmd(ctx context.Context, args ...interface{}) *XAutoClaimJustIDCmd {
- return &XAutoClaimJustIDCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XAutoClaimJustIDCmd) SetVal(val []string, start string) {
- cmd.val = val
- cmd.start = start
-}
-
-func (cmd *XAutoClaimJustIDCmd) Val() (ids []string, start string) {
- return cmd.val, cmd.start
-}
-
-func (cmd *XAutoClaimJustIDCmd) Result() (ids []string, start string, err error) {
- return cmd.val, cmd.start, cmd.err
-}
-
-func (cmd *XAutoClaimJustIDCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XAutoClaimJustIDCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 2 {
- return nil, fmt.Errorf("got %d, wanted 2", n)
- }
- var err error
-
- cmd.start, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- nn, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
-
- cmd.val = make([]string, nn)
- for i := 0; i < nn; i++ {
- cmd.val[i], err = rd.ReadString()
- if err != nil {
- return nil, err
- }
- }
-
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type XInfoConsumersCmd struct {
- baseCmd
- val []XInfoConsumer
-}
-
-type XInfoConsumer struct {
- Name string
- Pending int64
- Idle int64
-}
-
-var _ Cmder = (*XInfoConsumersCmd)(nil)
-
-func NewXInfoConsumersCmd(ctx context.Context, stream string, group string) *XInfoConsumersCmd {
- return &XInfoConsumersCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: []interface{}{"xinfo", "consumers", stream, group},
- },
- }
-}
-
-func (cmd *XInfoConsumersCmd) SetVal(val []XInfoConsumer) {
- cmd.val = val
-}
-
-func (cmd *XInfoConsumersCmd) Val() []XInfoConsumer {
- return cmd.val
-}
-
-func (cmd *XInfoConsumersCmd) Result() ([]XInfoConsumer, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XInfoConsumersCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XInfoConsumersCmd) readReply(rd *proto.Reader) error {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return err
- }
-
- cmd.val = make([]XInfoConsumer, n)
-
- for i := 0; i < n; i++ {
- cmd.val[i], err = readXConsumerInfo(rd)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func readXConsumerInfo(rd *proto.Reader) (XInfoConsumer, error) {
- var consumer XInfoConsumer
-
- n, err := rd.ReadArrayLen()
- if err != nil {
- return consumer, err
- }
- if n != 6 {
- return consumer, fmt.Errorf("redis: got %d elements in XINFO CONSUMERS reply, wanted 6", n)
- }
-
- for i := 0; i < 3; i++ {
- key, err := rd.ReadString()
- if err != nil {
- return consumer, err
- }
-
- val, err := rd.ReadString()
- if err != nil {
- return consumer, err
- }
-
- switch key {
- case "name":
- consumer.Name = val
- case "pending":
- consumer.Pending, err = strconv.ParseInt(val, 0, 64)
- if err != nil {
- return consumer, err
- }
- case "idle":
- consumer.Idle, err = strconv.ParseInt(val, 0, 64)
- if err != nil {
- return consumer, err
- }
- default:
- return consumer, fmt.Errorf("redis: unexpected content %s in XINFO CONSUMERS reply", key)
- }
- }
-
- return consumer, nil
-}
-
-//------------------------------------------------------------------------------
-
-type XInfoGroupsCmd struct {
- baseCmd
- val []XInfoGroup
-}
-
-type XInfoGroup struct {
- Name string
- Consumers int64
- Pending int64
- LastDeliveredID string
-}
-
-var _ Cmder = (*XInfoGroupsCmd)(nil)
-
-func NewXInfoGroupsCmd(ctx context.Context, stream string) *XInfoGroupsCmd {
- return &XInfoGroupsCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: []interface{}{"xinfo", "groups", stream},
- },
- }
-}
-
-func (cmd *XInfoGroupsCmd) SetVal(val []XInfoGroup) {
- cmd.val = val
-}
-
-func (cmd *XInfoGroupsCmd) Val() []XInfoGroup {
- return cmd.val
-}
-
-func (cmd *XInfoGroupsCmd) Result() ([]XInfoGroup, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XInfoGroupsCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return err
- }
-
- cmd.val = make([]XInfoGroup, n)
-
- for i := 0; i < n; i++ {
- cmd.val[i], err = readXGroupInfo(rd)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func readXGroupInfo(rd *proto.Reader) (XInfoGroup, error) {
- var group XInfoGroup
-
- n, err := rd.ReadArrayLen()
- if err != nil {
- return group, err
- }
- if n != 8 {
- return group, fmt.Errorf("redis: got %d elements in XINFO GROUPS reply, wanted 8", n)
- }
-
- for i := 0; i < 4; i++ {
- key, err := rd.ReadString()
- if err != nil {
- return group, err
- }
-
- val, err := rd.ReadString()
- if err != nil {
- return group, err
- }
-
- switch key {
- case "name":
- group.Name = val
- case "consumers":
- group.Consumers, err = strconv.ParseInt(val, 0, 64)
- if err != nil {
- return group, err
- }
- case "pending":
- group.Pending, err = strconv.ParseInt(val, 0, 64)
- if err != nil {
- return group, err
- }
- case "last-delivered-id":
- group.LastDeliveredID = val
- default:
- return group, fmt.Errorf("redis: unexpected content %s in XINFO GROUPS reply", key)
- }
- }
-
- return group, nil
-}
-
-//------------------------------------------------------------------------------
-
-type XInfoStreamCmd struct {
- baseCmd
- val *XInfoStream
-}
-
-type XInfoStream struct {
- Length int64
- RadixTreeKeys int64
- RadixTreeNodes int64
- Groups int64
- LastGeneratedID string
- FirstEntry XMessage
- LastEntry XMessage
-}
-
-var _ Cmder = (*XInfoStreamCmd)(nil)
-
-func NewXInfoStreamCmd(ctx context.Context, stream string) *XInfoStreamCmd {
- return &XInfoStreamCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: []interface{}{"xinfo", "stream", stream},
- },
- }
-}
-
-func (cmd *XInfoStreamCmd) SetVal(val *XInfoStream) {
- cmd.val = val
-}
-
-func (cmd *XInfoStreamCmd) Val() *XInfoStream {
- return cmd.val
-}
-
-func (cmd *XInfoStreamCmd) Result() (*XInfoStream, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XInfoStreamCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XInfoStreamCmd) readReply(rd *proto.Reader) error {
- v, err := rd.ReadReply(xStreamInfoParser)
- if err != nil {
- return err
- }
- cmd.val = v.(*XInfoStream)
- return nil
-}
-
-func xStreamInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 14 {
- return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM reply,"+
- "wanted 14", n)
- }
- var info XInfoStream
- for i := 0; i < 7; i++ {
- key, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
- switch key {
- case "length":
- info.Length, err = rd.ReadIntReply()
- case "radix-tree-keys":
- info.RadixTreeKeys, err = rd.ReadIntReply()
- case "radix-tree-nodes":
- info.RadixTreeNodes, err = rd.ReadIntReply()
- case "groups":
- info.Groups, err = rd.ReadIntReply()
- case "last-generated-id":
- info.LastGeneratedID, err = rd.ReadString()
- case "first-entry":
- info.FirstEntry, err = readXMessage(rd)
- if err == Nil {
- err = nil
- }
- case "last-entry":
- info.LastEntry, err = readXMessage(rd)
- if err == Nil {
- err = nil
- }
- default:
- return nil, fmt.Errorf("redis: unexpected content %s "+
- "in XINFO STREAM reply", key)
- }
- if err != nil {
- return nil, err
- }
- }
- return &info, nil
-}
-
-//------------------------------------------------------------------------------
-
-type XInfoStreamFullCmd struct {
- baseCmd
- val *XInfoStreamFull
-}
-
-type XInfoStreamFull struct {
- Length int64
- RadixTreeKeys int64
- RadixTreeNodes int64
- LastGeneratedID string
- Entries []XMessage
- Groups []XInfoStreamGroup
-}
-
-type XInfoStreamGroup struct {
- Name string
- LastDeliveredID string
- PelCount int64
- Pending []XInfoStreamGroupPending
- Consumers []XInfoStreamConsumer
-}
-
-type XInfoStreamGroupPending struct {
- ID string
- Consumer string
- DeliveryTime time.Time
- DeliveryCount int64
-}
-
-type XInfoStreamConsumer struct {
- Name string
- SeenTime time.Time
- PelCount int64
- Pending []XInfoStreamConsumerPending
-}
-
-type XInfoStreamConsumerPending struct {
- ID string
- DeliveryTime time.Time
- DeliveryCount int64
-}
-
-var _ Cmder = (*XInfoStreamFullCmd)(nil)
-
-func NewXInfoStreamFullCmd(ctx context.Context, args ...interface{}) *XInfoStreamFullCmd {
- return &XInfoStreamFullCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XInfoStreamFullCmd) SetVal(val *XInfoStreamFull) {
- cmd.val = val
-}
-
-func (cmd *XInfoStreamFullCmd) Val() *XInfoStreamFull {
- return cmd.val
-}
-
-func (cmd *XInfoStreamFullCmd) Result() (*XInfoStreamFull, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XInfoStreamFullCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XInfoStreamFullCmd) readReply(rd *proto.Reader) error {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return err
- }
- if n != 12 {
- return fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+
- "wanted 12", n)
- }
-
- cmd.val = &XInfoStreamFull{}
-
- for i := 0; i < 6; i++ {
- key, err := rd.ReadString()
- if err != nil {
- return err
- }
-
- switch key {
- case "length":
- cmd.val.Length, err = rd.ReadIntReply()
- case "radix-tree-keys":
- cmd.val.RadixTreeKeys, err = rd.ReadIntReply()
- case "radix-tree-nodes":
- cmd.val.RadixTreeNodes, err = rd.ReadIntReply()
- case "last-generated-id":
- cmd.val.LastGeneratedID, err = rd.ReadString()
- case "entries":
- cmd.val.Entries, err = readXMessageSlice(rd)
- case "groups":
- cmd.val.Groups, err = readStreamGroups(rd)
- default:
- return fmt.Errorf("redis: unexpected content %s "+
- "in XINFO STREAM reply", key)
- }
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func readStreamGroups(rd *proto.Reader) ([]XInfoStreamGroup, error) {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- groups := make([]XInfoStreamGroup, 0, n)
- for i := 0; i < n; i++ {
- nn, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if nn != 10 {
- return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+
- "wanted 10", nn)
- }
-
- group := XInfoStreamGroup{}
-
- for f := 0; f < 5; f++ {
- key, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- switch key {
- case "name":
- group.Name, err = rd.ReadString()
- case "last-delivered-id":
- group.LastDeliveredID, err = rd.ReadString()
- case "pel-count":
- group.PelCount, err = rd.ReadIntReply()
- case "pending":
- group.Pending, err = readXInfoStreamGroupPending(rd)
- case "consumers":
- group.Consumers, err = readXInfoStreamConsumers(rd)
- default:
- return nil, fmt.Errorf("redis: unexpected content %s "+
- "in XINFO STREAM reply", key)
- }
-
- if err != nil {
- return nil, err
- }
- }
-
- groups = append(groups, group)
- }
-
- return groups, nil
-}
-
-func readXInfoStreamGroupPending(rd *proto.Reader) ([]XInfoStreamGroupPending, error) {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
-
- pending := make([]XInfoStreamGroupPending, 0, n)
-
- for i := 0; i < n; i++ {
- nn, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if nn != 4 {
- return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+
- "wanted 4", nn)
- }
-
- p := XInfoStreamGroupPending{}
-
- p.ID, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- p.Consumer, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- delivery, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
-
- p.DeliveryCount, err = rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- pending = append(pending, p)
- }
-
- return pending, nil
-}
-
-func readXInfoStreamConsumers(rd *proto.Reader) ([]XInfoStreamConsumer, error) {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
-
- consumers := make([]XInfoStreamConsumer, 0, n)
-
- for i := 0; i < n; i++ {
- nn, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if nn != 8 {
- return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+
- "wanted 8", nn)
- }
-
- c := XInfoStreamConsumer{}
-
- for f := 0; f < 4; f++ {
- cKey, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- switch cKey {
- case "name":
- c.Name, err = rd.ReadString()
- case "seen-time":
- seen, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- c.SeenTime = time.Unix(seen/1000, seen%1000*int64(time.Millisecond))
- case "pel-count":
- c.PelCount, err = rd.ReadIntReply()
- case "pending":
- pendingNumber, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
-
- c.Pending = make([]XInfoStreamConsumerPending, 0, pendingNumber)
-
- for pn := 0; pn < pendingNumber; pn++ {
- nn, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if nn != 3 {
- return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM reply,"+
- "wanted 3", nn)
- }
-
- p := XInfoStreamConsumerPending{}
-
- p.ID, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- delivery, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
-
- p.DeliveryCount, err = rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- c.Pending = append(c.Pending, p)
- }
- default:
- return nil, fmt.Errorf("redis: unexpected content %s "+
- "in XINFO STREAM reply", cKey)
- }
- if err != nil {
- return nil, err
- }
- }
- consumers = append(consumers, c)
- }
-
- return consumers, nil
-}
-
-//------------------------------------------------------------------------------
-
-type ZSliceCmd struct {
- baseCmd
-
- val []Z
-}
-
-var _ Cmder = (*ZSliceCmd)(nil)
-
-func NewZSliceCmd(ctx context.Context, args ...interface{}) *ZSliceCmd {
- return &ZSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *ZSliceCmd) SetVal(val []Z) {
- cmd.val = val
-}
-
-func (cmd *ZSliceCmd) Val() []Z {
- return cmd.val
-}
-
-func (cmd *ZSliceCmd) Result() ([]Z, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *ZSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]Z, n/2)
- for i := 0; i < len(cmd.val); i++ {
- member, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- score, err := rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
-
- cmd.val[i] = Z{
- Member: member,
- Score: score,
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type ZWithKeyCmd struct {
- baseCmd
-
- val *ZWithKey
-}
-
-var _ Cmder = (*ZWithKeyCmd)(nil)
-
-func NewZWithKeyCmd(ctx context.Context, args ...interface{}) *ZWithKeyCmd {
- return &ZWithKeyCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *ZWithKeyCmd) SetVal(val *ZWithKey) {
- cmd.val = val
-}
-
-func (cmd *ZWithKeyCmd) Val() *ZWithKey {
- return cmd.val
-}
-
-func (cmd *ZWithKeyCmd) Result() (*ZWithKey, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *ZWithKeyCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 3 {
- return nil, fmt.Errorf("got %d elements, expected 3", n)
- }
-
- cmd.val = &ZWithKey{}
- var err error
-
- cmd.val.Key, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- cmd.val.Member, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- cmd.val.Score, err = rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
-
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type ScanCmd struct {
- baseCmd
-
- page []string
- cursor uint64
-
- process cmdable
-}
-
-var _ Cmder = (*ScanCmd)(nil)
-
-func NewScanCmd(ctx context.Context, process cmdable, args ...interface{}) *ScanCmd {
- return &ScanCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- process: process,
- }
-}
-
-func (cmd *ScanCmd) SetVal(page []string, cursor uint64) {
- cmd.page = page
- cmd.cursor = cursor
-}
-
-func (cmd *ScanCmd) Val() (keys []string, cursor uint64) {
- return cmd.page, cmd.cursor
-}
-
-func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) {
- return cmd.page, cmd.cursor, cmd.err
-}
-
-func (cmd *ScanCmd) String() string {
- return cmdString(cmd, cmd.page)
-}
-
-func (cmd *ScanCmd) readReply(rd *proto.Reader) (err error) {
- cmd.page, cmd.cursor, err = rd.ReadScanReply()
- return err
-}
-
-// Iterator creates a new ScanIterator.
-func (cmd *ScanCmd) Iterator() *ScanIterator {
- return &ScanIterator{
- cmd: cmd,
- }
-}
-
-//------------------------------------------------------------------------------
-
-type ClusterNode struct {
- ID string
- Addr string
-}
-
-type ClusterSlot struct {
- Start int
- End int
- Nodes []ClusterNode
-}
-
-type ClusterSlotsCmd struct {
- baseCmd
-
- val []ClusterSlot
-}
-
-var _ Cmder = (*ClusterSlotsCmd)(nil)
-
-func NewClusterSlotsCmd(ctx context.Context, args ...interface{}) *ClusterSlotsCmd {
- return &ClusterSlotsCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *ClusterSlotsCmd) SetVal(val []ClusterSlot) {
- cmd.val = val
-}
-
-func (cmd *ClusterSlotsCmd) Val() []ClusterSlot {
- return cmd.val
-}
-
-func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *ClusterSlotsCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]ClusterSlot, n)
- for i := 0; i < len(cmd.val); i++ {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if n < 2 {
- err := fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n)
- return nil, err
- }
-
- start, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- end, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- nodes := make([]ClusterNode, n-2)
- for j := 0; j < len(nodes); j++ {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if n != 2 && n != 3 {
- err := fmt.Errorf("got %d elements in cluster info address, expected 2 or 3", n)
- return nil, err
- }
-
- ip, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- port, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- nodes[j].Addr = net.JoinHostPort(ip, port)
-
- if n == 3 {
- id, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
- nodes[j].ID = id
- }
- }
-
- cmd.val[i] = ClusterSlot{
- Start: int(start),
- End: int(end),
- Nodes: nodes,
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-// GeoLocation is used with GeoAdd to add geospatial location.
-type GeoLocation struct {
- Name string
- Longitude, Latitude, Dist float64
- GeoHash int64
-}
-
-// GeoRadiusQuery is used with GeoRadius to query geospatial index.
-type GeoRadiusQuery struct {
- Radius float64
- // Can be m, km, ft, or mi. Default is km.
- Unit string
- WithCoord bool
- WithDist bool
- WithGeoHash bool
- Count int
- // Can be ASC or DESC. Default is no sort order.
- Sort string
- Store string
- StoreDist string
-}
-
-type GeoLocationCmd struct {
- baseCmd
-
- q *GeoRadiusQuery
- locations []GeoLocation
-}
-
-var _ Cmder = (*GeoLocationCmd)(nil)
-
-func NewGeoLocationCmd(ctx context.Context, q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd {
- return &GeoLocationCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: geoLocationArgs(q, args...),
- },
- q: q,
- }
-}
-
-func geoLocationArgs(q *GeoRadiusQuery, args ...interface{}) []interface{} {
- args = append(args, q.Radius)
- if q.Unit != "" {
- args = append(args, q.Unit)
- } else {
- args = append(args, "km")
- }
- if q.WithCoord {
- args = append(args, "withcoord")
- }
- if q.WithDist {
- args = append(args, "withdist")
- }
- if q.WithGeoHash {
- args = append(args, "withhash")
- }
- if q.Count > 0 {
- args = append(args, "count", q.Count)
- }
- if q.Sort != "" {
- args = append(args, q.Sort)
- }
- if q.Store != "" {
- args = append(args, "store")
- args = append(args, q.Store)
- }
- if q.StoreDist != "" {
- args = append(args, "storedist")
- args = append(args, q.StoreDist)
- }
- return args
-}
-
-func (cmd *GeoLocationCmd) SetVal(locations []GeoLocation) {
- cmd.locations = locations
-}
-
-func (cmd *GeoLocationCmd) Val() []GeoLocation {
- return cmd.locations
-}
-
-func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) {
- return cmd.locations, cmd.err
-}
-
-func (cmd *GeoLocationCmd) String() string {
- return cmdString(cmd, cmd.locations)
-}
-
-func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error {
- v, err := rd.ReadArrayReply(newGeoLocationSliceParser(cmd.q))
- if err != nil {
- return err
- }
- cmd.locations = v.([]GeoLocation)
- return nil
-}
-
-func newGeoLocationSliceParser(q *GeoRadiusQuery) proto.MultiBulkParse {
- return func(rd *proto.Reader, n int64) (interface{}, error) {
- locs := make([]GeoLocation, 0, n)
- for i := int64(0); i < n; i++ {
- v, err := rd.ReadReply(newGeoLocationParser(q))
- if err != nil {
- return nil, err
- }
- switch vv := v.(type) {
- case string:
- locs = append(locs, GeoLocation{
- Name: vv,
- })
- case *GeoLocation:
- // TODO: avoid copying
- locs = append(locs, *vv)
- default:
- return nil, fmt.Errorf("got %T, expected string or *GeoLocation", v)
- }
- }
- return locs, nil
- }
-}
-
-func newGeoLocationParser(q *GeoRadiusQuery) proto.MultiBulkParse {
- return func(rd *proto.Reader, n int64) (interface{}, error) {
- var loc GeoLocation
- var err error
-
- loc.Name, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
- if q.WithDist {
- loc.Dist, err = rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
- }
- if q.WithGeoHash {
- loc.GeoHash, err = rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- }
- if q.WithCoord {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if n != 2 {
- return nil, fmt.Errorf("got %d coordinates, expected 2", n)
- }
-
- loc.Longitude, err = rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
- loc.Latitude, err = rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
- }
-
- return &loc, nil
- }
-}
-
-//------------------------------------------------------------------------------
-
-// GeoSearchQuery is used for GEOSearch/GEOSearchStore command query.
-type GeoSearchQuery struct {
- Member string
-
- // Latitude and Longitude when using FromLonLat option.
- Longitude float64
- Latitude float64
-
- // Distance and unit when using ByRadius option.
- // Can use m, km, ft, or mi. Default is km.
- Radius float64
- RadiusUnit string
-
- // Height, width and unit when using ByBox option.
- // Can be m, km, ft, or mi. Default is km.
- BoxWidth float64
- BoxHeight float64
- BoxUnit string
-
- // Can be ASC or DESC. Default is no sort order.
- Sort string
- Count int
- CountAny bool
-}
-
-type GeoSearchLocationQuery struct {
- GeoSearchQuery
-
- WithCoord bool
- WithDist bool
- WithHash bool
-}
-
-type GeoSearchStoreQuery struct {
- GeoSearchQuery
-
- // When using the StoreDist option, the command stores the items in a
- // sorted set populated with their distance from the center of the circle or box,
- // as a floating-point number, in the same unit specified for that shape.
- StoreDist bool
-}
-
-func geoSearchLocationArgs(q *GeoSearchLocationQuery, args []interface{}) []interface{} {
- args = geoSearchArgs(&q.GeoSearchQuery, args)
-
- if q.WithCoord {
- args = append(args, "withcoord")
- }
- if q.WithDist {
- args = append(args, "withdist")
- }
- if q.WithHash {
- args = append(args, "withhash")
- }
-
- return args
-}
-
-func geoSearchArgs(q *GeoSearchQuery, args []interface{}) []interface{} {
- if q.Member != "" {
- args = append(args, "frommember", q.Member)
- } else {
- args = append(args, "fromlonlat", q.Longitude, q.Latitude)
- }
-
- if q.Radius > 0 {
- if q.RadiusUnit == "" {
- q.RadiusUnit = "km"
- }
- args = append(args, "byradius", q.Radius, q.RadiusUnit)
- } else {
- if q.BoxUnit == "" {
- q.BoxUnit = "km"
- }
- args = append(args, "bybox", q.BoxWidth, q.BoxHeight, q.BoxUnit)
- }
-
- if q.Sort != "" {
- args = append(args, q.Sort)
- }
-
- if q.Count > 0 {
- args = append(args, "count", q.Count)
- if q.CountAny {
- args = append(args, "any")
- }
- }
-
- return args
-}
-
-type GeoSearchLocationCmd struct {
- baseCmd
-
- opt *GeoSearchLocationQuery
- val []GeoLocation
-}
-
-var _ Cmder = (*GeoSearchLocationCmd)(nil)
-
-func NewGeoSearchLocationCmd(
- ctx context.Context, opt *GeoSearchLocationQuery, args ...interface{},
-) *GeoSearchLocationCmd {
- return &GeoSearchLocationCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- opt: opt,
- }
-}
-
-func (cmd *GeoSearchLocationCmd) SetVal(val []GeoLocation) {
- cmd.val = val
-}
-
-func (cmd *GeoSearchLocationCmd) Val() []GeoLocation {
- return cmd.val
-}
-
-func (cmd *GeoSearchLocationCmd) Result() ([]GeoLocation, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *GeoSearchLocationCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *GeoSearchLocationCmd) readReply(rd *proto.Reader) error {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return err
- }
-
- cmd.val = make([]GeoLocation, n)
- for i := 0; i < n; i++ {
- _, err = rd.ReadArrayLen()
- if err != nil {
- return err
- }
-
- var loc GeoLocation
-
- loc.Name, err = rd.ReadString()
- if err != nil {
- return err
- }
- if cmd.opt.WithDist {
- loc.Dist, err = rd.ReadFloatReply()
- if err != nil {
- return err
- }
- }
- if cmd.opt.WithHash {
- loc.GeoHash, err = rd.ReadIntReply()
- if err != nil {
- return err
- }
- }
- if cmd.opt.WithCoord {
- nn, err := rd.ReadArrayLen()
- if err != nil {
- return err
- }
- if nn != 2 {
- return fmt.Errorf("got %d coordinates, expected 2", nn)
- }
-
- loc.Longitude, err = rd.ReadFloatReply()
- if err != nil {
- return err
- }
- loc.Latitude, err = rd.ReadFloatReply()
- if err != nil {
- return err
- }
- }
-
- cmd.val[i] = loc
- }
-
- return nil
-}
-
-//------------------------------------------------------------------------------
-
-type GeoPos struct {
- Longitude, Latitude float64
-}
-
-type GeoPosCmd struct {
- baseCmd
-
- val []*GeoPos
-}
-
-var _ Cmder = (*GeoPosCmd)(nil)
-
-func NewGeoPosCmd(ctx context.Context, args ...interface{}) *GeoPosCmd {
- return &GeoPosCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *GeoPosCmd) SetVal(val []*GeoPos) {
- cmd.val = val
-}
-
-func (cmd *GeoPosCmd) Val() []*GeoPos {
- return cmd.val
-}
-
-func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *GeoPosCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]*GeoPos, n)
- for i := 0; i < len(cmd.val); i++ {
- i := i
- _, err := rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- longitude, err := rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
-
- latitude, err := rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
-
- cmd.val[i] = &GeoPos{
- Longitude: longitude,
- Latitude: latitude,
- }
- return nil, nil
- })
- if err != nil {
- if err == Nil {
- cmd.val[i] = nil
- continue
- }
- return nil, err
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type CommandInfo struct {
- Name string
- Arity int8
- Flags []string
- ACLFlags []string
- FirstKeyPos int8
- LastKeyPos int8
- StepCount int8
- ReadOnly bool
-}
-
-type CommandsInfoCmd struct {
- baseCmd
-
- val map[string]*CommandInfo
-}
-
-var _ Cmder = (*CommandsInfoCmd)(nil)
-
-func NewCommandsInfoCmd(ctx context.Context, args ...interface{}) *CommandsInfoCmd {
- return &CommandsInfoCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *CommandsInfoCmd) SetVal(val map[string]*CommandInfo) {
- cmd.val = val
-}
-
-func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo {
- return cmd.val
-}
-
-func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *CommandsInfoCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make(map[string]*CommandInfo, n)
- for i := int64(0); i < n; i++ {
- v, err := rd.ReadReply(commandInfoParser)
- if err != nil {
- return nil, err
- }
- vv := v.(*CommandInfo)
- cmd.val[vv.Name] = vv
- }
- return nil, nil
- })
- return err
-}
-
-func commandInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
- const numArgRedis5 = 6
- const numArgRedis6 = 7
-
- switch n {
- case numArgRedis5, numArgRedis6:
- // continue
- default:
- return nil, fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 7", n)
- }
-
- var cmd CommandInfo
- var err error
-
- cmd.Name, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- arity, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.Arity = int8(arity)
-
- _, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.Flags = make([]string, n)
- for i := 0; i < len(cmd.Flags); i++ {
- switch s, err := rd.ReadString(); {
- case err == Nil:
- cmd.Flags[i] = ""
- case err != nil:
- return nil, err
- default:
- cmd.Flags[i] = s
- }
- }
- return nil, nil
- })
- if err != nil {
- return nil, err
- }
-
- firstKeyPos, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.FirstKeyPos = int8(firstKeyPos)
-
- lastKeyPos, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.LastKeyPos = int8(lastKeyPos)
-
- stepCount, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.StepCount = int8(stepCount)
-
- for _, flag := range cmd.Flags {
- if flag == "readonly" {
- cmd.ReadOnly = true
- break
- }
- }
-
- if n == numArgRedis5 {
- return &cmd, nil
- }
-
- _, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.ACLFlags = make([]string, n)
- for i := 0; i < len(cmd.ACLFlags); i++ {
- switch s, err := rd.ReadString(); {
- case err == Nil:
- cmd.ACLFlags[i] = ""
- case err != nil:
- return nil, err
- default:
- cmd.ACLFlags[i] = s
- }
- }
- return nil, nil
- })
- if err != nil {
- return nil, err
- }
-
- return &cmd, nil
-}
-
-//------------------------------------------------------------------------------
-
-type cmdsInfoCache struct {
- fn func(ctx context.Context) (map[string]*CommandInfo, error)
-
- once internal.Once
- cmds map[string]*CommandInfo
-}
-
-func newCmdsInfoCache(fn func(ctx context.Context) (map[string]*CommandInfo, error)) *cmdsInfoCache {
- return &cmdsInfoCache{
- fn: fn,
- }
-}
-
-func (c *cmdsInfoCache) Get(ctx context.Context) (map[string]*CommandInfo, error) {
- err := c.once.Do(func() error {
- cmds, err := c.fn(ctx)
- if err != nil {
- return err
- }
-
- // Extensions have cmd names in upper case. Convert them to lower case.
- for k, v := range cmds {
- lower := internal.ToLower(k)
- if lower != k {
- cmds[lower] = v
- }
- }
-
- c.cmds = cmds
- return nil
- })
- return c.cmds, err
-}
-
-//------------------------------------------------------------------------------
-
-type SlowLog struct {
- ID int64
- Time time.Time
- Duration time.Duration
- Args []string
- // These are also optional fields emitted only by Redis 4.0 or greater:
- // https://redis.io/commands/slowlog#output-format
- ClientAddr string
- ClientName string
-}
-
-type SlowLogCmd struct {
- baseCmd
-
- val []SlowLog
-}
-
-var _ Cmder = (*SlowLogCmd)(nil)
-
-func NewSlowLogCmd(ctx context.Context, args ...interface{}) *SlowLogCmd {
- return &SlowLogCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *SlowLogCmd) SetVal(val []SlowLog) {
- cmd.val = val
-}
-
-func (cmd *SlowLogCmd) Val() []SlowLog {
- return cmd.val
-}
-
-func (cmd *SlowLogCmd) Result() ([]SlowLog, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *SlowLogCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *SlowLogCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]SlowLog, n)
- for i := 0; i < len(cmd.val); i++ {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if n < 4 {
- err := fmt.Errorf("redis: got %d elements in slowlog get, expected at least 4", n)
- return nil, err
- }
-
- id, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- createdAt, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- createdAtTime := time.Unix(createdAt, 0)
-
- costs, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- costsDuration := time.Duration(costs) * time.Microsecond
-
- cmdLen, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if cmdLen < 1 {
- err := fmt.Errorf("redis: got %d elements commands reply in slowlog get, expected at least 1", cmdLen)
- return nil, err
- }
-
- cmdString := make([]string, cmdLen)
- for i := 0; i < cmdLen; i++ {
- cmdString[i], err = rd.ReadString()
- if err != nil {
- return nil, err
- }
- }
-
- var address, name string
- for i := 4; i < n; i++ {
- str, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
- if i == 4 {
- address = str
- } else if i == 5 {
- name = str
- }
- }
-
- cmd.val[i] = SlowLog{
- ID: id,
- Time: createdAtTime,
- Duration: costsDuration,
- Args: cmdString,
- ClientAddr: address,
- ClientName: name,
- }
- }
- return nil, nil
- })
- return err
-}
diff --git a/vendor/github.com/go-redis/redis/v8/commands.go b/vendor/github.com/go-redis/redis/v8/commands.go
deleted file mode 100644
index bbfe089d..00000000
--- a/vendor/github.com/go-redis/redis/v8/commands.go
+++ /dev/null
@@ -1,3475 +0,0 @@
-package redis
-
-import (
- "context"
- "errors"
- "io"
- "time"
-
- "github.com/go-redis/redis/v8/internal"
-)
-
-// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
-// otherwise you will receive an error: (error) ERR syntax error.
-// For example:
-//
-// rdb.Set(ctx, key, value, redis.KeepTTL)
-const KeepTTL = -1
-
-func usePrecise(dur time.Duration) bool {
- return dur < time.Second || dur%time.Second != 0
-}
-
-func formatMs(ctx context.Context, dur time.Duration) int64 {
- if dur > 0 && dur < time.Millisecond {
- internal.Logger.Printf(
- ctx,
- "specified duration is %s, but minimal supported value is %s - truncating to 1ms",
- dur, time.Millisecond,
- )
- return 1
- }
- return int64(dur / time.Millisecond)
-}
-
-func formatSec(ctx context.Context, dur time.Duration) int64 {
- if dur > 0 && dur < time.Second {
- internal.Logger.Printf(
- ctx,
- "specified duration is %s, but minimal supported value is %s - truncating to 1s",
- dur, time.Second,
- )
- return 1
- }
- return int64(dur / time.Second)
-}
-
-func appendArgs(dst, src []interface{}) []interface{} {
- if len(src) == 1 {
- return appendArg(dst, src[0])
- }
-
- dst = append(dst, src...)
- return dst
-}
-
-func appendArg(dst []interface{}, arg interface{}) []interface{} {
- switch arg := arg.(type) {
- case []string:
- for _, s := range arg {
- dst = append(dst, s)
- }
- return dst
- case []interface{}:
- dst = append(dst, arg...)
- return dst
- case map[string]interface{}:
- for k, v := range arg {
- dst = append(dst, k, v)
- }
- return dst
- case map[string]string:
- for k, v := range arg {
- dst = append(dst, k, v)
- }
- return dst
- default:
- return append(dst, arg)
- }
-}
-
-type Cmdable interface {
- Pipeline() Pipeliner
- Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error)
-
- TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error)
- TxPipeline() Pipeliner
-
- Command(ctx context.Context) *CommandsInfoCmd
- ClientGetName(ctx context.Context) *StringCmd
- Echo(ctx context.Context, message interface{}) *StringCmd
- Ping(ctx context.Context) *StatusCmd
- Quit(ctx context.Context) *StatusCmd
- Del(ctx context.Context, keys ...string) *IntCmd
- Unlink(ctx context.Context, keys ...string) *IntCmd
- Dump(ctx context.Context, key string) *StringCmd
- Exists(ctx context.Context, keys ...string) *IntCmd
- Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd
- ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd
- ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd
- ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd
- ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd
- ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd
- Keys(ctx context.Context, pattern string) *StringSliceCmd
- Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd
- Move(ctx context.Context, key string, db int) *BoolCmd
- ObjectRefCount(ctx context.Context, key string) *IntCmd
- ObjectEncoding(ctx context.Context, key string) *StringCmd
- ObjectIdleTime(ctx context.Context, key string) *DurationCmd
- Persist(ctx context.Context, key string) *BoolCmd
- PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd
- PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd
- PTTL(ctx context.Context, key string) *DurationCmd
- RandomKey(ctx context.Context) *StringCmd
- Rename(ctx context.Context, key, newkey string) *StatusCmd
- RenameNX(ctx context.Context, key, newkey string) *BoolCmd
- Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd
- RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd
- Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd
- SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd
- SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd
- Touch(ctx context.Context, keys ...string) *IntCmd
- TTL(ctx context.Context, key string) *DurationCmd
- Type(ctx context.Context, key string) *StatusCmd
- Append(ctx context.Context, key, value string) *IntCmd
- Decr(ctx context.Context, key string) *IntCmd
- DecrBy(ctx context.Context, key string, decrement int64) *IntCmd
- Get(ctx context.Context, key string) *StringCmd
- GetRange(ctx context.Context, key string, start, end int64) *StringCmd
- GetSet(ctx context.Context, key string, value interface{}) *StringCmd
- GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd
- GetDel(ctx context.Context, key string) *StringCmd
- Incr(ctx context.Context, key string) *IntCmd
- IncrBy(ctx context.Context, key string, value int64) *IntCmd
- IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd
- MGet(ctx context.Context, keys ...string) *SliceCmd
- MSet(ctx context.Context, values ...interface{}) *StatusCmd
- MSetNX(ctx context.Context, values ...interface{}) *BoolCmd
- Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
- SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd
- // TODO: rename to SetEx
- SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
- SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
- SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
- SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd
- StrLen(ctx context.Context, key string) *IntCmd
- Copy(ctx context.Context, sourceKey string, destKey string, db int, replace bool) *IntCmd
-
- GetBit(ctx context.Context, key string, offset int64) *IntCmd
- SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd
- BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd
- BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd
- BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd
- BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd
- BitOpNot(ctx context.Context, destKey string, key string) *IntCmd
- BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd
- BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd
-
- Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd
- ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd
- SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
- HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
- ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
-
- HDel(ctx context.Context, key string, fields ...string) *IntCmd
- HExists(ctx context.Context, key, field string) *BoolCmd
- HGet(ctx context.Context, key, field string) *StringCmd
- HGetAll(ctx context.Context, key string) *StringStringMapCmd
- HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd
- HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd
- HKeys(ctx context.Context, key string) *StringSliceCmd
- HLen(ctx context.Context, key string) *IntCmd
- HMGet(ctx context.Context, key string, fields ...string) *SliceCmd
- HSet(ctx context.Context, key string, values ...interface{}) *IntCmd
- HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd
- HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd
- HVals(ctx context.Context, key string) *StringSliceCmd
- HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd
-
- BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
- BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
- BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd
- LIndex(ctx context.Context, key string, index int64) *StringCmd
- LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd
- LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd
- LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd
- LLen(ctx context.Context, key string) *IntCmd
- LPop(ctx context.Context, key string) *StringCmd
- LPopCount(ctx context.Context, key string, count int) *StringSliceCmd
- LPos(ctx context.Context, key string, value string, args LPosArgs) *IntCmd
- LPosCount(ctx context.Context, key string, value string, count int64, args LPosArgs) *IntSliceCmd
- LPush(ctx context.Context, key string, values ...interface{}) *IntCmd
- LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd
- LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
- LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd
- LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd
- LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd
- RPop(ctx context.Context, key string) *StringCmd
- RPopCount(ctx context.Context, key string, count int) *StringSliceCmd
- RPopLPush(ctx context.Context, source, destination string) *StringCmd
- RPush(ctx context.Context, key string, values ...interface{}) *IntCmd
- RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd
- LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd
- BLMove(ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration) *StringCmd
-
- SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd
- SCard(ctx context.Context, key string) *IntCmd
- SDiff(ctx context.Context, keys ...string) *StringSliceCmd
- SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd
- SInter(ctx context.Context, keys ...string) *StringSliceCmd
- SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd
- SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd
- SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd
- SMembers(ctx context.Context, key string) *StringSliceCmd
- SMembersMap(ctx context.Context, key string) *StringStructMapCmd
- SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd
- SPop(ctx context.Context, key string) *StringCmd
- SPopN(ctx context.Context, key string, count int64) *StringSliceCmd
- SRandMember(ctx context.Context, key string) *StringCmd
- SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd
- SRem(ctx context.Context, key string, members ...interface{}) *IntCmd
- SUnion(ctx context.Context, keys ...string) *StringSliceCmd
- SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd
-
- XAdd(ctx context.Context, a *XAddArgs) *StringCmd
- XDel(ctx context.Context, stream string, ids ...string) *IntCmd
- XLen(ctx context.Context, stream string) *IntCmd
- XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd
- XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd
- XRevRange(ctx context.Context, stream string, start, stop string) *XMessageSliceCmd
- XRevRangeN(ctx context.Context, stream string, start, stop string, count int64) *XMessageSliceCmd
- XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd
- XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd
- XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd
- XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd
- XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd
- XGroupDestroy(ctx context.Context, stream, group string) *IntCmd
- XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd
- XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd
- XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd
- XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd
- XPending(ctx context.Context, stream, group string) *XPendingCmd
- XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd
- XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd
- XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd
- XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd
- XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd
-
- // TODO: XTrim and XTrimApprox remove in v9.
- XTrim(ctx context.Context, key string, maxLen int64) *IntCmd
- XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd
- XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd
- XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd
- XTrimMinID(ctx context.Context, key string, minID string) *IntCmd
- XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd
- XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd
- XInfoStream(ctx context.Context, key string) *XInfoStreamCmd
- XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd
- XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd
-
- BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
- BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
-
- // TODO: remove
- // ZAddCh
- // ZIncr
- // ZAddNXCh
- // ZAddXXCh
- // ZIncrNX
- // ZIncrXX
- // in v9.
- // use ZAddArgs and ZAddArgsIncr.
-
- ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd
- ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd
- ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd
- ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd
- ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd
- ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd
- ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd
- ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd
- ZIncr(ctx context.Context, key string, member *Z) *FloatCmd
- ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd
- ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd
- ZCard(ctx context.Context, key string) *IntCmd
- ZCount(ctx context.Context, key, min, max string) *IntCmd
- ZLexCount(ctx context.Context, key, min, max string) *IntCmd
- ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd
- ZInter(ctx context.Context, store *ZStore) *StringSliceCmd
- ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd
- ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd
- ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd
- ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd
- ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd
- ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
- ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd
- ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
- ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
- ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
- ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd
- ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd
- ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd
- ZRank(ctx context.Context, key, member string) *IntCmd
- ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd
- ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd
- ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd
- ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd
- ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
- ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd
- ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
- ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
- ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
- ZRevRank(ctx context.Context, key, member string) *IntCmd
- ZScore(ctx context.Context, key, member string) *FloatCmd
- ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd
- ZUnion(ctx context.Context, store ZStore) *StringSliceCmd
- ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd
- ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd
- ZDiff(ctx context.Context, keys ...string) *StringSliceCmd
- ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd
- ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd
-
- PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd
- PFCount(ctx context.Context, keys ...string) *IntCmd
- PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd
-
- BgRewriteAOF(ctx context.Context) *StatusCmd
- BgSave(ctx context.Context) *StatusCmd
- ClientKill(ctx context.Context, ipPort string) *StatusCmd
- ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd
- ClientList(ctx context.Context) *StringCmd
- ClientPause(ctx context.Context, dur time.Duration) *BoolCmd
- ClientID(ctx context.Context) *IntCmd
- ConfigGet(ctx context.Context, parameter string) *SliceCmd
- ConfigResetStat(ctx context.Context) *StatusCmd
- ConfigSet(ctx context.Context, parameter, value string) *StatusCmd
- ConfigRewrite(ctx context.Context) *StatusCmd
- DBSize(ctx context.Context) *IntCmd
- FlushAll(ctx context.Context) *StatusCmd
- FlushAllAsync(ctx context.Context) *StatusCmd
- FlushDB(ctx context.Context) *StatusCmd
- FlushDBAsync(ctx context.Context) *StatusCmd
- Info(ctx context.Context, section ...string) *StringCmd
- LastSave(ctx context.Context) *IntCmd
- Save(ctx context.Context) *StatusCmd
- Shutdown(ctx context.Context) *StatusCmd
- ShutdownSave(ctx context.Context) *StatusCmd
- ShutdownNoSave(ctx context.Context) *StatusCmd
- SlaveOf(ctx context.Context, host, port string) *StatusCmd
- Time(ctx context.Context) *TimeCmd
- DebugObject(ctx context.Context, key string) *StringCmd
- ReadOnly(ctx context.Context) *StatusCmd
- ReadWrite(ctx context.Context) *StatusCmd
- MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd
-
- Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
- EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
- ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
- ScriptFlush(ctx context.Context) *StatusCmd
- ScriptKill(ctx context.Context) *StatusCmd
- ScriptLoad(ctx context.Context, script string) *StringCmd
-
- Publish(ctx context.Context, channel string, message interface{}) *IntCmd
- PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd
- PubSubNumSub(ctx context.Context, channels ...string) *StringIntMapCmd
- PubSubNumPat(ctx context.Context) *IntCmd
-
- ClusterSlots(ctx context.Context) *ClusterSlotsCmd
- ClusterNodes(ctx context.Context) *StringCmd
- ClusterMeet(ctx context.Context, host, port string) *StatusCmd
- ClusterForget(ctx context.Context, nodeID string) *StatusCmd
- ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd
- ClusterResetSoft(ctx context.Context) *StatusCmd
- ClusterResetHard(ctx context.Context) *StatusCmd
- ClusterInfo(ctx context.Context) *StringCmd
- ClusterKeySlot(ctx context.Context, key string) *IntCmd
- ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd
- ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd
- ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd
- ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd
- ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd
- ClusterSaveConfig(ctx context.Context) *StatusCmd
- ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd
- ClusterFailover(ctx context.Context) *StatusCmd
- ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd
- ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd
-
- GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd
- GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd
- GeoRadius(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd
- GeoRadiusStore(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *IntCmd
- GeoRadiusByMember(ctx context.Context, key, member string, query *GeoRadiusQuery) *GeoLocationCmd
- GeoRadiusByMemberStore(ctx context.Context, key, member string, query *GeoRadiusQuery) *IntCmd
- GeoSearch(ctx context.Context, key string, q *GeoSearchQuery) *StringSliceCmd
- GeoSearchLocation(ctx context.Context, key string, q *GeoSearchLocationQuery) *GeoSearchLocationCmd
- GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd
- GeoDist(ctx context.Context, key string, member1, member2, unit string) *FloatCmd
- GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd
-}
-
-type StatefulCmdable interface {
- Cmdable
- Auth(ctx context.Context, password string) *StatusCmd
- AuthACL(ctx context.Context, username, password string) *StatusCmd
- Select(ctx context.Context, index int) *StatusCmd
- SwapDB(ctx context.Context, index1, index2 int) *StatusCmd
- ClientSetName(ctx context.Context, name string) *BoolCmd
-}
-
-var (
- _ Cmdable = (*Client)(nil)
- _ Cmdable = (*Tx)(nil)
- _ Cmdable = (*Ring)(nil)
- _ Cmdable = (*ClusterClient)(nil)
-)
-
-type cmdable func(ctx context.Context, cmd Cmder) error
-
-type statefulCmdable func(ctx context.Context, cmd Cmder) error
-
-//------------------------------------------------------------------------------
-
-func (c statefulCmdable) Auth(ctx context.Context, password string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "auth", password)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// AuthACL Perform an AUTH command, using the given user and pass.
-// Should be used to authenticate the current connection with one of the connections defined in the ACL list
-// when connecting to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
-func (c statefulCmdable) AuthACL(ctx context.Context, username, password string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "auth", username, password)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Wait(ctx context.Context, numSlaves int, timeout time.Duration) *IntCmd {
- cmd := NewIntCmd(ctx, "wait", numSlaves, int(timeout/time.Millisecond))
- cmd.setReadTimeout(timeout)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c statefulCmdable) Select(ctx context.Context, index int) *StatusCmd {
- cmd := NewStatusCmd(ctx, "select", index)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c statefulCmdable) SwapDB(ctx context.Context, index1, index2 int) *StatusCmd {
- cmd := NewStatusCmd(ctx, "swapdb", index1, index2)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ClientSetName assigns a name to the connection.
-func (c statefulCmdable) ClientSetName(ctx context.Context, name string) *BoolCmd {
- cmd := NewBoolCmd(ctx, "client", "setname", name)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) Command(ctx context.Context) *CommandsInfoCmd {
- cmd := NewCommandsInfoCmd(ctx, "command")
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ClientGetName returns the name of the connection.
-func (c cmdable) ClientGetName(ctx context.Context) *StringCmd {
- cmd := NewStringCmd(ctx, "client", "getname")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Echo(ctx context.Context, message interface{}) *StringCmd {
- cmd := NewStringCmd(ctx, "echo", message)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Ping(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "ping")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Quit(_ context.Context) *StatusCmd {
- panic("not implemented")
-}
-
-func (c cmdable) Del(ctx context.Context, keys ...string) *IntCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "del"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Unlink(ctx context.Context, keys ...string) *IntCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "unlink"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Dump(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "dump", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Exists(ctx context.Context, keys ...string) *IntCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "exists"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
- return c.expire(ctx, key, expiration, "")
-}
-
-func (c cmdable) ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
- return c.expire(ctx, key, expiration, "NX")
-}
-
-func (c cmdable) ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
- return c.expire(ctx, key, expiration, "XX")
-}
-
-func (c cmdable) ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
- return c.expire(ctx, key, expiration, "GT")
-}
-
-func (c cmdable) ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
- return c.expire(ctx, key, expiration, "LT")
-}
-
-func (c cmdable) expire(
- ctx context.Context, key string, expiration time.Duration, mode string,
-) *BoolCmd {
- args := make([]interface{}, 3, 4)
- args[0] = "expire"
- args[1] = key
- args[2] = formatSec(ctx, expiration)
- if mode != "" {
- args = append(args, mode)
- }
-
- cmd := NewBoolCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd {
- cmd := NewBoolCmd(ctx, "expireat", key, tm.Unix())
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Keys(ctx context.Context, pattern string) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "keys", pattern)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd {
- cmd := NewStatusCmd(
- ctx,
- "migrate",
- host,
- port,
- key,
- db,
- formatMs(ctx, timeout),
- )
- cmd.setReadTimeout(timeout)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Move(ctx context.Context, key string, db int) *BoolCmd {
- cmd := NewBoolCmd(ctx, "move", key, db)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ObjectRefCount(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "object", "refcount", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ObjectEncoding(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "object", "encoding", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ObjectIdleTime(ctx context.Context, key string) *DurationCmd {
- cmd := NewDurationCmd(ctx, time.Second, "object", "idletime", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Persist(ctx context.Context, key string) *BoolCmd {
- cmd := NewBoolCmd(ctx, "persist", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
- cmd := NewBoolCmd(ctx, "pexpire", key, formatMs(ctx, expiration))
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd {
- cmd := NewBoolCmd(
- ctx,
- "pexpireat",
- key,
- tm.UnixNano()/int64(time.Millisecond),
- )
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) PTTL(ctx context.Context, key string) *DurationCmd {
- cmd := NewDurationCmd(ctx, time.Millisecond, "pttl", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) RandomKey(ctx context.Context) *StringCmd {
- cmd := NewStringCmd(ctx, "randomkey")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Rename(ctx context.Context, key, newkey string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "rename", key, newkey)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) RenameNX(ctx context.Context, key, newkey string) *BoolCmd {
- cmd := NewBoolCmd(ctx, "renamenx", key, newkey)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd {
- cmd := NewStatusCmd(
- ctx,
- "restore",
- key,
- formatMs(ctx, ttl),
- value,
- )
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd {
- cmd := NewStatusCmd(
- ctx,
- "restore",
- key,
- formatMs(ctx, ttl),
- value,
- "replace",
- )
- _ = c(ctx, cmd)
- return cmd
-}
-
-type Sort struct {
- By string
- Offset, Count int64
- Get []string
- Order string
- Alpha bool
-}
-
-func (sort *Sort) args(key string) []interface{} {
- args := []interface{}{"sort", key}
- if sort.By != "" {
- args = append(args, "by", sort.By)
- }
- if sort.Offset != 0 || sort.Count != 0 {
- args = append(args, "limit", sort.Offset, sort.Count)
- }
- for _, get := range sort.Get {
- args = append(args, "get", get)
- }
- if sort.Order != "" {
- args = append(args, sort.Order)
- }
- if sort.Alpha {
- args = append(args, "alpha")
- }
- return args
-}
-
-func (c cmdable) Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, sort.args(key)...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd {
- args := sort.args(key)
- if store != "" {
- args = append(args, "store", store)
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd {
- cmd := NewSliceCmd(ctx, sort.args(key)...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Touch(ctx context.Context, keys ...string) *IntCmd {
- args := make([]interface{}, len(keys)+1)
- args[0] = "touch"
- for i, key := range keys {
- args[i+1] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) TTL(ctx context.Context, key string) *DurationCmd {
- cmd := NewDurationCmd(ctx, time.Second, "ttl", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Type(ctx context.Context, key string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "type", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Append(ctx context.Context, key, value string) *IntCmd {
- cmd := NewIntCmd(ctx, "append", key, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Decr(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "decr", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) DecrBy(ctx context.Context, key string, decrement int64) *IntCmd {
- cmd := NewIntCmd(ctx, "decrby", key, decrement)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// Get Redis `GET key` command. It returns redis.Nil error when key does not exist.
-func (c cmdable) Get(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "get", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) GetRange(ctx context.Context, key string, start, end int64) *StringCmd {
- cmd := NewStringCmd(ctx, "getrange", key, start, end)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) GetSet(ctx context.Context, key string, value interface{}) *StringCmd {
- cmd := NewStringCmd(ctx, "getset", key, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// GetEx An expiration of zero removes the TTL associated with the key (i.e. GETEX key persist).
-// Requires Redis >= 6.2.0.
-func (c cmdable) GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd {
- args := make([]interface{}, 0, 4)
- args = append(args, "getex", key)
- if expiration > 0 {
- if usePrecise(expiration) {
- args = append(args, "px", formatMs(ctx, expiration))
- } else {
- args = append(args, "ex", formatSec(ctx, expiration))
- }
- } else if expiration == 0 {
- args = append(args, "persist")
- }
-
- cmd := NewStringCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// GetDel redis-server version >= 6.2.0.
-func (c cmdable) GetDel(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "getdel", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Incr(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "incr", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) IncrBy(ctx context.Context, key string, value int64) *IntCmd {
- cmd := NewIntCmd(ctx, "incrby", key, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd {
- cmd := NewFloatCmd(ctx, "incrbyfloat", key, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) MGet(ctx context.Context, keys ...string) *SliceCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "mget"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// MSet is like Set but accepts multiple values:
-// - MSet("key1", "value1", "key2", "value2")
-// - MSet([]string{"key1", "value1", "key2", "value2"})
-// - MSet(map[string]interface{}{"key1": "value1", "key2": "value2"})
-func (c cmdable) MSet(ctx context.Context, values ...interface{}) *StatusCmd {
- args := make([]interface{}, 1, 1+len(values))
- args[0] = "mset"
- args = appendArgs(args, values)
- cmd := NewStatusCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// MSetNX is like SetNX but accepts multiple values:
-// - MSetNX("key1", "value1", "key2", "value2")
-// - MSetNX([]string{"key1", "value1", "key2", "value2"})
-// - MSetNX(map[string]interface{}{"key1": "value1", "key2": "value2"})
-func (c cmdable) MSetNX(ctx context.Context, values ...interface{}) *BoolCmd {
- args := make([]interface{}, 1, 1+len(values))
- args[0] = "msetnx"
- args = appendArgs(args, values)
- cmd := NewBoolCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// Set Redis `SET key value [expiration]` command.
-// Use expiration for `SETEX`-like behavior.
-//
-// Zero expiration means the key has no expiration time.
-// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
-// otherwise you will receive an error: (error) ERR syntax error.
-func (c cmdable) Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
- args := make([]interface{}, 3, 5)
- args[0] = "set"
- args[1] = key
- args[2] = value
- if expiration > 0 {
- if usePrecise(expiration) {
- args = append(args, "px", formatMs(ctx, expiration))
- } else {
- args = append(args, "ex", formatSec(ctx, expiration))
- }
- } else if expiration == KeepTTL {
- args = append(args, "keepttl")
- }
-
- cmd := NewStatusCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SetArgs provides arguments for the SetArgs function.
-type SetArgs struct {
- // Mode can be `NX` or `XX` or empty.
- Mode string
-
- // Zero `TTL` or `Expiration` means that the key has no expiration time.
- TTL time.Duration
- ExpireAt time.Time
-
- // When Get is true, the command returns the old value stored at key, or nil when key did not exist.
- Get bool
-
- // KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
- // otherwise you will receive an error: (error) ERR syntax error.
- KeepTTL bool
-}
-
-// SetArgs supports all the options that the SET command supports.
-// It is the alternative to the Set function when you want
-// to have more control over the options.
-func (c cmdable) SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd {
- args := []interface{}{"set", key, value}
-
- if a.KeepTTL {
- args = append(args, "keepttl")
- }
-
- if !a.ExpireAt.IsZero() {
- args = append(args, "exat", a.ExpireAt.Unix())
- }
- if a.TTL > 0 {
- if usePrecise(a.TTL) {
- args = append(args, "px", formatMs(ctx, a.TTL))
- } else {
- args = append(args, "ex", formatSec(ctx, a.TTL))
- }
- }
-
- if a.Mode != "" {
- args = append(args, a.Mode)
- }
-
- if a.Get {
- args = append(args, "get")
- }
-
- cmd := NewStatusCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SetEX Redis `SETEX key expiration value` command.
-func (c cmdable) SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
- cmd := NewStatusCmd(ctx, "setex", key, formatSec(ctx, expiration), value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SetNX Redis `SET key value [expiration] NX` command.
-//
-// Zero expiration means the key has no expiration time.
-// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
-// otherwise you will receive an error: (error) ERR syntax error.
-func (c cmdable) SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
- var cmd *BoolCmd
- switch expiration {
- case 0:
- // Use old `SETNX` to support old Redis versions.
- cmd = NewBoolCmd(ctx, "setnx", key, value)
- case KeepTTL:
- cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "nx")
- default:
- if usePrecise(expiration) {
- cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "nx")
- } else {
- cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "nx")
- }
- }
-
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SetXX Redis `SET key value [expiration] XX` command.
-//
-// Zero expiration means the key has no expiration time.
-// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
-// otherwise you will receive an error: (error) ERR syntax error.
-func (c cmdable) SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
- var cmd *BoolCmd
- switch expiration {
- case 0:
- cmd = NewBoolCmd(ctx, "set", key, value, "xx")
- case KeepTTL:
- cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "xx")
- default:
- if usePrecise(expiration) {
- cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "xx")
- } else {
- cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "xx")
- }
- }
-
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd {
- cmd := NewIntCmd(ctx, "setrange", key, offset, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) StrLen(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "strlen", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Copy(ctx context.Context, sourceKey, destKey string, db int, replace bool) *IntCmd {
- args := []interface{}{"copy", sourceKey, destKey, "DB", db}
- if replace {
- args = append(args, "REPLACE")
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) GetBit(ctx context.Context, key string, offset int64) *IntCmd {
- cmd := NewIntCmd(ctx, "getbit", key, offset)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd {
- cmd := NewIntCmd(
- ctx,
- "setbit",
- key,
- offset,
- value,
- )
- _ = c(ctx, cmd)
- return cmd
-}
-
-type BitCount struct {
- Start, End int64
-}
-
-func (c cmdable) BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd {
- args := []interface{}{"bitcount", key}
- if bitCount != nil {
- args = append(
- args,
- bitCount.Start,
- bitCount.End,
- )
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) bitOp(ctx context.Context, op, destKey string, keys ...string) *IntCmd {
- args := make([]interface{}, 3+len(keys))
- args[0] = "bitop"
- args[1] = op
- args[2] = destKey
- for i, key := range keys {
- args[3+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd {
- return c.bitOp(ctx, "and", destKey, keys...)
-}
-
-func (c cmdable) BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd {
- return c.bitOp(ctx, "or", destKey, keys...)
-}
-
-func (c cmdable) BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd {
- return c.bitOp(ctx, "xor", destKey, keys...)
-}
-
-func (c cmdable) BitOpNot(ctx context.Context, destKey string, key string) *IntCmd {
- return c.bitOp(ctx, "not", destKey, key)
-}
-
-func (c cmdable) BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd {
- args := make([]interface{}, 3+len(pos))
- args[0] = "bitpos"
- args[1] = key
- args[2] = bit
- switch len(pos) {
- case 0:
- case 1:
- args[3] = pos[0]
- case 2:
- args[3] = pos[0]
- args[4] = pos[1]
- default:
- panic("too many arguments")
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd {
- a := make([]interface{}, 0, 2+len(args))
- a = append(a, "bitfield")
- a = append(a, key)
- a = append(a, args...)
- cmd := NewIntSliceCmd(ctx, a...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd {
- args := []interface{}{"scan", cursor}
- if match != "" {
- args = append(args, "match", match)
- }
- if count > 0 {
- args = append(args, "count", count)
- }
- cmd := NewScanCmd(ctx, c, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd {
- args := []interface{}{"scan", cursor}
- if match != "" {
- args = append(args, "match", match)
- }
- if count > 0 {
- args = append(args, "count", count)
- }
- if keyType != "" {
- args = append(args, "type", keyType)
- }
- cmd := NewScanCmd(ctx, c, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
- args := []interface{}{"sscan", key, cursor}
- if match != "" {
- args = append(args, "match", match)
- }
- if count > 0 {
- args = append(args, "count", count)
- }
- cmd := NewScanCmd(ctx, c, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
- args := []interface{}{"hscan", key, cursor}
- if match != "" {
- args = append(args, "match", match)
- }
- if count > 0 {
- args = append(args, "count", count)
- }
- cmd := NewScanCmd(ctx, c, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
- args := []interface{}{"zscan", key, cursor}
- if match != "" {
- args = append(args, "match", match)
- }
- if count > 0 {
- args = append(args, "count", count)
- }
- cmd := NewScanCmd(ctx, c, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) HDel(ctx context.Context, key string, fields ...string) *IntCmd {
- args := make([]interface{}, 2+len(fields))
- args[0] = "hdel"
- args[1] = key
- for i, field := range fields {
- args[2+i] = field
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HExists(ctx context.Context, key, field string) *BoolCmd {
- cmd := NewBoolCmd(ctx, "hexists", key, field)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HGet(ctx context.Context, key, field string) *StringCmd {
- cmd := NewStringCmd(ctx, "hget", key, field)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HGetAll(ctx context.Context, key string) *StringStringMapCmd {
- cmd := NewStringStringMapCmd(ctx, "hgetall", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd {
- cmd := NewIntCmd(ctx, "hincrby", key, field, incr)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd {
- cmd := NewFloatCmd(ctx, "hincrbyfloat", key, field, incr)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HKeys(ctx context.Context, key string) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "hkeys", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HLen(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "hlen", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// HMGet returns the values for the specified fields in the hash stored at key.
-// It returns an interface{} to distinguish between empty string and nil value.
-func (c cmdable) HMGet(ctx context.Context, key string, fields ...string) *SliceCmd {
- args := make([]interface{}, 2+len(fields))
- args[0] = "hmget"
- args[1] = key
- for i, field := range fields {
- args[2+i] = field
- }
- cmd := NewSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// HSet accepts values in following formats:
-// - HSet("myhash", "key1", "value1", "key2", "value2")
-// - HSet("myhash", []string{"key1", "value1", "key2", "value2"})
-// - HSet("myhash", map[string]interface{}{"key1": "value1", "key2": "value2"})
-//
-// Note that it requires Redis v4 for multiple field/value pairs support.
-func (c cmdable) HSet(ctx context.Context, key string, values ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(values))
- args[0] = "hset"
- args[1] = key
- args = appendArgs(args, values)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// HMSet is a deprecated version of HSet left for compatibility with Redis 3.
-func (c cmdable) HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd {
- args := make([]interface{}, 2, 2+len(values))
- args[0] = "hmset"
- args[1] = key
- args = appendArgs(args, values)
- cmd := NewBoolCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd {
- cmd := NewBoolCmd(ctx, "hsetnx", key, field, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HVals(ctx context.Context, key string) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "hvals", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// HRandField redis-server version >= 6.2.0.
-func (c cmdable) HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd {
- args := make([]interface{}, 0, 4)
-
- // Although count=0 is meaningless, redis accepts count=0.
- args = append(args, "hrandfield", key, count)
- if withValues {
- args = append(args, "withvalues")
- }
-
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd {
- args := make([]interface{}, 1+len(keys)+1)
- args[0] = "blpop"
- for i, key := range keys {
- args[1+i] = key
- }
- args[len(args)-1] = formatSec(ctx, timeout)
- cmd := NewStringSliceCmd(ctx, args...)
- cmd.setReadTimeout(timeout)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd {
- args := make([]interface{}, 1+len(keys)+1)
- args[0] = "brpop"
- for i, key := range keys {
- args[1+i] = key
- }
- args[len(keys)+1] = formatSec(ctx, timeout)
- cmd := NewStringSliceCmd(ctx, args...)
- cmd.setReadTimeout(timeout)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd {
- cmd := NewStringCmd(
- ctx,
- "brpoplpush",
- source,
- destination,
- formatSec(ctx, timeout),
- )
- cmd.setReadTimeout(timeout)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LIndex(ctx context.Context, key string, index int64) *StringCmd {
- cmd := NewStringCmd(ctx, "lindex", key, index)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd {
- cmd := NewIntCmd(ctx, "linsert", key, op, pivot, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd {
- cmd := NewIntCmd(ctx, "linsert", key, "before", pivot, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd {
- cmd := NewIntCmd(ctx, "linsert", key, "after", pivot, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LLen(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "llen", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LPop(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "lpop", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LPopCount(ctx context.Context, key string, count int) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "lpop", key, count)
- _ = c(ctx, cmd)
- return cmd
-}
-
-type LPosArgs struct {
- Rank, MaxLen int64
-}
-
-func (c cmdable) LPos(ctx context.Context, key string, value string, a LPosArgs) *IntCmd {
- args := []interface{}{"lpos", key, value}
- if a.Rank != 0 {
- args = append(args, "rank", a.Rank)
- }
- if a.MaxLen != 0 {
- args = append(args, "maxlen", a.MaxLen)
- }
-
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LPosCount(ctx context.Context, key string, value string, count int64, a LPosArgs) *IntSliceCmd {
- args := []interface{}{"lpos", key, value, "count", count}
- if a.Rank != 0 {
- args = append(args, "rank", a.Rank)
- }
- if a.MaxLen != 0 {
- args = append(args, "maxlen", a.MaxLen)
- }
- cmd := NewIntSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LPush(ctx context.Context, key string, values ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(values))
- args[0] = "lpush"
- args[1] = key
- args = appendArgs(args, values)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(values))
- args[0] = "lpushx"
- args[1] = key
- args = appendArgs(args, values)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
- cmd := NewStringSliceCmd(
- ctx,
- "lrange",
- key,
- start,
- stop,
- )
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd {
- cmd := NewIntCmd(ctx, "lrem", key, count, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd {
- cmd := NewStatusCmd(ctx, "lset", key, index, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd {
- cmd := NewStatusCmd(
- ctx,
- "ltrim",
- key,
- start,
- stop,
- )
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) RPop(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "rpop", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) RPopCount(ctx context.Context, key string, count int) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "rpop", key, count)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) RPopLPush(ctx context.Context, source, destination string) *StringCmd {
- cmd := NewStringCmd(ctx, "rpoplpush", source, destination)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) RPush(ctx context.Context, key string, values ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(values))
- args[0] = "rpush"
- args[1] = key
- args = appendArgs(args, values)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(values))
- args[0] = "rpushx"
- args[1] = key
- args = appendArgs(args, values)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd {
- cmd := NewStringCmd(ctx, "lmove", source, destination, srcpos, destpos)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) BLMove(
- ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration,
-) *StringCmd {
- cmd := NewStringCmd(ctx, "blmove", source, destination, srcpos, destpos, formatSec(ctx, timeout))
- cmd.setReadTimeout(timeout)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(members))
- args[0] = "sadd"
- args[1] = key
- args = appendArgs(args, members)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SCard(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "scard", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SDiff(ctx context.Context, keys ...string) *StringSliceCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "sdiff"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
- args := make([]interface{}, 2+len(keys))
- args[0] = "sdiffstore"
- args[1] = destination
- for i, key := range keys {
- args[2+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SInter(ctx context.Context, keys ...string) *StringSliceCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "sinter"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd {
- args := make([]interface{}, 2+len(keys))
- args[0] = "sinterstore"
- args[1] = destination
- for i, key := range keys {
- args[2+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd {
- cmd := NewBoolCmd(ctx, "sismember", key, member)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SMIsMember Redis `SMISMEMBER key member [member ...]` command.
-func (c cmdable) SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd {
- args := make([]interface{}, 2, 2+len(members))
- args[0] = "smismember"
- args[1] = key
- args = appendArgs(args, members)
- cmd := NewBoolSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SMembers Redis `SMEMBERS key` command output as a slice.
-func (c cmdable) SMembers(ctx context.Context, key string) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "smembers", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SMembersMap Redis `SMEMBERS key` command output as a map.
-func (c cmdable) SMembersMap(ctx context.Context, key string) *StringStructMapCmd {
- cmd := NewStringStructMapCmd(ctx, "smembers", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd {
- cmd := NewBoolCmd(ctx, "smove", source, destination, member)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SPop Redis `SPOP key` command.
-func (c cmdable) SPop(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "spop", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SPopN Redis `SPOP key count` command.
-func (c cmdable) SPopN(ctx context.Context, key string, count int64) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "spop", key, count)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SRandMember Redis `SRANDMEMBER key` command.
-func (c cmdable) SRandMember(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "srandmember", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SRandMemberN Redis `SRANDMEMBER key count` command.
-func (c cmdable) SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "srandmember", key, count)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SRem(ctx context.Context, key string, members ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(members))
- args[0] = "srem"
- args[1] = key
- args = appendArgs(args, members)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SUnion(ctx context.Context, keys ...string) *StringSliceCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "sunion"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd {
- args := make([]interface{}, 2+len(keys))
- args[0] = "sunionstore"
- args[1] = destination
- for i, key := range keys {
- args[2+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-// XAddArgs accepts values in the following formats:
-// - XAddArgs.Values = []interface{}{"key1", "value1", "key2", "value2"}
-// - XAddArgs.Values = []string("key1", "value1", "key2", "value2")
-// - XAddArgs.Values = map[string]interface{}{"key1": "value1", "key2": "value2"}
-//
-// Note that map will not preserve the order of key-value pairs.
-// MaxLen/MaxLenApprox and MinID are in conflict, only one of them can be used.
-type XAddArgs struct {
- Stream string
- NoMkStream bool
- MaxLen int64 // MAXLEN N
-
- // Deprecated: use MaxLen+Approx, remove in v9.
- MaxLenApprox int64 // MAXLEN ~ N
-
- MinID string
- // Approx causes MaxLen and MinID to use "~" matcher (instead of "=").
- Approx bool
- Limit int64
- ID string
- Values interface{}
-}
-
-// XAdd a.Limit has a bug, please confirm it and use it.
-// issue: https://github.com/redis/redis/issues/9046
-func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd {
- args := make([]interface{}, 0, 11)
- args = append(args, "xadd", a.Stream)
- if a.NoMkStream {
- args = append(args, "nomkstream")
- }
- switch {
- case a.MaxLen > 0:
- if a.Approx {
- args = append(args, "maxlen", "~", a.MaxLen)
- } else {
- args = append(args, "maxlen", a.MaxLen)
- }
- case a.MaxLenApprox > 0:
- // TODO remove in v9.
- args = append(args, "maxlen", "~", a.MaxLenApprox)
- case a.MinID != "":
- if a.Approx {
- args = append(args, "minid", "~", a.MinID)
- } else {
- args = append(args, "minid", a.MinID)
- }
- }
- if a.Limit > 0 {
- args = append(args, "limit", a.Limit)
- }
- if a.ID != "" {
- args = append(args, a.ID)
- } else {
- args = append(args, "*")
- }
- args = appendArg(args, a.Values)
-
- cmd := NewStringCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XDel(ctx context.Context, stream string, ids ...string) *IntCmd {
- args := []interface{}{"xdel", stream}
- for _, id := range ids {
- args = append(args, id)
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XLen(ctx context.Context, stream string) *IntCmd {
- cmd := NewIntCmd(ctx, "xlen", stream)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
- cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
- cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop, "count", count)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XRevRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
- cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XRevRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
- cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop, "count", count)
- _ = c(ctx, cmd)
- return cmd
-}
-
-type XReadArgs struct {
- Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
- Count int64
- Block time.Duration
-}
-
-func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd {
- args := make([]interface{}, 0, 6+len(a.Streams))
- args = append(args, "xread")
-
- keyPos := int8(1)
- if a.Count > 0 {
- args = append(args, "count")
- args = append(args, a.Count)
- keyPos += 2
- }
- if a.Block >= 0 {
- args = append(args, "block")
- args = append(args, int64(a.Block/time.Millisecond))
- keyPos += 2
- }
- args = append(args, "streams")
- keyPos++
- for _, s := range a.Streams {
- args = append(args, s)
- }
-
- cmd := NewXStreamSliceCmd(ctx, args...)
- if a.Block >= 0 {
- cmd.setReadTimeout(a.Block)
- }
- cmd.SetFirstKeyPos(keyPos)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd {
- return c.XRead(ctx, &XReadArgs{
- Streams: streams,
- Block: -1,
- })
-}
-
-func (c cmdable) XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start, "mkstream")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "xgroup", "setid", stream, group, start)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XGroupDestroy(ctx context.Context, stream, group string) *IntCmd {
- cmd := NewIntCmd(ctx, "xgroup", "destroy", stream, group)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
- cmd := NewIntCmd(ctx, "xgroup", "createconsumer", stream, group, consumer)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
- cmd := NewIntCmd(ctx, "xgroup", "delconsumer", stream, group, consumer)
- _ = c(ctx, cmd)
- return cmd
-}
-
-type XReadGroupArgs struct {
- Group string
- Consumer string
- Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
- Count int64
- Block time.Duration
- NoAck bool
-}
-
-func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd {
- args := make([]interface{}, 0, 10+len(a.Streams))
- args = append(args, "xreadgroup", "group", a.Group, a.Consumer)
-
- keyPos := int8(4)
- if a.Count > 0 {
- args = append(args, "count", a.Count)
- keyPos += 2
- }
- if a.Block >= 0 {
- args = append(args, "block", int64(a.Block/time.Millisecond))
- keyPos += 2
- }
- if a.NoAck {
- args = append(args, "noack")
- keyPos++
- }
- args = append(args, "streams")
- keyPos++
- for _, s := range a.Streams {
- args = append(args, s)
- }
-
- cmd := NewXStreamSliceCmd(ctx, args...)
- if a.Block >= 0 {
- cmd.setReadTimeout(a.Block)
- }
- cmd.SetFirstKeyPos(keyPos)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd {
- args := []interface{}{"xack", stream, group}
- for _, id := range ids {
- args = append(args, id)
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XPending(ctx context.Context, stream, group string) *XPendingCmd {
- cmd := NewXPendingCmd(ctx, "xpending", stream, group)
- _ = c(ctx, cmd)
- return cmd
-}
-
-type XPendingExtArgs struct {
- Stream string
- Group string
- Idle time.Duration
- Start string
- End string
- Count int64
- Consumer string
-}
-
-func (c cmdable) XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd {
- args := make([]interface{}, 0, 9)
- args = append(args, "xpending", a.Stream, a.Group)
- if a.Idle != 0 {
- args = append(args, "idle", formatMs(ctx, a.Idle))
- }
- args = append(args, a.Start, a.End, a.Count)
- if a.Consumer != "" {
- args = append(args, a.Consumer)
- }
- cmd := NewXPendingExtCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-type XAutoClaimArgs struct {
- Stream string
- Group string
- MinIdle time.Duration
- Start string
- Count int64
- Consumer string
-}
-
-func (c cmdable) XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd {
- args := xAutoClaimArgs(ctx, a)
- cmd := NewXAutoClaimCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd {
- args := xAutoClaimArgs(ctx, a)
- args = append(args, "justid")
- cmd := NewXAutoClaimJustIDCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func xAutoClaimArgs(ctx context.Context, a *XAutoClaimArgs) []interface{} {
- args := make([]interface{}, 0, 8)
- args = append(args, "xautoclaim", a.Stream, a.Group, a.Consumer, formatMs(ctx, a.MinIdle), a.Start)
- if a.Count > 0 {
- args = append(args, "count", a.Count)
- }
- return args
-}
-
-type XClaimArgs struct {
- Stream string
- Group string
- Consumer string
- MinIdle time.Duration
- Messages []string
-}
-
-func (c cmdable) XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd {
- args := xClaimArgs(a)
- cmd := NewXMessageSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd {
- args := xClaimArgs(a)
- args = append(args, "justid")
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func xClaimArgs(a *XClaimArgs) []interface{} {
- args := make([]interface{}, 0, 5+len(a.Messages))
- args = append(args,
- "xclaim",
- a.Stream,
- a.Group, a.Consumer,
- int64(a.MinIdle/time.Millisecond))
- for _, id := range a.Messages {
- args = append(args, id)
- }
- return args
-}
-
-// xTrim If approx is true, add the "~" parameter, otherwise it is the default "=" (redis default).
-// example:
-// XTRIM key MAXLEN/MINID threshold LIMIT limit.
-// XTRIM key MAXLEN/MINID ~ threshold LIMIT limit.
-// The redis-server version is lower than 6.2, please set limit to 0.
-func (c cmdable) xTrim(
- ctx context.Context, key, strategy string,
- approx bool, threshold interface{}, limit int64,
-) *IntCmd {
- args := make([]interface{}, 0, 7)
- args = append(args, "xtrim", key, strategy)
- if approx {
- args = append(args, "~")
- }
- args = append(args, threshold)
- if limit > 0 {
- args = append(args, "limit", limit)
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// Deprecated: use XTrimMaxLen, remove in v9.
-func (c cmdable) XTrim(ctx context.Context, key string, maxLen int64) *IntCmd {
- return c.xTrim(ctx, key, "maxlen", false, maxLen, 0)
-}
-
-// Deprecated: use XTrimMaxLenApprox, remove in v9.
-func (c cmdable) XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd {
- return c.xTrim(ctx, key, "maxlen", true, maxLen, 0)
-}
-
-// XTrimMaxLen No `~` rules are used, `limit` cannot be used.
-// cmd: XTRIM key MAXLEN maxLen
-func (c cmdable) XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd {
- return c.xTrim(ctx, key, "maxlen", false, maxLen, 0)
-}
-
-// XTrimMaxLenApprox LIMIT has a bug, please confirm it and use it.
-// issue: https://github.com/redis/redis/issues/9046
-// cmd: XTRIM key MAXLEN ~ maxLen LIMIT limit
-func (c cmdable) XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd {
- return c.xTrim(ctx, key, "maxlen", true, maxLen, limit)
-}
-
-// XTrimMinID No `~` rules are used, `limit` cannot be used.
-// cmd: XTRIM key MINID minID
-func (c cmdable) XTrimMinID(ctx context.Context, key string, minID string) *IntCmd {
- return c.xTrim(ctx, key, "minid", false, minID, 0)
-}
-
-// XTrimMinIDApprox LIMIT has a bug, please confirm it and use it.
-// issue: https://github.com/redis/redis/issues/9046
-// cmd: XTRIM key MINID ~ minID LIMIT limit
-func (c cmdable) XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd {
- return c.xTrim(ctx, key, "minid", true, minID, limit)
-}
-
-func (c cmdable) XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd {
- cmd := NewXInfoConsumersCmd(ctx, key, group)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd {
- cmd := NewXInfoGroupsCmd(ctx, key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XInfoStream(ctx context.Context, key string) *XInfoStreamCmd {
- cmd := NewXInfoStreamCmd(ctx, key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// XInfoStreamFull XINFO STREAM FULL [COUNT count]
-// redis-server >= 6.0.
-func (c cmdable) XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd {
- args := make([]interface{}, 0, 6)
- args = append(args, "xinfo", "stream", key, "full")
- if count > 0 {
- args = append(args, "count", count)
- }
- cmd := NewXInfoStreamFullCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-// Z represents sorted set member.
-type Z struct {
- Score float64
- Member interface{}
-}
-
-// ZWithKey represents sorted set member including the name of the key where it was popped.
-type ZWithKey struct {
- Z
- Key string
-}
-
-// ZStore is used as an arg to ZInter/ZInterStore and ZUnion/ZUnionStore.
-type ZStore struct {
- Keys []string
- Weights []float64
- // Can be SUM, MIN or MAX.
- Aggregate string
-}
-
-func (z ZStore) len() (n int) {
- n = len(z.Keys)
- if len(z.Weights) > 0 {
- n += 1 + len(z.Weights)
- }
- if z.Aggregate != "" {
- n += 2
- }
- return n
-}
-
-func (z ZStore) appendArgs(args []interface{}) []interface{} {
- for _, key := range z.Keys {
- args = append(args, key)
- }
- if len(z.Weights) > 0 {
- args = append(args, "weights")
- for _, weights := range z.Weights {
- args = append(args, weights)
- }
- }
- if z.Aggregate != "" {
- args = append(args, "aggregate", z.Aggregate)
- }
- return args
-}
-
-// BZPopMax Redis `BZPOPMAX key [key ...] timeout` command.
-func (c cmdable) BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
- args := make([]interface{}, 1+len(keys)+1)
- args[0] = "bzpopmax"
- for i, key := range keys {
- args[1+i] = key
- }
- args[len(args)-1] = formatSec(ctx, timeout)
- cmd := NewZWithKeyCmd(ctx, args...)
- cmd.setReadTimeout(timeout)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// BZPopMin Redis `BZPOPMIN key [key ...] timeout` command.
-func (c cmdable) BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
- args := make([]interface{}, 1+len(keys)+1)
- args[0] = "bzpopmin"
- for i, key := range keys {
- args[1+i] = key
- }
- args[len(args)-1] = formatSec(ctx, timeout)
- cmd := NewZWithKeyCmd(ctx, args...)
- cmd.setReadTimeout(timeout)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ZAddArgs WARN: The GT, LT and NX options are mutually exclusive.
-type ZAddArgs struct {
- NX bool
- XX bool
- LT bool
- GT bool
- Ch bool
- Members []Z
-}
-
-func (c cmdable) zAddArgs(key string, args ZAddArgs, incr bool) []interface{} {
- a := make([]interface{}, 0, 6+2*len(args.Members))
- a = append(a, "zadd", key)
-
- // The GT, LT and NX options are mutually exclusive.
- if args.NX {
- a = append(a, "nx")
- } else {
- if args.XX {
- a = append(a, "xx")
- }
- if args.GT {
- a = append(a, "gt")
- } else if args.LT {
- a = append(a, "lt")
- }
- }
- if args.Ch {
- a = append(a, "ch")
- }
- if incr {
- a = append(a, "incr")
- }
- for _, m := range args.Members {
- a = append(a, m.Score)
- a = append(a, m.Member)
- }
- return a
-}
-
-func (c cmdable) ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd {
- cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd {
- cmd := NewFloatCmd(ctx, c.zAddArgs(key, args, true)...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// TODO: Compatible with v8 api, will be removed in v9.
-func (c cmdable) zAdd(ctx context.Context, key string, args ZAddArgs, members ...*Z) *IntCmd {
- args.Members = make([]Z, len(members))
- for i, m := range members {
- args.Members[i] = *m
- }
- cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ZAdd Redis `ZADD key score member [score member ...]` command.
-func (c cmdable) ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd {
- return c.zAdd(ctx, key, ZAddArgs{}, members...)
-}
-
-// ZAddNX Redis `ZADD key NX score member [score member ...]` command.
-func (c cmdable) ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd {
- return c.zAdd(ctx, key, ZAddArgs{
- NX: true,
- }, members...)
-}
-
-// ZAddXX Redis `ZADD key XX score member [score member ...]` command.
-func (c cmdable) ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd {
- return c.zAdd(ctx, key, ZAddArgs{
- XX: true,
- }, members...)
-}
-
-// ZAddCh Redis `ZADD key CH score member [score member ...]` command.
-// Deprecated: Use
-// client.ZAddArgs(ctx, ZAddArgs{
-// Ch: true,
-// Members: []Z,
-// })
-// remove in v9.
-func (c cmdable) ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd {
- return c.zAdd(ctx, key, ZAddArgs{
- Ch: true,
- }, members...)
-}
-
-// ZAddNXCh Redis `ZADD key NX CH score member [score member ...]` command.
-// Deprecated: Use
-// client.ZAddArgs(ctx, ZAddArgs{
-// NX: true,
-// Ch: true,
-// Members: []Z,
-// })
-// remove in v9.
-func (c cmdable) ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd {
- return c.zAdd(ctx, key, ZAddArgs{
- NX: true,
- Ch: true,
- }, members...)
-}
-
-// ZAddXXCh Redis `ZADD key XX CH score member [score member ...]` command.
-// Deprecated: Use
-// client.ZAddArgs(ctx, ZAddArgs{
-// XX: true,
-// Ch: true,
-// Members: []Z,
-// })
-// remove in v9.
-func (c cmdable) ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd {
- return c.zAdd(ctx, key, ZAddArgs{
- XX: true,
- Ch: true,
- }, members...)
-}
-
-// ZIncr Redis `ZADD key INCR score member` command.
-// Deprecated: Use
-// client.ZAddArgsIncr(ctx, ZAddArgs{
-// Members: []Z,
-// })
-// remove in v9.
-func (c cmdable) ZIncr(ctx context.Context, key string, member *Z) *FloatCmd {
- return c.ZAddArgsIncr(ctx, key, ZAddArgs{
- Members: []Z{*member},
- })
-}
-
-// ZIncrNX Redis `ZADD key NX INCR score member` command.
-// Deprecated: Use
-// client.ZAddArgsIncr(ctx, ZAddArgs{
-// NX: true,
-// Members: []Z,
-// })
-// remove in v9.
-func (c cmdable) ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd {
- return c.ZAddArgsIncr(ctx, key, ZAddArgs{
- NX: true,
- Members: []Z{*member},
- })
-}
-
-// ZIncrXX Redis `ZADD key XX INCR score member` command.
-// Deprecated: Use
-// client.ZAddArgsIncr(ctx, ZAddArgs{
-// XX: true,
-// Members: []Z,
-// })
-// remove in v9.
-func (c cmdable) ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd {
- return c.ZAddArgsIncr(ctx, key, ZAddArgs{
- XX: true,
- Members: []Z{*member},
- })
-}
-
-func (c cmdable) ZCard(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "zcard", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZCount(ctx context.Context, key, min, max string) *IntCmd {
- cmd := NewIntCmd(ctx, "zcount", key, min, max)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZLexCount(ctx context.Context, key, min, max string) *IntCmd {
- cmd := NewIntCmd(ctx, "zlexcount", key, min, max)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd {
- cmd := NewFloatCmd(ctx, "zincrby", key, increment, member)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd {
- args := make([]interface{}, 0, 3+store.len())
- args = append(args, "zinterstore", destination, len(store.Keys))
- args = store.appendArgs(args)
- cmd := NewIntCmd(ctx, args...)
- cmd.SetFirstKeyPos(3)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZInter(ctx context.Context, store *ZStore) *StringSliceCmd {
- args := make([]interface{}, 0, 2+store.len())
- args = append(args, "zinter", len(store.Keys))
- args = store.appendArgs(args)
- cmd := NewStringSliceCmd(ctx, args...)
- cmd.SetFirstKeyPos(2)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd {
- args := make([]interface{}, 0, 3+store.len())
- args = append(args, "zinter", len(store.Keys))
- args = store.appendArgs(args)
- args = append(args, "withscores")
- cmd := NewZSliceCmd(ctx, args...)
- cmd.SetFirstKeyPos(2)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd {
- args := make([]interface{}, 2+len(members))
- args[0] = "zmscore"
- args[1] = key
- for i, member := range members {
- args[2+i] = member
- }
- cmd := NewFloatSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd {
- args := []interface{}{
- "zpopmax",
- key,
- }
-
- switch len(count) {
- case 0:
- break
- case 1:
- args = append(args, count[0])
- default:
- panic("too many arguments")
- }
-
- cmd := NewZSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd {
- args := []interface{}{
- "zpopmin",
- key,
- }
-
- switch len(count) {
- case 0:
- break
- case 1:
- args = append(args, count[0])
- default:
- panic("too many arguments")
- }
-
- cmd := NewZSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ZRangeArgs is all the options of the ZRange command.
-// In version> 6.2.0, you can replace the(cmd):
-// ZREVRANGE,
-// ZRANGEBYSCORE,
-// ZREVRANGEBYSCORE,
-// ZRANGEBYLEX,
-// ZREVRANGEBYLEX.
-// Please pay attention to your redis-server version.
-//
-// Rev, ByScore, ByLex and Offset+Count options require redis-server 6.2.0 and higher.
-type ZRangeArgs struct {
- Key string
-
- // When the ByScore option is provided, the open interval(exclusive) can be set.
- // By default, the score intervals specified by and are closed (inclusive).
- // It is similar to the deprecated(6.2.0+) ZRangeByScore command.
- // For example:
- // ZRangeArgs{
- // Key: "example-key",
- // Start: "(3",
- // Stop: 8,
- // ByScore: true,
- // }
- // cmd: "ZRange example-key (3 8 ByScore" (3 < score <= 8).
- //
- // For the ByLex option, it is similar to the deprecated(6.2.0+) ZRangeByLex command.
- // You can set the and options as follows:
- // ZRangeArgs{
- // Key: "example-key",
- // Start: "[abc",
- // Stop: "(def",
- // ByLex: true,
- // }
- // cmd: "ZRange example-key [abc (def ByLex"
- //
- // For normal cases (ByScore==false && ByLex==false), and should be set to the index range (int).
- // You can read the documentation for more information: https://redis.io/commands/zrange
- Start interface{}
- Stop interface{}
-
- // The ByScore and ByLex options are mutually exclusive.
- ByScore bool
- ByLex bool
-
- Rev bool
-
- // limit offset count.
- Offset int64
- Count int64
-}
-
-func (z ZRangeArgs) appendArgs(args []interface{}) []interface{} {
- // For Rev+ByScore/ByLex, we need to adjust the position of and .
- if z.Rev && (z.ByScore || z.ByLex) {
- args = append(args, z.Key, z.Stop, z.Start)
- } else {
- args = append(args, z.Key, z.Start, z.Stop)
- }
-
- if z.ByScore {
- args = append(args, "byscore")
- } else if z.ByLex {
- args = append(args, "bylex")
- }
- if z.Rev {
- args = append(args, "rev")
- }
- if z.Offset != 0 || z.Count != 0 {
- args = append(args, "limit", z.Offset, z.Count)
- }
- return args
-}
-
-func (c cmdable) ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd {
- args := make([]interface{}, 0, 9)
- args = append(args, "zrange")
- args = z.appendArgs(args)
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd {
- args := make([]interface{}, 0, 10)
- args = append(args, "zrange")
- args = z.appendArgs(args)
- args = append(args, "withscores")
- cmd := NewZSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
- return c.ZRangeArgs(ctx, ZRangeArgs{
- Key: key,
- Start: start,
- Stop: stop,
- })
-}
-
-func (c cmdable) ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
- return c.ZRangeArgsWithScores(ctx, ZRangeArgs{
- Key: key,
- Start: start,
- Stop: stop,
- })
-}
-
-type ZRangeBy struct {
- Min, Max string
- Offset, Count int64
-}
-
-func (c cmdable) zRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy, withScores bool) *StringSliceCmd {
- args := []interface{}{zcmd, key, opt.Min, opt.Max}
- if withScores {
- args = append(args, "withscores")
- }
- if opt.Offset != 0 || opt.Count != 0 {
- args = append(
- args,
- "limit",
- opt.Offset,
- opt.Count,
- )
- }
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
- return c.zRangeBy(ctx, "zrangebyscore", key, opt, false)
-}
-
-func (c cmdable) ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
- return c.zRangeBy(ctx, "zrangebylex", key, opt, false)
-}
-
-func (c cmdable) ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd {
- args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"}
- if opt.Offset != 0 || opt.Count != 0 {
- args = append(
- args,
- "limit",
- opt.Offset,
- opt.Count,
- )
- }
- cmd := NewZSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd {
- args := make([]interface{}, 0, 10)
- args = append(args, "zrangestore", dst)
- args = z.appendArgs(args)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRank(ctx context.Context, key, member string) *IntCmd {
- cmd := NewIntCmd(ctx, "zrank", key, member)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(members))
- args[0] = "zrem"
- args[1] = key
- args = appendArgs(args, members)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd {
- cmd := NewIntCmd(
- ctx,
- "zremrangebyrank",
- key,
- start,
- stop,
- )
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd {
- cmd := NewIntCmd(ctx, "zremrangebyscore", key, min, max)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd {
- cmd := NewIntCmd(ctx, "zremrangebylex", key, min, max)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "zrevrange", key, start, stop)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
- cmd := NewZSliceCmd(ctx, "zrevrange", key, start, stop, "withscores")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) zRevRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy) *StringSliceCmd {
- args := []interface{}{zcmd, key, opt.Max, opt.Min}
- if opt.Offset != 0 || opt.Count != 0 {
- args = append(
- args,
- "limit",
- opt.Offset,
- opt.Count,
- )
- }
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
- return c.zRevRangeBy(ctx, "zrevrangebyscore", key, opt)
-}
-
-func (c cmdable) ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
- return c.zRevRangeBy(ctx, "zrevrangebylex", key, opt)
-}
-
-func (c cmdable) ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd {
- args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"}
- if opt.Offset != 0 || opt.Count != 0 {
- args = append(
- args,
- "limit",
- opt.Offset,
- opt.Count,
- )
- }
- cmd := NewZSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRevRank(ctx context.Context, key, member string) *IntCmd {
- cmd := NewIntCmd(ctx, "zrevrank", key, member)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZScore(ctx context.Context, key, member string) *FloatCmd {
- cmd := NewFloatCmd(ctx, "zscore", key, member)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZUnion(ctx context.Context, store ZStore) *StringSliceCmd {
- args := make([]interface{}, 0, 2+store.len())
- args = append(args, "zunion", len(store.Keys))
- args = store.appendArgs(args)
- cmd := NewStringSliceCmd(ctx, args...)
- cmd.SetFirstKeyPos(2)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd {
- args := make([]interface{}, 0, 3+store.len())
- args = append(args, "zunion", len(store.Keys))
- args = store.appendArgs(args)
- args = append(args, "withscores")
- cmd := NewZSliceCmd(ctx, args...)
- cmd.SetFirstKeyPos(2)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd {
- args := make([]interface{}, 0, 3+store.len())
- args = append(args, "zunionstore", dest, len(store.Keys))
- args = store.appendArgs(args)
- cmd := NewIntCmd(ctx, args...)
- cmd.SetFirstKeyPos(3)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ZRandMember redis-server version >= 6.2.0.
-func (c cmdable) ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd {
- args := make([]interface{}, 0, 4)
-
- // Although count=0 is meaningless, redis accepts count=0.
- args = append(args, "zrandmember", key, count)
- if withScores {
- args = append(args, "withscores")
- }
-
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ZDiff redis-server version >= 6.2.0.
-func (c cmdable) ZDiff(ctx context.Context, keys ...string) *StringSliceCmd {
- args := make([]interface{}, 2+len(keys))
- args[0] = "zdiff"
- args[1] = len(keys)
- for i, key := range keys {
- args[i+2] = key
- }
-
- cmd := NewStringSliceCmd(ctx, args...)
- cmd.SetFirstKeyPos(2)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ZDiffWithScores redis-server version >= 6.2.0.
-func (c cmdable) ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd {
- args := make([]interface{}, 3+len(keys))
- args[0] = "zdiff"
- args[1] = len(keys)
- for i, key := range keys {
- args[i+2] = key
- }
- args[len(keys)+2] = "withscores"
-
- cmd := NewZSliceCmd(ctx, args...)
- cmd.SetFirstKeyPos(2)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ZDiffStore redis-server version >=6.2.0.
-func (c cmdable) ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
- args := make([]interface{}, 0, 3+len(keys))
- args = append(args, "zdiffstore", destination, len(keys))
- for _, key := range keys {
- args = append(args, key)
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(els))
- args[0] = "pfadd"
- args[1] = key
- args = appendArgs(args, els)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) PFCount(ctx context.Context, keys ...string) *IntCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "pfcount"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd {
- args := make([]interface{}, 2+len(keys))
- args[0] = "pfmerge"
- args[1] = dest
- for i, key := range keys {
- args[2+i] = key
- }
- cmd := NewStatusCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) BgRewriteAOF(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "bgrewriteaof")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) BgSave(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "bgsave")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClientKill(ctx context.Context, ipPort string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "client", "kill", ipPort)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ClientKillByFilter is new style syntax, while the ClientKill is old
-//
-// CLIENT KILL [value] ... [value]
-func (c cmdable) ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd {
- args := make([]interface{}, 2+len(keys))
- args[0] = "client"
- args[1] = "kill"
- for i, key := range keys {
- args[2+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClientList(ctx context.Context) *StringCmd {
- cmd := NewStringCmd(ctx, "client", "list")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClientPause(ctx context.Context, dur time.Duration) *BoolCmd {
- cmd := NewBoolCmd(ctx, "client", "pause", formatMs(ctx, dur))
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClientID(ctx context.Context) *IntCmd {
- cmd := NewIntCmd(ctx, "client", "id")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClientUnblock(ctx context.Context, id int64) *IntCmd {
- cmd := NewIntCmd(ctx, "client", "unblock", id)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClientUnblockWithError(ctx context.Context, id int64) *IntCmd {
- cmd := NewIntCmd(ctx, "client", "unblock", id, "error")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ConfigGet(ctx context.Context, parameter string) *SliceCmd {
- cmd := NewSliceCmd(ctx, "config", "get", parameter)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ConfigResetStat(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "config", "resetstat")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ConfigSet(ctx context.Context, parameter, value string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "config", "set", parameter, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ConfigRewrite(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "config", "rewrite")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) DBSize(ctx context.Context) *IntCmd {
- cmd := NewIntCmd(ctx, "dbsize")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) FlushAll(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "flushall")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) FlushAllAsync(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "flushall", "async")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) FlushDB(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "flushdb")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) FlushDBAsync(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "flushdb", "async")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Info(ctx context.Context, section ...string) *StringCmd {
- args := []interface{}{"info"}
- if len(section) > 0 {
- args = append(args, section[0])
- }
- cmd := NewStringCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LastSave(ctx context.Context) *IntCmd {
- cmd := NewIntCmd(ctx, "lastsave")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Save(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "save")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) shutdown(ctx context.Context, modifier string) *StatusCmd {
- var args []interface{}
- if modifier == "" {
- args = []interface{}{"shutdown"}
- } else {
- args = []interface{}{"shutdown", modifier}
- }
- cmd := NewStatusCmd(ctx, args...)
- _ = c(ctx, cmd)
- if err := cmd.Err(); err != nil {
- if err == io.EOF {
- // Server quit as expected.
- cmd.err = nil
- }
- } else {
- // Server did not quit. String reply contains the reason.
- cmd.err = errors.New(cmd.val)
- cmd.val = ""
- }
- return cmd
-}
-
-func (c cmdable) Shutdown(ctx context.Context) *StatusCmd {
- return c.shutdown(ctx, "")
-}
-
-func (c cmdable) ShutdownSave(ctx context.Context) *StatusCmd {
- return c.shutdown(ctx, "save")
-}
-
-func (c cmdable) ShutdownNoSave(ctx context.Context) *StatusCmd {
- return c.shutdown(ctx, "nosave")
-}
-
-func (c cmdable) SlaveOf(ctx context.Context, host, port string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "slaveof", host, port)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SlowLogGet(ctx context.Context, num int64) *SlowLogCmd {
- cmd := NewSlowLogCmd(context.Background(), "slowlog", "get", num)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Sync(_ context.Context) {
- panic("not implemented")
-}
-
-func (c cmdable) Time(ctx context.Context) *TimeCmd {
- cmd := NewTimeCmd(ctx, "time")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) DebugObject(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "debug", "object", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ReadOnly(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "readonly")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ReadWrite(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "readwrite")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd {
- args := []interface{}{"memory", "usage", key}
- if len(samples) > 0 {
- if len(samples) != 1 {
- panic("MemoryUsage expects single sample count")
- }
- args = append(args, "SAMPLES", samples[0])
- }
- cmd := NewIntCmd(ctx, args...)
- cmd.SetFirstKeyPos(2)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd {
- cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
- cmdArgs[0] = "eval"
- cmdArgs[1] = script
- cmdArgs[2] = len(keys)
- for i, key := range keys {
- cmdArgs[3+i] = key
- }
- cmdArgs = appendArgs(cmdArgs, args)
- cmd := NewCmd(ctx, cmdArgs...)
- cmd.SetFirstKeyPos(3)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd {
- cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
- cmdArgs[0] = "evalsha"
- cmdArgs[1] = sha1
- cmdArgs[2] = len(keys)
- for i, key := range keys {
- cmdArgs[3+i] = key
- }
- cmdArgs = appendArgs(cmdArgs, args)
- cmd := NewCmd(ctx, cmdArgs...)
- cmd.SetFirstKeyPos(3)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd {
- args := make([]interface{}, 2+len(hashes))
- args[0] = "script"
- args[1] = "exists"
- for i, hash := range hashes {
- args[2+i] = hash
- }
- cmd := NewBoolSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ScriptFlush(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "script", "flush")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ScriptKill(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "script", "kill")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ScriptLoad(ctx context.Context, script string) *StringCmd {
- cmd := NewStringCmd(ctx, "script", "load", script)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-// Publish posts the message to the channel.
-func (c cmdable) Publish(ctx context.Context, channel string, message interface{}) *IntCmd {
- cmd := NewIntCmd(ctx, "publish", channel, message)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd {
- args := []interface{}{"pubsub", "channels"}
- if pattern != "*" {
- args = append(args, pattern)
- }
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) PubSubNumSub(ctx context.Context, channels ...string) *StringIntMapCmd {
- args := make([]interface{}, 2+len(channels))
- args[0] = "pubsub"
- args[1] = "numsub"
- for i, channel := range channels {
- args[2+i] = channel
- }
- cmd := NewStringIntMapCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) PubSubNumPat(ctx context.Context) *IntCmd {
- cmd := NewIntCmd(ctx, "pubsub", "numpat")
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) ClusterSlots(ctx context.Context) *ClusterSlotsCmd {
- cmd := NewClusterSlotsCmd(ctx, "cluster", "slots")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterNodes(ctx context.Context) *StringCmd {
- cmd := NewStringCmd(ctx, "cluster", "nodes")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterMeet(ctx context.Context, host, port string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "cluster", "meet", host, port)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterForget(ctx context.Context, nodeID string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "cluster", "forget", nodeID)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "cluster", "replicate", nodeID)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterResetSoft(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "cluster", "reset", "soft")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterResetHard(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "cluster", "reset", "hard")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterInfo(ctx context.Context) *StringCmd {
- cmd := NewStringCmd(ctx, "cluster", "info")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterKeySlot(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "cluster", "keyslot", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "cluster", "getkeysinslot", slot, count)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd {
- cmd := NewIntCmd(ctx, "cluster", "count-failure-reports", nodeID)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd {
- cmd := NewIntCmd(ctx, "cluster", "countkeysinslot", slot)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd {
- args := make([]interface{}, 2+len(slots))
- args[0] = "cluster"
- args[1] = "delslots"
- for i, slot := range slots {
- args[2+i] = slot
- }
- cmd := NewStatusCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd {
- size := max - min + 1
- slots := make([]int, size)
- for i := 0; i < size; i++ {
- slots[i] = min + i
- }
- return c.ClusterDelSlots(ctx, slots...)
-}
-
-func (c cmdable) ClusterSaveConfig(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "cluster", "saveconfig")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "cluster", "slaves", nodeID)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterFailover(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "cluster", "failover")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd {
- args := make([]interface{}, 2+len(slots))
- args[0] = "cluster"
- args[1] = "addslots"
- for i, num := range slots {
- args[2+i] = num
- }
- cmd := NewStatusCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd {
- size := max - min + 1
- slots := make([]int, size)
- for i := 0; i < size; i++ {
- slots[i] = min + i
- }
- return c.ClusterAddSlots(ctx, slots...)
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd {
- args := make([]interface{}, 2+3*len(geoLocation))
- args[0] = "geoadd"
- args[1] = key
- for i, eachLoc := range geoLocation {
- args[2+3*i] = eachLoc.Longitude
- args[2+3*i+1] = eachLoc.Latitude
- args[2+3*i+2] = eachLoc.Name
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// GeoRadius is a read-only GEORADIUS_RO command.
-func (c cmdable) GeoRadius(
- ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery,
-) *GeoLocationCmd {
- cmd := NewGeoLocationCmd(ctx, query, "georadius_ro", key, longitude, latitude)
- if query.Store != "" || query.StoreDist != "" {
- cmd.SetErr(errors.New("GeoRadius does not support Store or StoreDist"))
- return cmd
- }
- _ = c(ctx, cmd)
- return cmd
-}
-
-// GeoRadiusStore is a writing GEORADIUS command.
-func (c cmdable) GeoRadiusStore(
- ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery,
-) *IntCmd {
- args := geoLocationArgs(query, "georadius", key, longitude, latitude)
- cmd := NewIntCmd(ctx, args...)
- if query.Store == "" && query.StoreDist == "" {
- cmd.SetErr(errors.New("GeoRadiusStore requires Store or StoreDist"))
- return cmd
- }
- _ = c(ctx, cmd)
- return cmd
-}
-
-// GeoRadiusByMember is a read-only GEORADIUSBYMEMBER_RO command.
-func (c cmdable) GeoRadiusByMember(
- ctx context.Context, key, member string, query *GeoRadiusQuery,
-) *GeoLocationCmd {
- cmd := NewGeoLocationCmd(ctx, query, "georadiusbymember_ro", key, member)
- if query.Store != "" || query.StoreDist != "" {
- cmd.SetErr(errors.New("GeoRadiusByMember does not support Store or StoreDist"))
- return cmd
- }
- _ = c(ctx, cmd)
- return cmd
-}
-
-// GeoRadiusByMemberStore is a writing GEORADIUSBYMEMBER command.
-func (c cmdable) GeoRadiusByMemberStore(
- ctx context.Context, key, member string, query *GeoRadiusQuery,
-) *IntCmd {
- args := geoLocationArgs(query, "georadiusbymember", key, member)
- cmd := NewIntCmd(ctx, args...)
- if query.Store == "" && query.StoreDist == "" {
- cmd.SetErr(errors.New("GeoRadiusByMemberStore requires Store or StoreDist"))
- return cmd
- }
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) GeoSearch(ctx context.Context, key string, q *GeoSearchQuery) *StringSliceCmd {
- args := make([]interface{}, 0, 13)
- args = append(args, "geosearch", key)
- args = geoSearchArgs(q, args)
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) GeoSearchLocation(
- ctx context.Context, key string, q *GeoSearchLocationQuery,
-) *GeoSearchLocationCmd {
- args := make([]interface{}, 0, 16)
- args = append(args, "geosearch", key)
- args = geoSearchLocationArgs(q, args)
- cmd := NewGeoSearchLocationCmd(ctx, q, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd {
- args := make([]interface{}, 0, 15)
- args = append(args, "geosearchstore", store, key)
- args = geoSearchArgs(&q.GeoSearchQuery, args)
- if q.StoreDist {
- args = append(args, "storedist")
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) GeoDist(
- ctx context.Context, key string, member1, member2, unit string,
-) *FloatCmd {
- if unit == "" {
- unit = "km"
- }
- cmd := NewFloatCmd(ctx, "geodist", key, member1, member2, unit)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd {
- args := make([]interface{}, 2+len(members))
- args[0] = "geohash"
- args[1] = key
- for i, member := range members {
- args[2+i] = member
- }
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd {
- args := make([]interface{}, 2+len(members))
- args[0] = "geopos"
- args[1] = key
- for i, member := range members {
- args[2+i] = member
- }
- cmd := NewGeoPosCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
diff --git a/vendor/github.com/go-redis/redis/v8/doc.go b/vendor/github.com/go-redis/redis/v8/doc.go
deleted file mode 100644
index 55262533..00000000
--- a/vendor/github.com/go-redis/redis/v8/doc.go
+++ /dev/null
@@ -1,4 +0,0 @@
-/*
-Package redis implements a Redis client.
-*/
-package redis
diff --git a/vendor/github.com/go-redis/redis/v8/error.go b/vendor/github.com/go-redis/redis/v8/error.go
deleted file mode 100644
index 521594bb..00000000
--- a/vendor/github.com/go-redis/redis/v8/error.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package redis
-
-import (
- "context"
- "io"
- "net"
- "strings"
-
- "github.com/go-redis/redis/v8/internal/pool"
- "github.com/go-redis/redis/v8/internal/proto"
-)
-
-// ErrClosed performs any operation on the closed client will return this error.
-var ErrClosed = pool.ErrClosed
-
-type Error interface {
- error
-
- // RedisError is a no-op function but
- // serves to distinguish types that are Redis
- // errors from ordinary errors: a type is a
- // Redis error if it has a RedisError method.
- RedisError()
-}
-
-var _ Error = proto.RedisError("")
-
-func shouldRetry(err error, retryTimeout bool) bool {
- switch err {
- case io.EOF, io.ErrUnexpectedEOF:
- return true
- case nil, context.Canceled, context.DeadlineExceeded:
- return false
- }
-
- if v, ok := err.(timeoutError); ok {
- if v.Timeout() {
- return retryTimeout
- }
- return true
- }
-
- s := err.Error()
- if s == "ERR max number of clients reached" {
- return true
- }
- if strings.HasPrefix(s, "LOADING ") {
- return true
- }
- if strings.HasPrefix(s, "READONLY ") {
- return true
- }
- if strings.HasPrefix(s, "CLUSTERDOWN ") {
- return true
- }
- if strings.HasPrefix(s, "TRYAGAIN ") {
- return true
- }
-
- return false
-}
-
-func isRedisError(err error) bool {
- _, ok := err.(proto.RedisError)
- return ok
-}
-
-func isBadConn(err error, allowTimeout bool, addr string) bool {
- switch err {
- case nil:
- return false
- case context.Canceled, context.DeadlineExceeded:
- return true
- }
-
- if isRedisError(err) {
- switch {
- case isReadOnlyError(err):
- // Close connections in read only state in case domain addr is used
- // and domain resolves to a different Redis Server. See #790.
- return true
- case isMovedSameConnAddr(err, addr):
- // Close connections when we are asked to move to the same addr
- // of the connection. Force a DNS resolution when all connections
- // of the pool are recycled
- return true
- default:
- return false
- }
- }
-
- if allowTimeout {
- if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
- return !netErr.Temporary()
- }
- }
-
- return true
-}
-
-func isMovedError(err error) (moved bool, ask bool, addr string) {
- if !isRedisError(err) {
- return
- }
-
- s := err.Error()
- switch {
- case strings.HasPrefix(s, "MOVED "):
- moved = true
- case strings.HasPrefix(s, "ASK "):
- ask = true
- default:
- return
- }
-
- ind := strings.LastIndex(s, " ")
- if ind == -1 {
- return false, false, ""
- }
- addr = s[ind+1:]
- return
-}
-
-func isLoadingError(err error) bool {
- return strings.HasPrefix(err.Error(), "LOADING ")
-}
-
-func isReadOnlyError(err error) bool {
- return strings.HasPrefix(err.Error(), "READONLY ")
-}
-
-func isMovedSameConnAddr(err error, addr string) bool {
- redisError := err.Error()
- if !strings.HasPrefix(redisError, "MOVED ") {
- return false
- }
- return strings.HasSuffix(redisError, " "+addr)
-}
-
-//------------------------------------------------------------------------------
-
-type timeoutError interface {
- Timeout() bool
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/arg.go b/vendor/github.com/go-redis/redis/v8/internal/arg.go
deleted file mode 100644
index b97fa0d6..00000000
--- a/vendor/github.com/go-redis/redis/v8/internal/arg.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package internal
-
-import (
- "fmt"
- "strconv"
- "time"
-)
-
-func AppendArg(b []byte, v interface{}) []byte {
- switch v := v.(type) {
- case nil:
- return append(b, ""...)
- case string:
- return appendUTF8String(b, Bytes(v))
- case []byte:
- return appendUTF8String(b, v)
- case int:
- return strconv.AppendInt(b, int64(v), 10)
- case int8:
- return strconv.AppendInt(b, int64(v), 10)
- case int16:
- return strconv.AppendInt(b, int64(v), 10)
- case int32:
- return strconv.AppendInt(b, int64(v), 10)
- case int64:
- return strconv.AppendInt(b, v, 10)
- case uint:
- return strconv.AppendUint(b, uint64(v), 10)
- case uint8:
- return strconv.AppendUint(b, uint64(v), 10)
- case uint16:
- return strconv.AppendUint(b, uint64(v), 10)
- case uint32:
- return strconv.AppendUint(b, uint64(v), 10)
- case uint64:
- return strconv.AppendUint(b, v, 10)
- case float32:
- return strconv.AppendFloat(b, float64(v), 'f', -1, 64)
- case float64:
- return strconv.AppendFloat(b, v, 'f', -1, 64)
- case bool:
- if v {
- return append(b, "true"...)
- }
- return append(b, "false"...)
- case time.Time:
- return v.AppendFormat(b, time.RFC3339Nano)
- default:
- return append(b, fmt.Sprint(v)...)
- }
-}
-
-func appendUTF8String(dst []byte, src []byte) []byte {
- dst = append(dst, src...)
- return dst
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go b/vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go
deleted file mode 100644
index b3a4f211..00000000
--- a/vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package hashtag
-
-import (
- "strings"
-
- "github.com/go-redis/redis/v8/internal/rand"
-)
-
-const slotNumber = 16384
-
-// CRC16 implementation according to CCITT standards.
-// Copyright 2001-2010 Georges Menie (www.menie.org)
-// Copyright 2013 The Go Authors. All rights reserved.
-// http://redis.io/topics/cluster-spec#appendix-a-crc16-reference-implementation-in-ansi-c
-var crc16tab = [256]uint16{
- 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
- 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
- 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
- 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
- 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
- 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
- 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
- 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
- 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
- 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
- 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
- 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
- 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
- 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
- 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
- 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
- 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
- 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
- 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
- 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
- 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
- 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
- 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
- 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
- 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
- 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
- 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
- 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
- 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
- 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
- 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
- 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0,
-}
-
-func Key(key string) string {
- if s := strings.IndexByte(key, '{'); s > -1 {
- if e := strings.IndexByte(key[s+1:], '}'); e > 0 {
- return key[s+1 : s+e+1]
- }
- }
- return key
-}
-
-func RandomSlot() int {
- return rand.Intn(slotNumber)
-}
-
-// Slot returns a consistent slot number between 0 and 16383
-// for any given string key.
-func Slot(key string) int {
- if key == "" {
- return RandomSlot()
- }
- key = Key(key)
- return int(crc16sum(key)) % slotNumber
-}
-
-func crc16sum(key string) (crc uint16) {
- for i := 0; i < len(key); i++ {
- crc = (crc << 8) ^ crc16tab[(byte(crc>>8)^key[i])&0x00ff]
- }
- return
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/hscan/hscan.go b/vendor/github.com/go-redis/redis/v8/internal/hscan/hscan.go
deleted file mode 100644
index 852c8bd5..00000000
--- a/vendor/github.com/go-redis/redis/v8/internal/hscan/hscan.go
+++ /dev/null
@@ -1,201 +0,0 @@
-package hscan
-
-import (
- "errors"
- "fmt"
- "reflect"
- "strconv"
-)
-
-// decoderFunc represents decoding functions for default built-in types.
-type decoderFunc func(reflect.Value, string) error
-
-var (
- // List of built-in decoders indexed by their numeric constant values (eg: reflect.Bool = 1).
- decoders = []decoderFunc{
- reflect.Bool: decodeBool,
- reflect.Int: decodeInt,
- reflect.Int8: decodeInt8,
- reflect.Int16: decodeInt16,
- reflect.Int32: decodeInt32,
- reflect.Int64: decodeInt64,
- reflect.Uint: decodeUint,
- reflect.Uint8: decodeUint8,
- reflect.Uint16: decodeUint16,
- reflect.Uint32: decodeUint32,
- reflect.Uint64: decodeUint64,
- reflect.Float32: decodeFloat32,
- reflect.Float64: decodeFloat64,
- reflect.Complex64: decodeUnsupported,
- reflect.Complex128: decodeUnsupported,
- reflect.Array: decodeUnsupported,
- reflect.Chan: decodeUnsupported,
- reflect.Func: decodeUnsupported,
- reflect.Interface: decodeUnsupported,
- reflect.Map: decodeUnsupported,
- reflect.Ptr: decodeUnsupported,
- reflect.Slice: decodeSlice,
- reflect.String: decodeString,
- reflect.Struct: decodeUnsupported,
- reflect.UnsafePointer: decodeUnsupported,
- }
-
- // Global map of struct field specs that is populated once for every new
- // struct type that is scanned. This caches the field types and the corresponding
- // decoder functions to avoid iterating through struct fields on subsequent scans.
- globalStructMap = newStructMap()
-)
-
-func Struct(dst interface{}) (StructValue, error) {
- v := reflect.ValueOf(dst)
-
- // The destination to scan into should be a struct pointer.
- if v.Kind() != reflect.Ptr || v.IsNil() {
- return StructValue{}, fmt.Errorf("redis.Scan(non-pointer %T)", dst)
- }
-
- v = v.Elem()
- if v.Kind() != reflect.Struct {
- return StructValue{}, fmt.Errorf("redis.Scan(non-struct %T)", dst)
- }
-
- return StructValue{
- spec: globalStructMap.get(v.Type()),
- value: v,
- }, nil
-}
-
-// Scan scans the results from a key-value Redis map result set to a destination struct.
-// The Redis keys are matched to the struct's field with the `redis` tag.
-func Scan(dst interface{}, keys []interface{}, vals []interface{}) error {
- if len(keys) != len(vals) {
- return errors.New("args should have the same number of keys and vals")
- }
-
- strct, err := Struct(dst)
- if err != nil {
- return err
- }
-
- // Iterate through the (key, value) sequence.
- for i := 0; i < len(vals); i++ {
- key, ok := keys[i].(string)
- if !ok {
- continue
- }
-
- val, ok := vals[i].(string)
- if !ok {
- continue
- }
-
- if err := strct.Scan(key, val); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func decodeBool(f reflect.Value, s string) error {
- b, err := strconv.ParseBool(s)
- if err != nil {
- return err
- }
- f.SetBool(b)
- return nil
-}
-
-func decodeInt8(f reflect.Value, s string) error {
- return decodeNumber(f, s, 8)
-}
-
-func decodeInt16(f reflect.Value, s string) error {
- return decodeNumber(f, s, 16)
-}
-
-func decodeInt32(f reflect.Value, s string) error {
- return decodeNumber(f, s, 32)
-}
-
-func decodeInt64(f reflect.Value, s string) error {
- return decodeNumber(f, s, 64)
-}
-
-func decodeInt(f reflect.Value, s string) error {
- return decodeNumber(f, s, 0)
-}
-
-func decodeNumber(f reflect.Value, s string, bitSize int) error {
- v, err := strconv.ParseInt(s, 10, bitSize)
- if err != nil {
- return err
- }
- f.SetInt(v)
- return nil
-}
-
-func decodeUint8(f reflect.Value, s string) error {
- return decodeUnsignedNumber(f, s, 8)
-}
-
-func decodeUint16(f reflect.Value, s string) error {
- return decodeUnsignedNumber(f, s, 16)
-}
-
-func decodeUint32(f reflect.Value, s string) error {
- return decodeUnsignedNumber(f, s, 32)
-}
-
-func decodeUint64(f reflect.Value, s string) error {
- return decodeUnsignedNumber(f, s, 64)
-}
-
-func decodeUint(f reflect.Value, s string) error {
- return decodeUnsignedNumber(f, s, 0)
-}
-
-func decodeUnsignedNumber(f reflect.Value, s string, bitSize int) error {
- v, err := strconv.ParseUint(s, 10, bitSize)
- if err != nil {
- return err
- }
- f.SetUint(v)
- return nil
-}
-
-func decodeFloat32(f reflect.Value, s string) error {
- v, err := strconv.ParseFloat(s, 32)
- if err != nil {
- return err
- }
- f.SetFloat(v)
- return nil
-}
-
-// although the default is float64, but we better define it.
-func decodeFloat64(f reflect.Value, s string) error {
- v, err := strconv.ParseFloat(s, 64)
- if err != nil {
- return err
- }
- f.SetFloat(v)
- return nil
-}
-
-func decodeString(f reflect.Value, s string) error {
- f.SetString(s)
- return nil
-}
-
-func decodeSlice(f reflect.Value, s string) error {
- // []byte slice ([]uint8).
- if f.Type().Elem().Kind() == reflect.Uint8 {
- f.SetBytes([]byte(s))
- }
- return nil
-}
-
-func decodeUnsupported(v reflect.Value, s string) error {
- return fmt.Errorf("redis.Scan(unsupported %s)", v.Type())
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/hscan/structmap.go b/vendor/github.com/go-redis/redis/v8/internal/hscan/structmap.go
deleted file mode 100644
index 6839412b..00000000
--- a/vendor/github.com/go-redis/redis/v8/internal/hscan/structmap.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package hscan
-
-import (
- "fmt"
- "reflect"
- "strings"
- "sync"
-)
-
-// structMap contains the map of struct fields for target structs
-// indexed by the struct type.
-type structMap struct {
- m sync.Map
-}
-
-func newStructMap() *structMap {
- return new(structMap)
-}
-
-func (s *structMap) get(t reflect.Type) *structSpec {
- if v, ok := s.m.Load(t); ok {
- return v.(*structSpec)
- }
-
- spec := newStructSpec(t, "redis")
- s.m.Store(t, spec)
- return spec
-}
-
-//------------------------------------------------------------------------------
-
-// structSpec contains the list of all fields in a target struct.
-type structSpec struct {
- m map[string]*structField
-}
-
-func (s *structSpec) set(tag string, sf *structField) {
- s.m[tag] = sf
-}
-
-func newStructSpec(t reflect.Type, fieldTag string) *structSpec {
- numField := t.NumField()
- out := &structSpec{
- m: make(map[string]*structField, numField),
- }
-
- for i := 0; i < numField; i++ {
- f := t.Field(i)
-
- tag := f.Tag.Get(fieldTag)
- if tag == "" || tag == "-" {
- continue
- }
-
- tag = strings.Split(tag, ",")[0]
- if tag == "" {
- continue
- }
-
- // Use the built-in decoder.
- out.set(tag, &structField{index: i, fn: decoders[f.Type.Kind()]})
- }
-
- return out
-}
-
-//------------------------------------------------------------------------------
-
-// structField represents a single field in a target struct.
-type structField struct {
- index int
- fn decoderFunc
-}
-
-//------------------------------------------------------------------------------
-
-type StructValue struct {
- spec *structSpec
- value reflect.Value
-}
-
-func (s StructValue) Scan(key string, value string) error {
- field, ok := s.spec.m[key]
- if !ok {
- return nil
- }
- if err := field.fn(s.value.Field(field.index), value); err != nil {
- t := s.value.Type()
- return fmt.Errorf("cannot scan redis.result %s into struct field %s.%s of type %s, error-%s",
- value, t.Name(), t.Field(field.index).Name, t.Field(field.index).Type, err.Error())
- }
- return nil
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/internal.go b/vendor/github.com/go-redis/redis/v8/internal/internal.go
deleted file mode 100644
index 4a59c599..00000000
--- a/vendor/github.com/go-redis/redis/v8/internal/internal.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package internal
-
-import (
- "time"
-
- "github.com/go-redis/redis/v8/internal/rand"
-)
-
-func RetryBackoff(retry int, minBackoff, maxBackoff time.Duration) time.Duration {
- if retry < 0 {
- panic("not reached")
- }
- if minBackoff == 0 {
- return 0
- }
-
- d := minBackoff << uint(retry)
- if d < minBackoff {
- return maxBackoff
- }
-
- d = minBackoff + time.Duration(rand.Int63n(int64(d)))
-
- if d > maxBackoff || d < minBackoff {
- d = maxBackoff
- }
-
- return d
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/log.go b/vendor/github.com/go-redis/redis/v8/internal/log.go
deleted file mode 100644
index c8b9213d..00000000
--- a/vendor/github.com/go-redis/redis/v8/internal/log.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package internal
-
-import (
- "context"
- "fmt"
- "log"
- "os"
-)
-
-type Logging interface {
- Printf(ctx context.Context, format string, v ...interface{})
-}
-
-type logger struct {
- log *log.Logger
-}
-
-func (l *logger) Printf(ctx context.Context, format string, v ...interface{}) {
- _ = l.log.Output(2, fmt.Sprintf(format, v...))
-}
-
-// Logger calls Output to print to the stderr.
-// Arguments are handled in the manner of fmt.Print.
-var Logger Logging = &logger{
- log: log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile),
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/once.go b/vendor/github.com/go-redis/redis/v8/internal/once.go
deleted file mode 100644
index 64f46272..00000000
--- a/vendor/github.com/go-redis/redis/v8/internal/once.go
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
-Copyright 2014 The Camlistore Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package internal
-
-import (
- "sync"
- "sync/atomic"
-)
-
-// A Once will perform a successful action exactly once.
-//
-// Unlike a sync.Once, this Once's func returns an error
-// and is re-armed on failure.
-type Once struct {
- m sync.Mutex
- done uint32
-}
-
-// Do calls the function f if and only if Do has not been invoked
-// without error for this instance of Once. In other words, given
-// var once Once
-// if once.Do(f) is called multiple times, only the first call will
-// invoke f, even if f has a different value in each invocation unless
-// f returns an error. A new instance of Once is required for each
-// function to execute.
-//
-// Do is intended for initialization that must be run exactly once. Since f
-// is niladic, it may be necessary to use a function literal to capture the
-// arguments to a function to be invoked by Do:
-// err := config.once.Do(func() error { return config.init(filename) })
-func (o *Once) Do(f func() error) error {
- if atomic.LoadUint32(&o.done) == 1 {
- return nil
- }
- // Slow-path.
- o.m.Lock()
- defer o.m.Unlock()
- var err error
- if o.done == 0 {
- err = f()
- if err == nil {
- atomic.StoreUint32(&o.done, 1)
- }
- }
- return err
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/conn.go b/vendor/github.com/go-redis/redis/v8/internal/pool/conn.go
deleted file mode 100644
index 56616598..00000000
--- a/vendor/github.com/go-redis/redis/v8/internal/pool/conn.go
+++ /dev/null
@@ -1,121 +0,0 @@
-package pool
-
-import (
- "bufio"
- "context"
- "net"
- "sync/atomic"
- "time"
-
- "github.com/go-redis/redis/v8/internal/proto"
-)
-
-var noDeadline = time.Time{}
-
-type Conn struct {
- usedAt int64 // atomic
- netConn net.Conn
-
- rd *proto.Reader
- bw *bufio.Writer
- wr *proto.Writer
-
- Inited bool
- pooled bool
- createdAt time.Time
-}
-
-func NewConn(netConn net.Conn) *Conn {
- cn := &Conn{
- netConn: netConn,
- createdAt: time.Now(),
- }
- cn.rd = proto.NewReader(netConn)
- cn.bw = bufio.NewWriter(netConn)
- cn.wr = proto.NewWriter(cn.bw)
- cn.SetUsedAt(time.Now())
- return cn
-}
-
-func (cn *Conn) UsedAt() time.Time {
- unix := atomic.LoadInt64(&cn.usedAt)
- return time.Unix(unix, 0)
-}
-
-func (cn *Conn) SetUsedAt(tm time.Time) {
- atomic.StoreInt64(&cn.usedAt, tm.Unix())
-}
-
-func (cn *Conn) SetNetConn(netConn net.Conn) {
- cn.netConn = netConn
- cn.rd.Reset(netConn)
- cn.bw.Reset(netConn)
-}
-
-func (cn *Conn) Write(b []byte) (int, error) {
- return cn.netConn.Write(b)
-}
-
-func (cn *Conn) RemoteAddr() net.Addr {
- if cn.netConn != nil {
- return cn.netConn.RemoteAddr()
- }
- return nil
-}
-
-func (cn *Conn) WithReader(ctx context.Context, timeout time.Duration, fn func(rd *proto.Reader) error) error {
- if err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout)); err != nil {
- return err
- }
- return fn(cn.rd)
-}
-
-func (cn *Conn) WithWriter(
- ctx context.Context, timeout time.Duration, fn func(wr *proto.Writer) error,
-) error {
- if err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout)); err != nil {
- return err
- }
-
- if cn.bw.Buffered() > 0 {
- cn.bw.Reset(cn.netConn)
- }
-
- if err := fn(cn.wr); err != nil {
- return err
- }
-
- return cn.bw.Flush()
-}
-
-func (cn *Conn) Close() error {
- return cn.netConn.Close()
-}
-
-func (cn *Conn) deadline(ctx context.Context, timeout time.Duration) time.Time {
- tm := time.Now()
- cn.SetUsedAt(tm)
-
- if timeout > 0 {
- tm = tm.Add(timeout)
- }
-
- if ctx != nil {
- deadline, ok := ctx.Deadline()
- if ok {
- if timeout == 0 {
- return deadline
- }
- if deadline.Before(tm) {
- return deadline
- }
- return tm
- }
- }
-
- if timeout > 0 {
- return tm
- }
-
- return noDeadline
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/pool.go b/vendor/github.com/go-redis/redis/v8/internal/pool/pool.go
deleted file mode 100644
index 44a4e779..00000000
--- a/vendor/github.com/go-redis/redis/v8/internal/pool/pool.go
+++ /dev/null
@@ -1,557 +0,0 @@
-package pool
-
-import (
- "context"
- "errors"
- "net"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/go-redis/redis/v8/internal"
-)
-
-var (
- // ErrClosed performs any operation on the closed client will return this error.
- ErrClosed = errors.New("redis: client is closed")
-
- // ErrPoolTimeout timed out waiting to get a connection from the connection pool.
- ErrPoolTimeout = errors.New("redis: connection pool timeout")
-)
-
-var timers = sync.Pool{
- New: func() interface{} {
- t := time.NewTimer(time.Hour)
- t.Stop()
- return t
- },
-}
-
-// Stats contains pool state information and accumulated stats.
-type Stats struct {
- Hits uint32 // number of times free connection was found in the pool
- Misses uint32 // number of times free connection was NOT found in the pool
- Timeouts uint32 // number of times a wait timeout occurred
-
- TotalConns uint32 // number of total connections in the pool
- IdleConns uint32 // number of idle connections in the pool
- StaleConns uint32 // number of stale connections removed from the pool
-}
-
-type Pooler interface {
- NewConn(context.Context) (*Conn, error)
- CloseConn(*Conn) error
-
- Get(context.Context) (*Conn, error)
- Put(context.Context, *Conn)
- Remove(context.Context, *Conn, error)
-
- Len() int
- IdleLen() int
- Stats() *Stats
-
- Close() error
-}
-
-type Options struct {
- Dialer func(context.Context) (net.Conn, error)
- OnClose func(*Conn) error
-
- PoolFIFO bool
- PoolSize int
- MinIdleConns int
- MaxConnAge time.Duration
- PoolTimeout time.Duration
- IdleTimeout time.Duration
- IdleCheckFrequency time.Duration
-}
-
-type lastDialErrorWrap struct {
- err error
-}
-
-type ConnPool struct {
- opt *Options
-
- dialErrorsNum uint32 // atomic
-
- lastDialError atomic.Value
-
- queue chan struct{}
-
- connsMu sync.Mutex
- conns []*Conn
- idleConns []*Conn
- poolSize int
- idleConnsLen int
-
- stats Stats
-
- _closed uint32 // atomic
- closedCh chan struct{}
-}
-
-var _ Pooler = (*ConnPool)(nil)
-
-func NewConnPool(opt *Options) *ConnPool {
- p := &ConnPool{
- opt: opt,
-
- queue: make(chan struct{}, opt.PoolSize),
- conns: make([]*Conn, 0, opt.PoolSize),
- idleConns: make([]*Conn, 0, opt.PoolSize),
- closedCh: make(chan struct{}),
- }
-
- p.connsMu.Lock()
- p.checkMinIdleConns()
- p.connsMu.Unlock()
-
- if opt.IdleTimeout > 0 && opt.IdleCheckFrequency > 0 {
- go p.reaper(opt.IdleCheckFrequency)
- }
-
- return p
-}
-
-func (p *ConnPool) checkMinIdleConns() {
- if p.opt.MinIdleConns == 0 {
- return
- }
- for p.poolSize < p.opt.PoolSize && p.idleConnsLen < p.opt.MinIdleConns {
- p.poolSize++
- p.idleConnsLen++
-
- go func() {
- err := p.addIdleConn()
- if err != nil && err != ErrClosed {
- p.connsMu.Lock()
- p.poolSize--
- p.idleConnsLen--
- p.connsMu.Unlock()
- }
- }()
- }
-}
-
-func (p *ConnPool) addIdleConn() error {
- cn, err := p.dialConn(context.TODO(), true)
- if err != nil {
- return err
- }
-
- p.connsMu.Lock()
- defer p.connsMu.Unlock()
-
- // It is not allowed to add new connections to the closed connection pool.
- if p.closed() {
- _ = cn.Close()
- return ErrClosed
- }
-
- p.conns = append(p.conns, cn)
- p.idleConns = append(p.idleConns, cn)
- return nil
-}
-
-func (p *ConnPool) NewConn(ctx context.Context) (*Conn, error) {
- return p.newConn(ctx, false)
-}
-
-func (p *ConnPool) newConn(ctx context.Context, pooled bool) (*Conn, error) {
- cn, err := p.dialConn(ctx, pooled)
- if err != nil {
- return nil, err
- }
-
- p.connsMu.Lock()
- defer p.connsMu.Unlock()
-
- // It is not allowed to add new connections to the closed connection pool.
- if p.closed() {
- _ = cn.Close()
- return nil, ErrClosed
- }
-
- p.conns = append(p.conns, cn)
- if pooled {
- // If pool is full remove the cn on next Put.
- if p.poolSize >= p.opt.PoolSize {
- cn.pooled = false
- } else {
- p.poolSize++
- }
- }
-
- return cn, nil
-}
-
-func (p *ConnPool) dialConn(ctx context.Context, pooled bool) (*Conn, error) {
- if p.closed() {
- return nil, ErrClosed
- }
-
- if atomic.LoadUint32(&p.dialErrorsNum) >= uint32(p.opt.PoolSize) {
- return nil, p.getLastDialError()
- }
-
- netConn, err := p.opt.Dialer(ctx)
- if err != nil {
- p.setLastDialError(err)
- if atomic.AddUint32(&p.dialErrorsNum, 1) == uint32(p.opt.PoolSize) {
- go p.tryDial()
- }
- return nil, err
- }
-
- cn := NewConn(netConn)
- cn.pooled = pooled
- return cn, nil
-}
-
-func (p *ConnPool) tryDial() {
- for {
- if p.closed() {
- return
- }
-
- conn, err := p.opt.Dialer(context.Background())
- if err != nil {
- p.setLastDialError(err)
- time.Sleep(time.Second)
- continue
- }
-
- atomic.StoreUint32(&p.dialErrorsNum, 0)
- _ = conn.Close()
- return
- }
-}
-
-func (p *ConnPool) setLastDialError(err error) {
- p.lastDialError.Store(&lastDialErrorWrap{err: err})
-}
-
-func (p *ConnPool) getLastDialError() error {
- err, _ := p.lastDialError.Load().(*lastDialErrorWrap)
- if err != nil {
- return err.err
- }
- return nil
-}
-
-// Get returns existed connection from the pool or creates a new one.
-func (p *ConnPool) Get(ctx context.Context) (*Conn, error) {
- if p.closed() {
- return nil, ErrClosed
- }
-
- if err := p.waitTurn(ctx); err != nil {
- return nil, err
- }
-
- for {
- p.connsMu.Lock()
- cn, err := p.popIdle()
- p.connsMu.Unlock()
-
- if err != nil {
- return nil, err
- }
-
- if cn == nil {
- break
- }
-
- if p.isStaleConn(cn) {
- _ = p.CloseConn(cn)
- continue
- }
-
- atomic.AddUint32(&p.stats.Hits, 1)
- return cn, nil
- }
-
- atomic.AddUint32(&p.stats.Misses, 1)
-
- newcn, err := p.newConn(ctx, true)
- if err != nil {
- p.freeTurn()
- return nil, err
- }
-
- return newcn, nil
-}
-
-func (p *ConnPool) getTurn() {
- p.queue <- struct{}{}
-}
-
-func (p *ConnPool) waitTurn(ctx context.Context) error {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- select {
- case p.queue <- struct{}{}:
- return nil
- default:
- }
-
- timer := timers.Get().(*time.Timer)
- timer.Reset(p.opt.PoolTimeout)
-
- select {
- case <-ctx.Done():
- if !timer.Stop() {
- <-timer.C
- }
- timers.Put(timer)
- return ctx.Err()
- case p.queue <- struct{}{}:
- if !timer.Stop() {
- <-timer.C
- }
- timers.Put(timer)
- return nil
- case <-timer.C:
- timers.Put(timer)
- atomic.AddUint32(&p.stats.Timeouts, 1)
- return ErrPoolTimeout
- }
-}
-
-func (p *ConnPool) freeTurn() {
- <-p.queue
-}
-
-func (p *ConnPool) popIdle() (*Conn, error) {
- if p.closed() {
- return nil, ErrClosed
- }
- n := len(p.idleConns)
- if n == 0 {
- return nil, nil
- }
-
- var cn *Conn
- if p.opt.PoolFIFO {
- cn = p.idleConns[0]
- copy(p.idleConns, p.idleConns[1:])
- p.idleConns = p.idleConns[:n-1]
- } else {
- idx := n - 1
- cn = p.idleConns[idx]
- p.idleConns = p.idleConns[:idx]
- }
- p.idleConnsLen--
- p.checkMinIdleConns()
- return cn, nil
-}
-
-func (p *ConnPool) Put(ctx context.Context, cn *Conn) {
- if cn.rd.Buffered() > 0 {
- internal.Logger.Printf(ctx, "Conn has unread data")
- p.Remove(ctx, cn, BadConnError{})
- return
- }
-
- if !cn.pooled {
- p.Remove(ctx, cn, nil)
- return
- }
-
- p.connsMu.Lock()
- p.idleConns = append(p.idleConns, cn)
- p.idleConnsLen++
- p.connsMu.Unlock()
- p.freeTurn()
-}
-
-func (p *ConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
- p.removeConnWithLock(cn)
- p.freeTurn()
- _ = p.closeConn(cn)
-}
-
-func (p *ConnPool) CloseConn(cn *Conn) error {
- p.removeConnWithLock(cn)
- return p.closeConn(cn)
-}
-
-func (p *ConnPool) removeConnWithLock(cn *Conn) {
- p.connsMu.Lock()
- p.removeConn(cn)
- p.connsMu.Unlock()
-}
-
-func (p *ConnPool) removeConn(cn *Conn) {
- for i, c := range p.conns {
- if c == cn {
- p.conns = append(p.conns[:i], p.conns[i+1:]...)
- if cn.pooled {
- p.poolSize--
- p.checkMinIdleConns()
- }
- return
- }
- }
-}
-
-func (p *ConnPool) closeConn(cn *Conn) error {
- if p.opt.OnClose != nil {
- _ = p.opt.OnClose(cn)
- }
- return cn.Close()
-}
-
-// Len returns total number of connections.
-func (p *ConnPool) Len() int {
- p.connsMu.Lock()
- n := len(p.conns)
- p.connsMu.Unlock()
- return n
-}
-
-// IdleLen returns number of idle connections.
-func (p *ConnPool) IdleLen() int {
- p.connsMu.Lock()
- n := p.idleConnsLen
- p.connsMu.Unlock()
- return n
-}
-
-func (p *ConnPool) Stats() *Stats {
- idleLen := p.IdleLen()
- return &Stats{
- Hits: atomic.LoadUint32(&p.stats.Hits),
- Misses: atomic.LoadUint32(&p.stats.Misses),
- Timeouts: atomic.LoadUint32(&p.stats.Timeouts),
-
- TotalConns: uint32(p.Len()),
- IdleConns: uint32(idleLen),
- StaleConns: atomic.LoadUint32(&p.stats.StaleConns),
- }
-}
-
-func (p *ConnPool) closed() bool {
- return atomic.LoadUint32(&p._closed) == 1
-}
-
-func (p *ConnPool) Filter(fn func(*Conn) bool) error {
- p.connsMu.Lock()
- defer p.connsMu.Unlock()
-
- var firstErr error
- for _, cn := range p.conns {
- if fn(cn) {
- if err := p.closeConn(cn); err != nil && firstErr == nil {
- firstErr = err
- }
- }
- }
- return firstErr
-}
-
-func (p *ConnPool) Close() error {
- if !atomic.CompareAndSwapUint32(&p._closed, 0, 1) {
- return ErrClosed
- }
- close(p.closedCh)
-
- var firstErr error
- p.connsMu.Lock()
- for _, cn := range p.conns {
- if err := p.closeConn(cn); err != nil && firstErr == nil {
- firstErr = err
- }
- }
- p.conns = nil
- p.poolSize = 0
- p.idleConns = nil
- p.idleConnsLen = 0
- p.connsMu.Unlock()
-
- return firstErr
-}
-
-func (p *ConnPool) reaper(frequency time.Duration) {
- ticker := time.NewTicker(frequency)
- defer ticker.Stop()
-
- for {
- select {
- case <-ticker.C:
- // It is possible that ticker and closedCh arrive together,
- // and select pseudo-randomly pick ticker case, we double
- // check here to prevent being executed after closed.
- if p.closed() {
- return
- }
- _, err := p.ReapStaleConns()
- if err != nil {
- internal.Logger.Printf(context.Background(), "ReapStaleConns failed: %s", err)
- continue
- }
- case <-p.closedCh:
- return
- }
- }
-}
-
-func (p *ConnPool) ReapStaleConns() (int, error) {
- var n int
- for {
- p.getTurn()
-
- p.connsMu.Lock()
- cn := p.reapStaleConn()
- p.connsMu.Unlock()
-
- p.freeTurn()
-
- if cn != nil {
- _ = p.closeConn(cn)
- n++
- } else {
- break
- }
- }
- atomic.AddUint32(&p.stats.StaleConns, uint32(n))
- return n, nil
-}
-
-func (p *ConnPool) reapStaleConn() *Conn {
- if len(p.idleConns) == 0 {
- return nil
- }
-
- cn := p.idleConns[0]
- if !p.isStaleConn(cn) {
- return nil
- }
-
- p.idleConns = append(p.idleConns[:0], p.idleConns[1:]...)
- p.idleConnsLen--
- p.removeConn(cn)
-
- return cn
-}
-
-func (p *ConnPool) isStaleConn(cn *Conn) bool {
- if p.opt.IdleTimeout == 0 && p.opt.MaxConnAge == 0 {
- return false
- }
-
- now := time.Now()
- if p.opt.IdleTimeout > 0 && now.Sub(cn.UsedAt()) >= p.opt.IdleTimeout {
- return true
- }
- if p.opt.MaxConnAge > 0 && now.Sub(cn.createdAt) >= p.opt.MaxConnAge {
- return true
- }
-
- return false
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/pool_single.go b/vendor/github.com/go-redis/redis/v8/internal/pool/pool_single.go
deleted file mode 100644
index 5a3fde19..00000000
--- a/vendor/github.com/go-redis/redis/v8/internal/pool/pool_single.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package pool
-
-import "context"
-
-type SingleConnPool struct {
- pool Pooler
- cn *Conn
- stickyErr error
-}
-
-var _ Pooler = (*SingleConnPool)(nil)
-
-func NewSingleConnPool(pool Pooler, cn *Conn) *SingleConnPool {
- return &SingleConnPool{
- pool: pool,
- cn: cn,
- }
-}
-
-func (p *SingleConnPool) NewConn(ctx context.Context) (*Conn, error) {
- return p.pool.NewConn(ctx)
-}
-
-func (p *SingleConnPool) CloseConn(cn *Conn) error {
- return p.pool.CloseConn(cn)
-}
-
-func (p *SingleConnPool) Get(ctx context.Context) (*Conn, error) {
- if p.stickyErr != nil {
- return nil, p.stickyErr
- }
- return p.cn, nil
-}
-
-func (p *SingleConnPool) Put(ctx context.Context, cn *Conn) {}
-
-func (p *SingleConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
- p.cn = nil
- p.stickyErr = reason
-}
-
-func (p *SingleConnPool) Close() error {
- p.cn = nil
- p.stickyErr = ErrClosed
- return nil
-}
-
-func (p *SingleConnPool) Len() int {
- return 0
-}
-
-func (p *SingleConnPool) IdleLen() int {
- return 0
-}
-
-func (p *SingleConnPool) Stats() *Stats {
- return &Stats{}
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go b/vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go
deleted file mode 100644
index 3adb99bc..00000000
--- a/vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go
+++ /dev/null
@@ -1,201 +0,0 @@
-package pool
-
-import (
- "context"
- "errors"
- "fmt"
- "sync/atomic"
-)
-
-const (
- stateDefault = 0
- stateInited = 1
- stateClosed = 2
-)
-
-type BadConnError struct {
- wrapped error
-}
-
-var _ error = (*BadConnError)(nil)
-
-func (e BadConnError) Error() string {
- s := "redis: Conn is in a bad state"
- if e.wrapped != nil {
- s += ": " + e.wrapped.Error()
- }
- return s
-}
-
-func (e BadConnError) Unwrap() error {
- return e.wrapped
-}
-
-//------------------------------------------------------------------------------
-
-type StickyConnPool struct {
- pool Pooler
- shared int32 // atomic
-
- state uint32 // atomic
- ch chan *Conn
-
- _badConnError atomic.Value
-}
-
-var _ Pooler = (*StickyConnPool)(nil)
-
-func NewStickyConnPool(pool Pooler) *StickyConnPool {
- p, ok := pool.(*StickyConnPool)
- if !ok {
- p = &StickyConnPool{
- pool: pool,
- ch: make(chan *Conn, 1),
- }
- }
- atomic.AddInt32(&p.shared, 1)
- return p
-}
-
-func (p *StickyConnPool) NewConn(ctx context.Context) (*Conn, error) {
- return p.pool.NewConn(ctx)
-}
-
-func (p *StickyConnPool) CloseConn(cn *Conn) error {
- return p.pool.CloseConn(cn)
-}
-
-func (p *StickyConnPool) Get(ctx context.Context) (*Conn, error) {
- // In worst case this races with Close which is not a very common operation.
- for i := 0; i < 1000; i++ {
- switch atomic.LoadUint32(&p.state) {
- case stateDefault:
- cn, err := p.pool.Get(ctx)
- if err != nil {
- return nil, err
- }
- if atomic.CompareAndSwapUint32(&p.state, stateDefault, stateInited) {
- return cn, nil
- }
- p.pool.Remove(ctx, cn, ErrClosed)
- case stateInited:
- if err := p.badConnError(); err != nil {
- return nil, err
- }
- cn, ok := <-p.ch
- if !ok {
- return nil, ErrClosed
- }
- return cn, nil
- case stateClosed:
- return nil, ErrClosed
- default:
- panic("not reached")
- }
- }
- return nil, fmt.Errorf("redis: StickyConnPool.Get: infinite loop")
-}
-
-func (p *StickyConnPool) Put(ctx context.Context, cn *Conn) {
- defer func() {
- if recover() != nil {
- p.freeConn(ctx, cn)
- }
- }()
- p.ch <- cn
-}
-
-func (p *StickyConnPool) freeConn(ctx context.Context, cn *Conn) {
- if err := p.badConnError(); err != nil {
- p.pool.Remove(ctx, cn, err)
- } else {
- p.pool.Put(ctx, cn)
- }
-}
-
-func (p *StickyConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
- defer func() {
- if recover() != nil {
- p.pool.Remove(ctx, cn, ErrClosed)
- }
- }()
- p._badConnError.Store(BadConnError{wrapped: reason})
- p.ch <- cn
-}
-
-func (p *StickyConnPool) Close() error {
- if shared := atomic.AddInt32(&p.shared, -1); shared > 0 {
- return nil
- }
-
- for i := 0; i < 1000; i++ {
- state := atomic.LoadUint32(&p.state)
- if state == stateClosed {
- return ErrClosed
- }
- if atomic.CompareAndSwapUint32(&p.state, state, stateClosed) {
- close(p.ch)
- cn, ok := <-p.ch
- if ok {
- p.freeConn(context.TODO(), cn)
- }
- return nil
- }
- }
-
- return errors.New("redis: StickyConnPool.Close: infinite loop")
-}
-
-func (p *StickyConnPool) Reset(ctx context.Context) error {
- if p.badConnError() == nil {
- return nil
- }
-
- select {
- case cn, ok := <-p.ch:
- if !ok {
- return ErrClosed
- }
- p.pool.Remove(ctx, cn, ErrClosed)
- p._badConnError.Store(BadConnError{wrapped: nil})
- default:
- return errors.New("redis: StickyConnPool does not have a Conn")
- }
-
- if !atomic.CompareAndSwapUint32(&p.state, stateInited, stateDefault) {
- state := atomic.LoadUint32(&p.state)
- return fmt.Errorf("redis: invalid StickyConnPool state: %d", state)
- }
-
- return nil
-}
-
-func (p *StickyConnPool) badConnError() error {
- if v := p._badConnError.Load(); v != nil {
- if err := v.(BadConnError); err.wrapped != nil {
- return err
- }
- }
- return nil
-}
-
-func (p *StickyConnPool) Len() int {
- switch atomic.LoadUint32(&p.state) {
- case stateDefault:
- return 0
- case stateInited:
- return 1
- case stateClosed:
- return 0
- default:
- panic("not reached")
- }
-}
-
-func (p *StickyConnPool) IdleLen() int {
- return len(p.ch)
-}
-
-func (p *StickyConnPool) Stats() *Stats {
- return &Stats{}
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go b/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go
deleted file mode 100644
index 0e6ca779..00000000
--- a/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go
+++ /dev/null
@@ -1,332 +0,0 @@
-package proto
-
-import (
- "bufio"
- "fmt"
- "io"
-
- "github.com/go-redis/redis/v8/internal/util"
-)
-
-// redis resp protocol data type.
-const (
- ErrorReply = '-'
- StatusReply = '+'
- IntReply = ':'
- StringReply = '$'
- ArrayReply = '*'
-)
-
-//------------------------------------------------------------------------------
-
-const Nil = RedisError("redis: nil") // nolint:errname
-
-type RedisError string
-
-func (e RedisError) Error() string { return string(e) }
-
-func (RedisError) RedisError() {}
-
-//------------------------------------------------------------------------------
-
-type MultiBulkParse func(*Reader, int64) (interface{}, error)
-
-type Reader struct {
- rd *bufio.Reader
- _buf []byte
-}
-
-func NewReader(rd io.Reader) *Reader {
- return &Reader{
- rd: bufio.NewReader(rd),
- _buf: make([]byte, 64),
- }
-}
-
-func (r *Reader) Buffered() int {
- return r.rd.Buffered()
-}
-
-func (r *Reader) Peek(n int) ([]byte, error) {
- return r.rd.Peek(n)
-}
-
-func (r *Reader) Reset(rd io.Reader) {
- r.rd.Reset(rd)
-}
-
-func (r *Reader) ReadLine() ([]byte, error) {
- line, err := r.readLine()
- if err != nil {
- return nil, err
- }
- if isNilReply(line) {
- return nil, Nil
- }
- return line, nil
-}
-
-// readLine that returns an error if:
-// - there is a pending read error;
-// - or line does not end with \r\n.
-func (r *Reader) readLine() ([]byte, error) {
- b, err := r.rd.ReadSlice('\n')
- if err != nil {
- if err != bufio.ErrBufferFull {
- return nil, err
- }
-
- full := make([]byte, len(b))
- copy(full, b)
-
- b, err = r.rd.ReadBytes('\n')
- if err != nil {
- return nil, err
- }
-
- full = append(full, b...) //nolint:makezero
- b = full
- }
- if len(b) <= 2 || b[len(b)-1] != '\n' || b[len(b)-2] != '\r' {
- return nil, fmt.Errorf("redis: invalid reply: %q", b)
- }
- return b[:len(b)-2], nil
-}
-
-func (r *Reader) ReadReply(m MultiBulkParse) (interface{}, error) {
- line, err := r.ReadLine()
- if err != nil {
- return nil, err
- }
-
- switch line[0] {
- case ErrorReply:
- return nil, ParseErrorReply(line)
- case StatusReply:
- return string(line[1:]), nil
- case IntReply:
- return util.ParseInt(line[1:], 10, 64)
- case StringReply:
- return r.readStringReply(line)
- case ArrayReply:
- n, err := parseArrayLen(line)
- if err != nil {
- return nil, err
- }
- if m == nil {
- err := fmt.Errorf("redis: got %.100q, but multi bulk parser is nil", line)
- return nil, err
- }
- return m(r, n)
- }
- return nil, fmt.Errorf("redis: can't parse %.100q", line)
-}
-
-func (r *Reader) ReadIntReply() (int64, error) {
- line, err := r.ReadLine()
- if err != nil {
- return 0, err
- }
- switch line[0] {
- case ErrorReply:
- return 0, ParseErrorReply(line)
- case IntReply:
- return util.ParseInt(line[1:], 10, 64)
- default:
- return 0, fmt.Errorf("redis: can't parse int reply: %.100q", line)
- }
-}
-
-func (r *Reader) ReadString() (string, error) {
- line, err := r.ReadLine()
- if err != nil {
- return "", err
- }
- switch line[0] {
- case ErrorReply:
- return "", ParseErrorReply(line)
- case StringReply:
- return r.readStringReply(line)
- case StatusReply:
- return string(line[1:]), nil
- case IntReply:
- return string(line[1:]), nil
- default:
- return "", fmt.Errorf("redis: can't parse reply=%.100q reading string", line)
- }
-}
-
-func (r *Reader) readStringReply(line []byte) (string, error) {
- if isNilReply(line) {
- return "", Nil
- }
-
- replyLen, err := util.Atoi(line[1:])
- if err != nil {
- return "", err
- }
-
- b := make([]byte, replyLen+2)
- _, err = io.ReadFull(r.rd, b)
- if err != nil {
- return "", err
- }
-
- return util.BytesToString(b[:replyLen]), nil
-}
-
-func (r *Reader) ReadArrayReply(m MultiBulkParse) (interface{}, error) {
- line, err := r.ReadLine()
- if err != nil {
- return nil, err
- }
- switch line[0] {
- case ErrorReply:
- return nil, ParseErrorReply(line)
- case ArrayReply:
- n, err := parseArrayLen(line)
- if err != nil {
- return nil, err
- }
- return m(r, n)
- default:
- return nil, fmt.Errorf("redis: can't parse array reply: %.100q", line)
- }
-}
-
-func (r *Reader) ReadArrayLen() (int, error) {
- line, err := r.ReadLine()
- if err != nil {
- return 0, err
- }
- switch line[0] {
- case ErrorReply:
- return 0, ParseErrorReply(line)
- case ArrayReply:
- n, err := parseArrayLen(line)
- if err != nil {
- return 0, err
- }
- return int(n), nil
- default:
- return 0, fmt.Errorf("redis: can't parse array reply: %.100q", line)
- }
-}
-
-func (r *Reader) ReadScanReply() ([]string, uint64, error) {
- n, err := r.ReadArrayLen()
- if err != nil {
- return nil, 0, err
- }
- if n != 2 {
- return nil, 0, fmt.Errorf("redis: got %d elements in scan reply, expected 2", n)
- }
-
- cursor, err := r.ReadUint()
- if err != nil {
- return nil, 0, err
- }
-
- n, err = r.ReadArrayLen()
- if err != nil {
- return nil, 0, err
- }
-
- keys := make([]string, n)
-
- for i := 0; i < n; i++ {
- key, err := r.ReadString()
- if err != nil {
- return nil, 0, err
- }
- keys[i] = key
- }
-
- return keys, cursor, err
-}
-
-func (r *Reader) ReadInt() (int64, error) {
- b, err := r.readTmpBytesReply()
- if err != nil {
- return 0, err
- }
- return util.ParseInt(b, 10, 64)
-}
-
-func (r *Reader) ReadUint() (uint64, error) {
- b, err := r.readTmpBytesReply()
- if err != nil {
- return 0, err
- }
- return util.ParseUint(b, 10, 64)
-}
-
-func (r *Reader) ReadFloatReply() (float64, error) {
- b, err := r.readTmpBytesReply()
- if err != nil {
- return 0, err
- }
- return util.ParseFloat(b, 64)
-}
-
-func (r *Reader) readTmpBytesReply() ([]byte, error) {
- line, err := r.ReadLine()
- if err != nil {
- return nil, err
- }
- switch line[0] {
- case ErrorReply:
- return nil, ParseErrorReply(line)
- case StringReply:
- return r._readTmpBytesReply(line)
- case StatusReply:
- return line[1:], nil
- default:
- return nil, fmt.Errorf("redis: can't parse string reply: %.100q", line)
- }
-}
-
-func (r *Reader) _readTmpBytesReply(line []byte) ([]byte, error) {
- if isNilReply(line) {
- return nil, Nil
- }
-
- replyLen, err := util.Atoi(line[1:])
- if err != nil {
- return nil, err
- }
-
- buf := r.buf(replyLen + 2)
- _, err = io.ReadFull(r.rd, buf)
- if err != nil {
- return nil, err
- }
-
- return buf[:replyLen], nil
-}
-
-func (r *Reader) buf(n int) []byte {
- if n <= cap(r._buf) {
- return r._buf[:n]
- }
- d := n - cap(r._buf)
- r._buf = append(r._buf, make([]byte, d)...)
- return r._buf
-}
-
-func isNilReply(b []byte) bool {
- return len(b) == 3 &&
- (b[0] == StringReply || b[0] == ArrayReply) &&
- b[1] == '-' && b[2] == '1'
-}
-
-func ParseErrorReply(line []byte) error {
- return RedisError(string(line[1:]))
-}
-
-func parseArrayLen(line []byte) (int64, error) {
- if isNilReply(line) {
- return 0, Nil
- }
- return util.ParseInt(line[1:], 10, 64)
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go b/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go
deleted file mode 100644
index 0e994765..00000000
--- a/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go
+++ /dev/null
@@ -1,180 +0,0 @@
-package proto
-
-import (
- "encoding"
- "fmt"
- "reflect"
- "time"
-
- "github.com/go-redis/redis/v8/internal/util"
-)
-
-// Scan parses bytes `b` to `v` with appropriate type.
-//nolint:gocyclo
-func Scan(b []byte, v interface{}) error {
- switch v := v.(type) {
- case nil:
- return fmt.Errorf("redis: Scan(nil)")
- case *string:
- *v = util.BytesToString(b)
- return nil
- case *[]byte:
- *v = b
- return nil
- case *int:
- var err error
- *v, err = util.Atoi(b)
- return err
- case *int8:
- n, err := util.ParseInt(b, 10, 8)
- if err != nil {
- return err
- }
- *v = int8(n)
- return nil
- case *int16:
- n, err := util.ParseInt(b, 10, 16)
- if err != nil {
- return err
- }
- *v = int16(n)
- return nil
- case *int32:
- n, err := util.ParseInt(b, 10, 32)
- if err != nil {
- return err
- }
- *v = int32(n)
- return nil
- case *int64:
- n, err := util.ParseInt(b, 10, 64)
- if err != nil {
- return err
- }
- *v = n
- return nil
- case *uint:
- n, err := util.ParseUint(b, 10, 64)
- if err != nil {
- return err
- }
- *v = uint(n)
- return nil
- case *uint8:
- n, err := util.ParseUint(b, 10, 8)
- if err != nil {
- return err
- }
- *v = uint8(n)
- return nil
- case *uint16:
- n, err := util.ParseUint(b, 10, 16)
- if err != nil {
- return err
- }
- *v = uint16(n)
- return nil
- case *uint32:
- n, err := util.ParseUint(b, 10, 32)
- if err != nil {
- return err
- }
- *v = uint32(n)
- return nil
- case *uint64:
- n, err := util.ParseUint(b, 10, 64)
- if err != nil {
- return err
- }
- *v = n
- return nil
- case *float32:
- n, err := util.ParseFloat(b, 32)
- if err != nil {
- return err
- }
- *v = float32(n)
- return err
- case *float64:
- var err error
- *v, err = util.ParseFloat(b, 64)
- return err
- case *bool:
- *v = len(b) == 1 && b[0] == '1'
- return nil
- case *time.Time:
- var err error
- *v, err = time.Parse(time.RFC3339Nano, util.BytesToString(b))
- return err
- case *time.Duration:
- n, err := util.ParseInt(b, 10, 64)
- if err != nil {
- return err
- }
- *v = time.Duration(n)
- return nil
- case encoding.BinaryUnmarshaler:
- return v.UnmarshalBinary(b)
- default:
- return fmt.Errorf(
- "redis: can't unmarshal %T (consider implementing BinaryUnmarshaler)", v)
- }
-}
-
-func ScanSlice(data []string, slice interface{}) error {
- v := reflect.ValueOf(slice)
- if !v.IsValid() {
- return fmt.Errorf("redis: ScanSlice(nil)")
- }
- if v.Kind() != reflect.Ptr {
- return fmt.Errorf("redis: ScanSlice(non-pointer %T)", slice)
- }
- v = v.Elem()
- if v.Kind() != reflect.Slice {
- return fmt.Errorf("redis: ScanSlice(non-slice %T)", slice)
- }
-
- next := makeSliceNextElemFunc(v)
- for i, s := range data {
- elem := next()
- if err := Scan([]byte(s), elem.Addr().Interface()); err != nil {
- err = fmt.Errorf("redis: ScanSlice index=%d value=%q failed: %w", i, s, err)
- return err
- }
- }
-
- return nil
-}
-
-func makeSliceNextElemFunc(v reflect.Value) func() reflect.Value {
- elemType := v.Type().Elem()
-
- if elemType.Kind() == reflect.Ptr {
- elemType = elemType.Elem()
- return func() reflect.Value {
- if v.Len() < v.Cap() {
- v.Set(v.Slice(0, v.Len()+1))
- elem := v.Index(v.Len() - 1)
- if elem.IsNil() {
- elem.Set(reflect.New(elemType))
- }
- return elem.Elem()
- }
-
- elem := reflect.New(elemType)
- v.Set(reflect.Append(v, elem))
- return elem.Elem()
- }
- }
-
- zero := reflect.Zero(elemType)
- return func() reflect.Value {
- if v.Len() < v.Cap() {
- v.Set(v.Slice(0, v.Len()+1))
- return v.Index(v.Len() - 1)
- }
-
- v.Set(reflect.Append(v, zero))
- return v.Index(v.Len() - 1)
- }
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go b/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go
deleted file mode 100644
index c4260981..00000000
--- a/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go
+++ /dev/null
@@ -1,155 +0,0 @@
-package proto
-
-import (
- "encoding"
- "fmt"
- "io"
- "strconv"
- "time"
-
- "github.com/go-redis/redis/v8/internal/util"
-)
-
-type writer interface {
- io.Writer
- io.ByteWriter
- // io.StringWriter
- WriteString(s string) (n int, err error)
-}
-
-type Writer struct {
- writer
-
- lenBuf []byte
- numBuf []byte
-}
-
-func NewWriter(wr writer) *Writer {
- return &Writer{
- writer: wr,
-
- lenBuf: make([]byte, 64),
- numBuf: make([]byte, 64),
- }
-}
-
-func (w *Writer) WriteArgs(args []interface{}) error {
- if err := w.WriteByte(ArrayReply); err != nil {
- return err
- }
-
- if err := w.writeLen(len(args)); err != nil {
- return err
- }
-
- for _, arg := range args {
- if err := w.WriteArg(arg); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (w *Writer) writeLen(n int) error {
- w.lenBuf = strconv.AppendUint(w.lenBuf[:0], uint64(n), 10)
- w.lenBuf = append(w.lenBuf, '\r', '\n')
- _, err := w.Write(w.lenBuf)
- return err
-}
-
-func (w *Writer) WriteArg(v interface{}) error {
- switch v := v.(type) {
- case nil:
- return w.string("")
- case string:
- return w.string(v)
- case []byte:
- return w.bytes(v)
- case int:
- return w.int(int64(v))
- case int8:
- return w.int(int64(v))
- case int16:
- return w.int(int64(v))
- case int32:
- return w.int(int64(v))
- case int64:
- return w.int(v)
- case uint:
- return w.uint(uint64(v))
- case uint8:
- return w.uint(uint64(v))
- case uint16:
- return w.uint(uint64(v))
- case uint32:
- return w.uint(uint64(v))
- case uint64:
- return w.uint(v)
- case float32:
- return w.float(float64(v))
- case float64:
- return w.float(v)
- case bool:
- if v {
- return w.int(1)
- }
- return w.int(0)
- case time.Time:
- w.numBuf = v.AppendFormat(w.numBuf[:0], time.RFC3339Nano)
- return w.bytes(w.numBuf)
- case time.Duration:
- return w.int(v.Nanoseconds())
- case encoding.BinaryMarshaler:
- b, err := v.MarshalBinary()
- if err != nil {
- return err
- }
- return w.bytes(b)
- default:
- return fmt.Errorf(
- "redis: can't marshal %T (implement encoding.BinaryMarshaler)", v)
- }
-}
-
-func (w *Writer) bytes(b []byte) error {
- if err := w.WriteByte(StringReply); err != nil {
- return err
- }
-
- if err := w.writeLen(len(b)); err != nil {
- return err
- }
-
- if _, err := w.Write(b); err != nil {
- return err
- }
-
- return w.crlf()
-}
-
-func (w *Writer) string(s string) error {
- return w.bytes(util.StringToBytes(s))
-}
-
-func (w *Writer) uint(n uint64) error {
- w.numBuf = strconv.AppendUint(w.numBuf[:0], n, 10)
- return w.bytes(w.numBuf)
-}
-
-func (w *Writer) int(n int64) error {
- w.numBuf = strconv.AppendInt(w.numBuf[:0], n, 10)
- return w.bytes(w.numBuf)
-}
-
-func (w *Writer) float(f float64) error {
- w.numBuf = strconv.AppendFloat(w.numBuf[:0], f, 'f', -1, 64)
- return w.bytes(w.numBuf)
-}
-
-func (w *Writer) crlf() error {
- if err := w.WriteByte('\r'); err != nil {
- return err
- }
- return w.WriteByte('\n')
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/rand/rand.go b/vendor/github.com/go-redis/redis/v8/internal/rand/rand.go
deleted file mode 100644
index 2edccba9..00000000
--- a/vendor/github.com/go-redis/redis/v8/internal/rand/rand.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package rand
-
-import (
- "math/rand"
- "sync"
-)
-
-// Int returns a non-negative pseudo-random int.
-func Int() int { return pseudo.Int() }
-
-// Intn returns, as an int, a non-negative pseudo-random number in [0,n).
-// It panics if n <= 0.
-func Intn(n int) int { return pseudo.Intn(n) }
-
-// Int63n returns, as an int64, a non-negative pseudo-random number in [0,n).
-// It panics if n <= 0.
-func Int63n(n int64) int64 { return pseudo.Int63n(n) }
-
-// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n).
-func Perm(n int) []int { return pseudo.Perm(n) }
-
-// Seed uses the provided seed value to initialize the default Source to a
-// deterministic state. If Seed is not called, the generator behaves as if
-// seeded by Seed(1).
-func Seed(n int64) { pseudo.Seed(n) }
-
-var pseudo = rand.New(&source{src: rand.NewSource(1)})
-
-type source struct {
- src rand.Source
- mu sync.Mutex
-}
-
-func (s *source) Int63() int64 {
- s.mu.Lock()
- n := s.src.Int63()
- s.mu.Unlock()
- return n
-}
-
-func (s *source) Seed(seed int64) {
- s.mu.Lock()
- s.src.Seed(seed)
- s.mu.Unlock()
-}
-
-// Shuffle pseudo-randomizes the order of elements.
-// n is the number of elements.
-// swap swaps the elements with indexes i and j.
-func Shuffle(n int, swap func(i, j int)) { pseudo.Shuffle(n, swap) }
diff --git a/vendor/github.com/go-redis/redis/v8/internal/safe.go b/vendor/github.com/go-redis/redis/v8/internal/safe.go
deleted file mode 100644
index fd2f4340..00000000
--- a/vendor/github.com/go-redis/redis/v8/internal/safe.go
+++ /dev/null
@@ -1,12 +0,0 @@
-//go:build appengine
-// +build appengine
-
-package internal
-
-func String(b []byte) string {
- return string(b)
-}
-
-func Bytes(s string) []byte {
- return []byte(s)
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/unsafe.go b/vendor/github.com/go-redis/redis/v8/internal/unsafe.go
deleted file mode 100644
index 9f2e418f..00000000
--- a/vendor/github.com/go-redis/redis/v8/internal/unsafe.go
+++ /dev/null
@@ -1,21 +0,0 @@
-//go:build !appengine
-// +build !appengine
-
-package internal
-
-import "unsafe"
-
-// String converts byte slice to string.
-func String(b []byte) string {
- return *(*string)(unsafe.Pointer(&b))
-}
-
-// Bytes converts string to byte slice.
-func Bytes(s string) []byte {
- return *(*[]byte)(unsafe.Pointer(
- &struct {
- string
- Cap int
- }{s, len(s)},
- ))
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/util.go b/vendor/github.com/go-redis/redis/v8/internal/util.go
deleted file mode 100644
index e34a7f03..00000000
--- a/vendor/github.com/go-redis/redis/v8/internal/util.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package internal
-
-import (
- "context"
- "time"
-
- "github.com/go-redis/redis/v8/internal/util"
-)
-
-func Sleep(ctx context.Context, dur time.Duration) error {
- t := time.NewTimer(dur)
- defer t.Stop()
-
- select {
- case <-t.C:
- return nil
- case <-ctx.Done():
- return ctx.Err()
- }
-}
-
-func ToLower(s string) string {
- if isLower(s) {
- return s
- }
-
- b := make([]byte, len(s))
- for i := range b {
- c := s[i]
- if c >= 'A' && c <= 'Z' {
- c += 'a' - 'A'
- }
- b[i] = c
- }
- return util.BytesToString(b)
-}
-
-func isLower(s string) bool {
- for i := 0; i < len(s); i++ {
- c := s[i]
- if c >= 'A' && c <= 'Z' {
- return false
- }
- }
- return true
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/util/safe.go b/vendor/github.com/go-redis/redis/v8/internal/util/safe.go
deleted file mode 100644
index 21307110..00000000
--- a/vendor/github.com/go-redis/redis/v8/internal/util/safe.go
+++ /dev/null
@@ -1,12 +0,0 @@
-//go:build appengine
-// +build appengine
-
-package util
-
-func BytesToString(b []byte) string {
- return string(b)
-}
-
-func StringToBytes(s string) []byte {
- return []byte(s)
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/util/strconv.go b/vendor/github.com/go-redis/redis/v8/internal/util/strconv.go
deleted file mode 100644
index db503380..00000000
--- a/vendor/github.com/go-redis/redis/v8/internal/util/strconv.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package util
-
-import "strconv"
-
-func Atoi(b []byte) (int, error) {
- return strconv.Atoi(BytesToString(b))
-}
-
-func ParseInt(b []byte, base int, bitSize int) (int64, error) {
- return strconv.ParseInt(BytesToString(b), base, bitSize)
-}
-
-func ParseUint(b []byte, base int, bitSize int) (uint64, error) {
- return strconv.ParseUint(BytesToString(b), base, bitSize)
-}
-
-func ParseFloat(b []byte, bitSize int) (float64, error) {
- return strconv.ParseFloat(BytesToString(b), bitSize)
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go b/vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go
deleted file mode 100644
index daa8d769..00000000
--- a/vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go
+++ /dev/null
@@ -1,23 +0,0 @@
-//go:build !appengine
-// +build !appengine
-
-package util
-
-import (
- "unsafe"
-)
-
-// BytesToString converts byte slice to string.
-func BytesToString(b []byte) string {
- return *(*string)(unsafe.Pointer(&b))
-}
-
-// StringToBytes converts string to byte slice.
-func StringToBytes(s string) []byte {
- return *(*[]byte)(unsafe.Pointer(
- &struct {
- string
- Cap int
- }{s, len(s)},
- ))
-}
diff --git a/vendor/github.com/go-redis/redis/v8/iterator.go b/vendor/github.com/go-redis/redis/v8/iterator.go
deleted file mode 100644
index 2f8bc2be..00000000
--- a/vendor/github.com/go-redis/redis/v8/iterator.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package redis
-
-import (
- "context"
- "sync"
-)
-
-// ScanIterator is used to incrementally iterate over a collection of elements.
-// It's safe for concurrent use by multiple goroutines.
-type ScanIterator struct {
- mu sync.Mutex // protects Scanner and pos
- cmd *ScanCmd
- pos int
-}
-
-// Err returns the last iterator error, if any.
-func (it *ScanIterator) Err() error {
- it.mu.Lock()
- err := it.cmd.Err()
- it.mu.Unlock()
- return err
-}
-
-// Next advances the cursor and returns true if more values can be read.
-func (it *ScanIterator) Next(ctx context.Context) bool {
- it.mu.Lock()
- defer it.mu.Unlock()
-
- // Instantly return on errors.
- if it.cmd.Err() != nil {
- return false
- }
-
- // Advance cursor, check if we are still within range.
- if it.pos < len(it.cmd.page) {
- it.pos++
- return true
- }
-
- for {
- // Return if there is no more data to fetch.
- if it.cmd.cursor == 0 {
- return false
- }
-
- // Fetch next page.
- switch it.cmd.args[0] {
- case "scan", "qscan":
- it.cmd.args[1] = it.cmd.cursor
- default:
- it.cmd.args[2] = it.cmd.cursor
- }
-
- err := it.cmd.process(ctx, it.cmd)
- if err != nil {
- return false
- }
-
- it.pos = 1
-
- // Redis can occasionally return empty page.
- if len(it.cmd.page) > 0 {
- return true
- }
- }
-}
-
-// Val returns the key/field at the current cursor position.
-func (it *ScanIterator) Val() string {
- var v string
- it.mu.Lock()
- if it.cmd.Err() == nil && it.pos > 0 && it.pos <= len(it.cmd.page) {
- v = it.cmd.page[it.pos-1]
- }
- it.mu.Unlock()
- return v
-}
diff --git a/vendor/github.com/go-redis/redis/v8/options.go b/vendor/github.com/go-redis/redis/v8/options.go
deleted file mode 100644
index a4abe32c..00000000
--- a/vendor/github.com/go-redis/redis/v8/options.go
+++ /dev/null
@@ -1,429 +0,0 @@
-package redis
-
-import (
- "context"
- "crypto/tls"
- "errors"
- "fmt"
- "net"
- "net/url"
- "runtime"
- "sort"
- "strconv"
- "strings"
- "time"
-
- "github.com/go-redis/redis/v8/internal/pool"
-)
-
-// Limiter is the interface of a rate limiter or a circuit breaker.
-type Limiter interface {
- // Allow returns nil if operation is allowed or an error otherwise.
- // If operation is allowed client must ReportResult of the operation
- // whether it is a success or a failure.
- Allow() error
- // ReportResult reports the result of the previously allowed operation.
- // nil indicates a success, non-nil error usually indicates a failure.
- ReportResult(result error)
-}
-
-// Options keeps the settings to setup redis connection.
-type Options struct {
- // The network type, either tcp or unix.
- // Default is tcp.
- Network string
- // host:port address.
- Addr string
-
- // Dialer creates new network connection and has priority over
- // Network and Addr options.
- Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
-
- // Hook that is called when new connection is established.
- OnConnect func(ctx context.Context, cn *Conn) error
-
- // Use the specified Username to authenticate the current connection
- // with one of the connections defined in the ACL list when connecting
- // to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
- Username string
- // Optional password. Must match the password specified in the
- // requirepass server configuration option (if connecting to a Redis 5.0 instance, or lower),
- // or the User Password when connecting to a Redis 6.0 instance, or greater,
- // that is using the Redis ACL system.
- Password string
-
- // Database to be selected after connecting to the server.
- DB int
-
- // Maximum number of retries before giving up.
- // Default is 3 retries; -1 (not 0) disables retries.
- MaxRetries int
- // Minimum backoff between each retry.
- // Default is 8 milliseconds; -1 disables backoff.
- MinRetryBackoff time.Duration
- // Maximum backoff between each retry.
- // Default is 512 milliseconds; -1 disables backoff.
- MaxRetryBackoff time.Duration
-
- // Dial timeout for establishing new connections.
- // Default is 5 seconds.
- DialTimeout time.Duration
- // Timeout for socket reads. If reached, commands will fail
- // with a timeout instead of blocking. Use value -1 for no timeout and 0 for default.
- // Default is 3 seconds.
- ReadTimeout time.Duration
- // Timeout for socket writes. If reached, commands will fail
- // with a timeout instead of blocking.
- // Default is ReadTimeout.
- WriteTimeout time.Duration
-
- // Type of connection pool.
- // true for FIFO pool, false for LIFO pool.
- // Note that fifo has higher overhead compared to lifo.
- PoolFIFO bool
- // Maximum number of socket connections.
- // Default is 10 connections per every available CPU as reported by runtime.GOMAXPROCS.
- PoolSize int
- // Minimum number of idle connections which is useful when establishing
- // new connection is slow.
- MinIdleConns int
- // Connection age at which client retires (closes) the connection.
- // Default is to not close aged connections.
- MaxConnAge time.Duration
- // Amount of time client waits for connection if all connections
- // are busy before returning an error.
- // Default is ReadTimeout + 1 second.
- PoolTimeout time.Duration
- // Amount of time after which client closes idle connections.
- // Should be less than server's timeout.
- // Default is 5 minutes. -1 disables idle timeout check.
- IdleTimeout time.Duration
- // Frequency of idle checks made by idle connections reaper.
- // Default is 1 minute. -1 disables idle connections reaper,
- // but idle connections are still discarded by the client
- // if IdleTimeout is set.
- IdleCheckFrequency time.Duration
-
- // Enables read only queries on slave nodes.
- readOnly bool
-
- // TLS Config to use. When set TLS will be negotiated.
- TLSConfig *tls.Config
-
- // Limiter interface used to implemented circuit breaker or rate limiter.
- Limiter Limiter
-}
-
-func (opt *Options) init() {
- if opt.Addr == "" {
- opt.Addr = "localhost:6379"
- }
- if opt.Network == "" {
- if strings.HasPrefix(opt.Addr, "/") {
- opt.Network = "unix"
- } else {
- opt.Network = "tcp"
- }
- }
- if opt.DialTimeout == 0 {
- opt.DialTimeout = 5 * time.Second
- }
- if opt.Dialer == nil {
- opt.Dialer = func(ctx context.Context, network, addr string) (net.Conn, error) {
- netDialer := &net.Dialer{
- Timeout: opt.DialTimeout,
- KeepAlive: 5 * time.Minute,
- }
- if opt.TLSConfig == nil {
- return netDialer.DialContext(ctx, network, addr)
- }
- return tls.DialWithDialer(netDialer, network, addr, opt.TLSConfig)
- }
- }
- if opt.PoolSize == 0 {
- opt.PoolSize = 10 * runtime.GOMAXPROCS(0)
- }
- switch opt.ReadTimeout {
- case -1:
- opt.ReadTimeout = 0
- case 0:
- opt.ReadTimeout = 3 * time.Second
- }
- switch opt.WriteTimeout {
- case -1:
- opt.WriteTimeout = 0
- case 0:
- opt.WriteTimeout = opt.ReadTimeout
- }
- if opt.PoolTimeout == 0 {
- opt.PoolTimeout = opt.ReadTimeout + time.Second
- }
- if opt.IdleTimeout == 0 {
- opt.IdleTimeout = 5 * time.Minute
- }
- if opt.IdleCheckFrequency == 0 {
- opt.IdleCheckFrequency = time.Minute
- }
-
- if opt.MaxRetries == -1 {
- opt.MaxRetries = 0
- } else if opt.MaxRetries == 0 {
- opt.MaxRetries = 3
- }
- switch opt.MinRetryBackoff {
- case -1:
- opt.MinRetryBackoff = 0
- case 0:
- opt.MinRetryBackoff = 8 * time.Millisecond
- }
- switch opt.MaxRetryBackoff {
- case -1:
- opt.MaxRetryBackoff = 0
- case 0:
- opt.MaxRetryBackoff = 512 * time.Millisecond
- }
-}
-
-func (opt *Options) clone() *Options {
- clone := *opt
- return &clone
-}
-
-// ParseURL parses an URL into Options that can be used to connect to Redis.
-// Scheme is required.
-// There are two connection types: by tcp socket and by unix socket.
-// Tcp connection:
-// redis://:@:/
-// Unix connection:
-// unix://:@ ?db=
-// Most Option fields can be set using query parameters, with the following restrictions:
-// - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries
-// - only scalar type fields are supported (bool, int, time.Duration)
-// - for time.Duration fields, values must be a valid input for time.ParseDuration();
-// additionally a plain integer as value (i.e. without unit) is intepreted as seconds
-// - to disable a duration field, use value less than or equal to 0; to use the default
-// value, leave the value blank or remove the parameter
-// - only the last value is interpreted if a parameter is given multiple times
-// - fields "network", "addr", "username" and "password" can only be set using other
-// URL attributes (scheme, host, userinfo, resp.), query paremeters using these
-// names will be treated as unknown parameters
-// - unknown parameter names will result in an error
-// Examples:
-// redis://user:password@localhost:6789/3?dial_timeout=3&db=1&read_timeout=6s&max_retries=2
-// is equivalent to:
-// &Options{
-// Network: "tcp",
-// Addr: "localhost:6789",
-// DB: 1, // path "/3" was overridden by "&db=1"
-// DialTimeout: 3 * time.Second, // no time unit = seconds
-// ReadTimeout: 6 * time.Second,
-// MaxRetries: 2,
-// }
-func ParseURL(redisURL string) (*Options, error) {
- u, err := url.Parse(redisURL)
- if err != nil {
- return nil, err
- }
-
- switch u.Scheme {
- case "redis", "rediss":
- return setupTCPConn(u)
- case "unix":
- return setupUnixConn(u)
- default:
- return nil, fmt.Errorf("redis: invalid URL scheme: %s", u.Scheme)
- }
-}
-
-func setupTCPConn(u *url.URL) (*Options, error) {
- o := &Options{Network: "tcp"}
-
- o.Username, o.Password = getUserPassword(u)
-
- h, p, err := net.SplitHostPort(u.Host)
- if err != nil {
- h = u.Host
- }
- if h == "" {
- h = "localhost"
- }
- if p == "" {
- p = "6379"
- }
- o.Addr = net.JoinHostPort(h, p)
-
- f := strings.FieldsFunc(u.Path, func(r rune) bool {
- return r == '/'
- })
- switch len(f) {
- case 0:
- o.DB = 0
- case 1:
- if o.DB, err = strconv.Atoi(f[0]); err != nil {
- return nil, fmt.Errorf("redis: invalid database number: %q", f[0])
- }
- default:
- return nil, fmt.Errorf("redis: invalid URL path: %s", u.Path)
- }
-
- if u.Scheme == "rediss" {
- o.TLSConfig = &tls.Config{ServerName: h}
- }
-
- return setupConnParams(u, o)
-}
-
-func setupUnixConn(u *url.URL) (*Options, error) {
- o := &Options{
- Network: "unix",
- }
-
- if strings.TrimSpace(u.Path) == "" { // path is required with unix connection
- return nil, errors.New("redis: empty unix socket path")
- }
- o.Addr = u.Path
- o.Username, o.Password = getUserPassword(u)
- return setupConnParams(u, o)
-}
-
-type queryOptions struct {
- q url.Values
- err error
-}
-
-func (o *queryOptions) string(name string) string {
- vs := o.q[name]
- if len(vs) == 0 {
- return ""
- }
- delete(o.q, name) // enable detection of unknown parameters
- return vs[len(vs)-1]
-}
-
-func (o *queryOptions) int(name string) int {
- s := o.string(name)
- if s == "" {
- return 0
- }
- i, err := strconv.Atoi(s)
- if err == nil {
- return i
- }
- if o.err == nil {
- o.err = fmt.Errorf("redis: invalid %s number: %s", name, err)
- }
- return 0
-}
-
-func (o *queryOptions) duration(name string) time.Duration {
- s := o.string(name)
- if s == "" {
- return 0
- }
- // try plain number first
- if i, err := strconv.Atoi(s); err == nil {
- if i <= 0 {
- // disable timeouts
- return -1
- }
- return time.Duration(i) * time.Second
- }
- dur, err := time.ParseDuration(s)
- if err == nil {
- return dur
- }
- if o.err == nil {
- o.err = fmt.Errorf("redis: invalid %s duration: %w", name, err)
- }
- return 0
-}
-
-func (o *queryOptions) bool(name string) bool {
- switch s := o.string(name); s {
- case "true", "1":
- return true
- case "false", "0", "":
- return false
- default:
- if o.err == nil {
- o.err = fmt.Errorf("redis: invalid %s boolean: expected true/false/1/0 or an empty string, got %q", name, s)
- }
- return false
- }
-}
-
-func (o *queryOptions) remaining() []string {
- if len(o.q) == 0 {
- return nil
- }
- keys := make([]string, 0, len(o.q))
- for k := range o.q {
- keys = append(keys, k)
- }
- sort.Strings(keys)
- return keys
-}
-
-// setupConnParams converts query parameters in u to option value in o.
-func setupConnParams(u *url.URL, o *Options) (*Options, error) {
- q := queryOptions{q: u.Query()}
-
- // compat: a future major release may use q.int("db")
- if tmp := q.string("db"); tmp != "" {
- db, err := strconv.Atoi(tmp)
- if err != nil {
- return nil, fmt.Errorf("redis: invalid database number: %w", err)
- }
- o.DB = db
- }
-
- o.MaxRetries = q.int("max_retries")
- o.MinRetryBackoff = q.duration("min_retry_backoff")
- o.MaxRetryBackoff = q.duration("max_retry_backoff")
- o.DialTimeout = q.duration("dial_timeout")
- o.ReadTimeout = q.duration("read_timeout")
- o.WriteTimeout = q.duration("write_timeout")
- o.PoolFIFO = q.bool("pool_fifo")
- o.PoolSize = q.int("pool_size")
- o.MinIdleConns = q.int("min_idle_conns")
- o.MaxConnAge = q.duration("max_conn_age")
- o.PoolTimeout = q.duration("pool_timeout")
- o.IdleTimeout = q.duration("idle_timeout")
- o.IdleCheckFrequency = q.duration("idle_check_frequency")
- if q.err != nil {
- return nil, q.err
- }
-
- // any parameters left?
- if r := q.remaining(); len(r) > 0 {
- return nil, fmt.Errorf("redis: unexpected option: %s", strings.Join(r, ", "))
- }
-
- return o, nil
-}
-
-func getUserPassword(u *url.URL) (string, string) {
- var user, password string
- if u.User != nil {
- user = u.User.Username()
- if p, ok := u.User.Password(); ok {
- password = p
- }
- }
- return user, password
-}
-
-func newConnPool(opt *Options) *pool.ConnPool {
- return pool.NewConnPool(&pool.Options{
- Dialer: func(ctx context.Context) (net.Conn, error) {
- return opt.Dialer(ctx, opt.Network, opt.Addr)
- },
- PoolFIFO: opt.PoolFIFO,
- PoolSize: opt.PoolSize,
- MinIdleConns: opt.MinIdleConns,
- MaxConnAge: opt.MaxConnAge,
- PoolTimeout: opt.PoolTimeout,
- IdleTimeout: opt.IdleTimeout,
- IdleCheckFrequency: opt.IdleCheckFrequency,
- })
-}
diff --git a/vendor/github.com/go-redis/redis/v8/package.json b/vendor/github.com/go-redis/redis/v8/package.json
deleted file mode 100644
index e4ea4bb0..00000000
--- a/vendor/github.com/go-redis/redis/v8/package.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
- "name": "redis",
- "version": "8.11.5",
- "main": "index.js",
- "repository": "git@github.com:go-redis/redis.git",
- "author": "Vladimir Mihailenco ",
- "license": "BSD-2-clause"
-}
diff --git a/vendor/github.com/go-redis/redis/v8/pipeline.go b/vendor/github.com/go-redis/redis/v8/pipeline.go
deleted file mode 100644
index 31bab971..00000000
--- a/vendor/github.com/go-redis/redis/v8/pipeline.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package redis
-
-import (
- "context"
- "sync"
-
- "github.com/go-redis/redis/v8/internal/pool"
-)
-
-type pipelineExecer func(context.Context, []Cmder) error
-
-// Pipeliner is an mechanism to realise Redis Pipeline technique.
-//
-// Pipelining is a technique to extremely speed up processing by packing
-// operations to batches, send them at once to Redis and read a replies in a
-// singe step.
-// See https://redis.io/topics/pipelining
-//
-// Pay attention, that Pipeline is not a transaction, so you can get unexpected
-// results in case of big pipelines and small read/write timeouts.
-// Redis client has retransmission logic in case of timeouts, pipeline
-// can be retransmitted and commands can be executed more then once.
-// To avoid this: it is good idea to use reasonable bigger read/write timeouts
-// depends of your batch size and/or use TxPipeline.
-type Pipeliner interface {
- StatefulCmdable
- Len() int
- Do(ctx context.Context, args ...interface{}) *Cmd
- Process(ctx context.Context, cmd Cmder) error
- Close() error
- Discard() error
- Exec(ctx context.Context) ([]Cmder, error)
-}
-
-var _ Pipeliner = (*Pipeline)(nil)
-
-// Pipeline implements pipelining as described in
-// http://redis.io/topics/pipelining. It's safe for concurrent use
-// by multiple goroutines.
-type Pipeline struct {
- cmdable
- statefulCmdable
-
- ctx context.Context
- exec pipelineExecer
-
- mu sync.Mutex
- cmds []Cmder
- closed bool
-}
-
-func (c *Pipeline) init() {
- c.cmdable = c.Process
- c.statefulCmdable = c.Process
-}
-
-// Len returns the number of queued commands.
-func (c *Pipeline) Len() int {
- c.mu.Lock()
- ln := len(c.cmds)
- c.mu.Unlock()
- return ln
-}
-
-// Do queues the custom command for later execution.
-func (c *Pipeline) Do(ctx context.Context, args ...interface{}) *Cmd {
- cmd := NewCmd(ctx, args...)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-// Process queues the cmd for later execution.
-func (c *Pipeline) Process(ctx context.Context, cmd Cmder) error {
- c.mu.Lock()
- c.cmds = append(c.cmds, cmd)
- c.mu.Unlock()
- return nil
-}
-
-// Close closes the pipeline, releasing any open resources.
-func (c *Pipeline) Close() error {
- c.mu.Lock()
- _ = c.discard()
- c.closed = true
- c.mu.Unlock()
- return nil
-}
-
-// Discard resets the pipeline and discards queued commands.
-func (c *Pipeline) Discard() error {
- c.mu.Lock()
- err := c.discard()
- c.mu.Unlock()
- return err
-}
-
-func (c *Pipeline) discard() error {
- if c.closed {
- return pool.ErrClosed
- }
- c.cmds = c.cmds[:0]
- return nil
-}
-
-// Exec executes all previously queued commands using one
-// client-server roundtrip.
-//
-// Exec always returns list of commands and error of the first failed
-// command if any.
-func (c *Pipeline) Exec(ctx context.Context) ([]Cmder, error) {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- if c.closed {
- return nil, pool.ErrClosed
- }
-
- if len(c.cmds) == 0 {
- return nil, nil
- }
-
- cmds := c.cmds
- c.cmds = nil
-
- return cmds, c.exec(ctx, cmds)
-}
-
-func (c *Pipeline) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- if err := fn(c); err != nil {
- return nil, err
- }
- cmds, err := c.Exec(ctx)
- _ = c.Close()
- return cmds, err
-}
-
-func (c *Pipeline) Pipeline() Pipeliner {
- return c
-}
-
-func (c *Pipeline) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.Pipelined(ctx, fn)
-}
-
-func (c *Pipeline) TxPipeline() Pipeliner {
- return c
-}
diff --git a/vendor/github.com/go-redis/redis/v8/pubsub.go b/vendor/github.com/go-redis/redis/v8/pubsub.go
deleted file mode 100644
index efc2354a..00000000
--- a/vendor/github.com/go-redis/redis/v8/pubsub.go
+++ /dev/null
@@ -1,668 +0,0 @@
-package redis
-
-import (
- "context"
- "fmt"
- "strings"
- "sync"
- "time"
-
- "github.com/go-redis/redis/v8/internal"
- "github.com/go-redis/redis/v8/internal/pool"
- "github.com/go-redis/redis/v8/internal/proto"
-)
-
-// PubSub implements Pub/Sub commands as described in
-// http://redis.io/topics/pubsub. Message receiving is NOT safe
-// for concurrent use by multiple goroutines.
-//
-// PubSub automatically reconnects to Redis Server and resubscribes
-// to the channels in case of network errors.
-type PubSub struct {
- opt *Options
-
- newConn func(ctx context.Context, channels []string) (*pool.Conn, error)
- closeConn func(*pool.Conn) error
-
- mu sync.Mutex
- cn *pool.Conn
- channels map[string]struct{}
- patterns map[string]struct{}
-
- closed bool
- exit chan struct{}
-
- cmd *Cmd
-
- chOnce sync.Once
- msgCh *channel
- allCh *channel
-}
-
-func (c *PubSub) init() {
- c.exit = make(chan struct{})
-}
-
-func (c *PubSub) String() string {
- channels := mapKeys(c.channels)
- channels = append(channels, mapKeys(c.patterns)...)
- return fmt.Sprintf("PubSub(%s)", strings.Join(channels, ", "))
-}
-
-func (c *PubSub) connWithLock(ctx context.Context) (*pool.Conn, error) {
- c.mu.Lock()
- cn, err := c.conn(ctx, nil)
- c.mu.Unlock()
- return cn, err
-}
-
-func (c *PubSub) conn(ctx context.Context, newChannels []string) (*pool.Conn, error) {
- if c.closed {
- return nil, pool.ErrClosed
- }
- if c.cn != nil {
- return c.cn, nil
- }
-
- channels := mapKeys(c.channels)
- channels = append(channels, newChannels...)
-
- cn, err := c.newConn(ctx, channels)
- if err != nil {
- return nil, err
- }
-
- if err := c.resubscribe(ctx, cn); err != nil {
- _ = c.closeConn(cn)
- return nil, err
- }
-
- c.cn = cn
- return cn, nil
-}
-
-func (c *PubSub) writeCmd(ctx context.Context, cn *pool.Conn, cmd Cmder) error {
- return cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
- return writeCmd(wr, cmd)
- })
-}
-
-func (c *PubSub) resubscribe(ctx context.Context, cn *pool.Conn) error {
- var firstErr error
-
- if len(c.channels) > 0 {
- firstErr = c._subscribe(ctx, cn, "subscribe", mapKeys(c.channels))
- }
-
- if len(c.patterns) > 0 {
- err := c._subscribe(ctx, cn, "psubscribe", mapKeys(c.patterns))
- if err != nil && firstErr == nil {
- firstErr = err
- }
- }
-
- return firstErr
-}
-
-func mapKeys(m map[string]struct{}) []string {
- s := make([]string, len(m))
- i := 0
- for k := range m {
- s[i] = k
- i++
- }
- return s
-}
-
-func (c *PubSub) _subscribe(
- ctx context.Context, cn *pool.Conn, redisCmd string, channels []string,
-) error {
- args := make([]interface{}, 0, 1+len(channels))
- args = append(args, redisCmd)
- for _, channel := range channels {
- args = append(args, channel)
- }
- cmd := NewSliceCmd(ctx, args...)
- return c.writeCmd(ctx, cn, cmd)
-}
-
-func (c *PubSub) releaseConnWithLock(
- ctx context.Context,
- cn *pool.Conn,
- err error,
- allowTimeout bool,
-) {
- c.mu.Lock()
- c.releaseConn(ctx, cn, err, allowTimeout)
- c.mu.Unlock()
-}
-
-func (c *PubSub) releaseConn(ctx context.Context, cn *pool.Conn, err error, allowTimeout bool) {
- if c.cn != cn {
- return
- }
- if isBadConn(err, allowTimeout, c.opt.Addr) {
- c.reconnect(ctx, err)
- }
-}
-
-func (c *PubSub) reconnect(ctx context.Context, reason error) {
- _ = c.closeTheCn(reason)
- _, _ = c.conn(ctx, nil)
-}
-
-func (c *PubSub) closeTheCn(reason error) error {
- if c.cn == nil {
- return nil
- }
- if !c.closed {
- internal.Logger.Printf(c.getContext(), "redis: discarding bad PubSub connection: %s", reason)
- }
- err := c.closeConn(c.cn)
- c.cn = nil
- return err
-}
-
-func (c *PubSub) Close() error {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- if c.closed {
- return pool.ErrClosed
- }
- c.closed = true
- close(c.exit)
-
- return c.closeTheCn(pool.ErrClosed)
-}
-
-// Subscribe the client to the specified channels. It returns
-// empty subscription if there are no channels.
-func (c *PubSub) Subscribe(ctx context.Context, channels ...string) error {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- err := c.subscribe(ctx, "subscribe", channels...)
- if c.channels == nil {
- c.channels = make(map[string]struct{})
- }
- for _, s := range channels {
- c.channels[s] = struct{}{}
- }
- return err
-}
-
-// PSubscribe the client to the given patterns. It returns
-// empty subscription if there are no patterns.
-func (c *PubSub) PSubscribe(ctx context.Context, patterns ...string) error {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- err := c.subscribe(ctx, "psubscribe", patterns...)
- if c.patterns == nil {
- c.patterns = make(map[string]struct{})
- }
- for _, s := range patterns {
- c.patterns[s] = struct{}{}
- }
- return err
-}
-
-// Unsubscribe the client from the given channels, or from all of
-// them if none is given.
-func (c *PubSub) Unsubscribe(ctx context.Context, channels ...string) error {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- for _, channel := range channels {
- delete(c.channels, channel)
- }
- err := c.subscribe(ctx, "unsubscribe", channels...)
- return err
-}
-
-// PUnsubscribe the client from the given patterns, or from all of
-// them if none is given.
-func (c *PubSub) PUnsubscribe(ctx context.Context, patterns ...string) error {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- for _, pattern := range patterns {
- delete(c.patterns, pattern)
- }
- err := c.subscribe(ctx, "punsubscribe", patterns...)
- return err
-}
-
-func (c *PubSub) subscribe(ctx context.Context, redisCmd string, channels ...string) error {
- cn, err := c.conn(ctx, channels)
- if err != nil {
- return err
- }
-
- err = c._subscribe(ctx, cn, redisCmd, channels)
- c.releaseConn(ctx, cn, err, false)
- return err
-}
-
-func (c *PubSub) Ping(ctx context.Context, payload ...string) error {
- args := []interface{}{"ping"}
- if len(payload) == 1 {
- args = append(args, payload[0])
- }
- cmd := NewCmd(ctx, args...)
-
- c.mu.Lock()
- defer c.mu.Unlock()
-
- cn, err := c.conn(ctx, nil)
- if err != nil {
- return err
- }
-
- err = c.writeCmd(ctx, cn, cmd)
- c.releaseConn(ctx, cn, err, false)
- return err
-}
-
-// Subscription received after a successful subscription to channel.
-type Subscription struct {
- // Can be "subscribe", "unsubscribe", "psubscribe" or "punsubscribe".
- Kind string
- // Channel name we have subscribed to.
- Channel string
- // Number of channels we are currently subscribed to.
- Count int
-}
-
-func (m *Subscription) String() string {
- return fmt.Sprintf("%s: %s", m.Kind, m.Channel)
-}
-
-// Message received as result of a PUBLISH command issued by another client.
-type Message struct {
- Channel string
- Pattern string
- Payload string
- PayloadSlice []string
-}
-
-func (m *Message) String() string {
- return fmt.Sprintf("Message<%s: %s>", m.Channel, m.Payload)
-}
-
-// Pong received as result of a PING command issued by another client.
-type Pong struct {
- Payload string
-}
-
-func (p *Pong) String() string {
- if p.Payload != "" {
- return fmt.Sprintf("Pong<%s>", p.Payload)
- }
- return "Pong"
-}
-
-func (c *PubSub) newMessage(reply interface{}) (interface{}, error) {
- switch reply := reply.(type) {
- case string:
- return &Pong{
- Payload: reply,
- }, nil
- case []interface{}:
- switch kind := reply[0].(string); kind {
- case "subscribe", "unsubscribe", "psubscribe", "punsubscribe":
- // Can be nil in case of "unsubscribe".
- channel, _ := reply[1].(string)
- return &Subscription{
- Kind: kind,
- Channel: channel,
- Count: int(reply[2].(int64)),
- }, nil
- case "message":
- switch payload := reply[2].(type) {
- case string:
- return &Message{
- Channel: reply[1].(string),
- Payload: payload,
- }, nil
- case []interface{}:
- ss := make([]string, len(payload))
- for i, s := range payload {
- ss[i] = s.(string)
- }
- return &Message{
- Channel: reply[1].(string),
- PayloadSlice: ss,
- }, nil
- default:
- return nil, fmt.Errorf("redis: unsupported pubsub message payload: %T", payload)
- }
- case "pmessage":
- return &Message{
- Pattern: reply[1].(string),
- Channel: reply[2].(string),
- Payload: reply[3].(string),
- }, nil
- case "pong":
- return &Pong{
- Payload: reply[1].(string),
- }, nil
- default:
- return nil, fmt.Errorf("redis: unsupported pubsub message: %q", kind)
- }
- default:
- return nil, fmt.Errorf("redis: unsupported pubsub message: %#v", reply)
- }
-}
-
-// ReceiveTimeout acts like Receive but returns an error if message
-// is not received in time. This is low-level API and in most cases
-// Channel should be used instead.
-func (c *PubSub) ReceiveTimeout(ctx context.Context, timeout time.Duration) (interface{}, error) {
- if c.cmd == nil {
- c.cmd = NewCmd(ctx)
- }
-
- // Don't hold the lock to allow subscriptions and pings.
-
- cn, err := c.connWithLock(ctx)
- if err != nil {
- return nil, err
- }
-
- err = cn.WithReader(ctx, timeout, func(rd *proto.Reader) error {
- return c.cmd.readReply(rd)
- })
-
- c.releaseConnWithLock(ctx, cn, err, timeout > 0)
-
- if err != nil {
- return nil, err
- }
-
- return c.newMessage(c.cmd.Val())
-}
-
-// Receive returns a message as a Subscription, Message, Pong or error.
-// See PubSub example for details. This is low-level API and in most cases
-// Channel should be used instead.
-func (c *PubSub) Receive(ctx context.Context) (interface{}, error) {
- return c.ReceiveTimeout(ctx, 0)
-}
-
-// ReceiveMessage returns a Message or error ignoring Subscription and Pong
-// messages. This is low-level API and in most cases Channel should be used
-// instead.
-func (c *PubSub) ReceiveMessage(ctx context.Context) (*Message, error) {
- for {
- msg, err := c.Receive(ctx)
- if err != nil {
- return nil, err
- }
-
- switch msg := msg.(type) {
- case *Subscription:
- // Ignore.
- case *Pong:
- // Ignore.
- case *Message:
- return msg, nil
- default:
- err := fmt.Errorf("redis: unknown message: %T", msg)
- return nil, err
- }
- }
-}
-
-func (c *PubSub) getContext() context.Context {
- if c.cmd != nil {
- return c.cmd.ctx
- }
- return context.Background()
-}
-
-//------------------------------------------------------------------------------
-
-// Channel returns a Go channel for concurrently receiving messages.
-// The channel is closed together with the PubSub. If the Go channel
-// is blocked full for 30 seconds the message is dropped.
-// Receive* APIs can not be used after channel is created.
-//
-// go-redis periodically sends ping messages to test connection health
-// and re-subscribes if ping can not not received for 30 seconds.
-func (c *PubSub) Channel(opts ...ChannelOption) <-chan *Message {
- c.chOnce.Do(func() {
- c.msgCh = newChannel(c, opts...)
- c.msgCh.initMsgChan()
- })
- if c.msgCh == nil {
- err := fmt.Errorf("redis: Channel can't be called after ChannelWithSubscriptions")
- panic(err)
- }
- return c.msgCh.msgCh
-}
-
-// ChannelSize is like Channel, but creates a Go channel
-// with specified buffer size.
-//
-// Deprecated: use Channel(WithChannelSize(size)), remove in v9.
-func (c *PubSub) ChannelSize(size int) <-chan *Message {
- return c.Channel(WithChannelSize(size))
-}
-
-// ChannelWithSubscriptions is like Channel, but message type can be either
-// *Subscription or *Message. Subscription messages can be used to detect
-// reconnections.
-//
-// ChannelWithSubscriptions can not be used together with Channel or ChannelSize.
-func (c *PubSub) ChannelWithSubscriptions(_ context.Context, size int) <-chan interface{} {
- c.chOnce.Do(func() {
- c.allCh = newChannel(c, WithChannelSize(size))
- c.allCh.initAllChan()
- })
- if c.allCh == nil {
- err := fmt.Errorf("redis: ChannelWithSubscriptions can't be called after Channel")
- panic(err)
- }
- return c.allCh.allCh
-}
-
-type ChannelOption func(c *channel)
-
-// WithChannelSize specifies the Go chan size that is used to buffer incoming messages.
-//
-// The default is 100 messages.
-func WithChannelSize(size int) ChannelOption {
- return func(c *channel) {
- c.chanSize = size
- }
-}
-
-// WithChannelHealthCheckInterval specifies the health check interval.
-// PubSub will ping Redis Server if it does not receive any messages within the interval.
-// To disable health check, use zero interval.
-//
-// The default is 3 seconds.
-func WithChannelHealthCheckInterval(d time.Duration) ChannelOption {
- return func(c *channel) {
- c.checkInterval = d
- }
-}
-
-// WithChannelSendTimeout specifies the channel send timeout after which
-// the message is dropped.
-//
-// The default is 60 seconds.
-func WithChannelSendTimeout(d time.Duration) ChannelOption {
- return func(c *channel) {
- c.chanSendTimeout = d
- }
-}
-
-type channel struct {
- pubSub *PubSub
-
- msgCh chan *Message
- allCh chan interface{}
- ping chan struct{}
-
- chanSize int
- chanSendTimeout time.Duration
- checkInterval time.Duration
-}
-
-func newChannel(pubSub *PubSub, opts ...ChannelOption) *channel {
- c := &channel{
- pubSub: pubSub,
-
- chanSize: 100,
- chanSendTimeout: time.Minute,
- checkInterval: 3 * time.Second,
- }
- for _, opt := range opts {
- opt(c)
- }
- if c.checkInterval > 0 {
- c.initHealthCheck()
- }
- return c
-}
-
-func (c *channel) initHealthCheck() {
- ctx := context.TODO()
- c.ping = make(chan struct{}, 1)
-
- go func() {
- timer := time.NewTimer(time.Minute)
- timer.Stop()
-
- for {
- timer.Reset(c.checkInterval)
- select {
- case <-c.ping:
- if !timer.Stop() {
- <-timer.C
- }
- case <-timer.C:
- if pingErr := c.pubSub.Ping(ctx); pingErr != nil {
- c.pubSub.mu.Lock()
- c.pubSub.reconnect(ctx, pingErr)
- c.pubSub.mu.Unlock()
- }
- case <-c.pubSub.exit:
- return
- }
- }
- }()
-}
-
-// initMsgChan must be in sync with initAllChan.
-func (c *channel) initMsgChan() {
- ctx := context.TODO()
- c.msgCh = make(chan *Message, c.chanSize)
-
- go func() {
- timer := time.NewTimer(time.Minute)
- timer.Stop()
-
- var errCount int
- for {
- msg, err := c.pubSub.Receive(ctx)
- if err != nil {
- if err == pool.ErrClosed {
- close(c.msgCh)
- return
- }
- if errCount > 0 {
- time.Sleep(100 * time.Millisecond)
- }
- errCount++
- continue
- }
-
- errCount = 0
-
- // Any message is as good as a ping.
- select {
- case c.ping <- struct{}{}:
- default:
- }
-
- switch msg := msg.(type) {
- case *Subscription:
- // Ignore.
- case *Pong:
- // Ignore.
- case *Message:
- timer.Reset(c.chanSendTimeout)
- select {
- case c.msgCh <- msg:
- if !timer.Stop() {
- <-timer.C
- }
- case <-timer.C:
- internal.Logger.Printf(
- ctx, "redis: %s channel is full for %s (message is dropped)",
- c, c.chanSendTimeout)
- }
- default:
- internal.Logger.Printf(ctx, "redis: unknown message type: %T", msg)
- }
- }
- }()
-}
-
-// initAllChan must be in sync with initMsgChan.
-func (c *channel) initAllChan() {
- ctx := context.TODO()
- c.allCh = make(chan interface{}, c.chanSize)
-
- go func() {
- timer := time.NewTimer(time.Minute)
- timer.Stop()
-
- var errCount int
- for {
- msg, err := c.pubSub.Receive(ctx)
- if err != nil {
- if err == pool.ErrClosed {
- close(c.allCh)
- return
- }
- if errCount > 0 {
- time.Sleep(100 * time.Millisecond)
- }
- errCount++
- continue
- }
-
- errCount = 0
-
- // Any message is as good as a ping.
- select {
- case c.ping <- struct{}{}:
- default:
- }
-
- switch msg := msg.(type) {
- case *Pong:
- // Ignore.
- case *Subscription, *Message:
- timer.Reset(c.chanSendTimeout)
- select {
- case c.allCh <- msg:
- if !timer.Stop() {
- <-timer.C
- }
- case <-timer.C:
- internal.Logger.Printf(
- ctx, "redis: %s channel is full for %s (message is dropped)",
- c, c.chanSendTimeout)
- }
- default:
- internal.Logger.Printf(ctx, "redis: unknown message type: %T", msg)
- }
- }
- }()
-}
diff --git a/vendor/github.com/go-redis/redis/v8/redis.go b/vendor/github.com/go-redis/redis/v8/redis.go
deleted file mode 100644
index bcf8a2a9..00000000
--- a/vendor/github.com/go-redis/redis/v8/redis.go
+++ /dev/null
@@ -1,773 +0,0 @@
-package redis
-
-import (
- "context"
- "errors"
- "fmt"
- "sync/atomic"
- "time"
-
- "github.com/go-redis/redis/v8/internal"
- "github.com/go-redis/redis/v8/internal/pool"
- "github.com/go-redis/redis/v8/internal/proto"
-)
-
-// Nil reply returned by Redis when key does not exist.
-const Nil = proto.Nil
-
-func SetLogger(logger internal.Logging) {
- internal.Logger = logger
-}
-
-//------------------------------------------------------------------------------
-
-type Hook interface {
- BeforeProcess(ctx context.Context, cmd Cmder) (context.Context, error)
- AfterProcess(ctx context.Context, cmd Cmder) error
-
- BeforeProcessPipeline(ctx context.Context, cmds []Cmder) (context.Context, error)
- AfterProcessPipeline(ctx context.Context, cmds []Cmder) error
-}
-
-type hooks struct {
- hooks []Hook
-}
-
-func (hs *hooks) lock() {
- hs.hooks = hs.hooks[:len(hs.hooks):len(hs.hooks)]
-}
-
-func (hs hooks) clone() hooks {
- clone := hs
- clone.lock()
- return clone
-}
-
-func (hs *hooks) AddHook(hook Hook) {
- hs.hooks = append(hs.hooks, hook)
-}
-
-func (hs hooks) process(
- ctx context.Context, cmd Cmder, fn func(context.Context, Cmder) error,
-) error {
- if len(hs.hooks) == 0 {
- err := fn(ctx, cmd)
- cmd.SetErr(err)
- return err
- }
-
- var hookIndex int
- var retErr error
-
- for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ {
- ctx, retErr = hs.hooks[hookIndex].BeforeProcess(ctx, cmd)
- if retErr != nil {
- cmd.SetErr(retErr)
- }
- }
-
- if retErr == nil {
- retErr = fn(ctx, cmd)
- cmd.SetErr(retErr)
- }
-
- for hookIndex--; hookIndex >= 0; hookIndex-- {
- if err := hs.hooks[hookIndex].AfterProcess(ctx, cmd); err != nil {
- retErr = err
- cmd.SetErr(retErr)
- }
- }
-
- return retErr
-}
-
-func (hs hooks) processPipeline(
- ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
-) error {
- if len(hs.hooks) == 0 {
- err := fn(ctx, cmds)
- return err
- }
-
- var hookIndex int
- var retErr error
-
- for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ {
- ctx, retErr = hs.hooks[hookIndex].BeforeProcessPipeline(ctx, cmds)
- if retErr != nil {
- setCmdsErr(cmds, retErr)
- }
- }
-
- if retErr == nil {
- retErr = fn(ctx, cmds)
- }
-
- for hookIndex--; hookIndex >= 0; hookIndex-- {
- if err := hs.hooks[hookIndex].AfterProcessPipeline(ctx, cmds); err != nil {
- retErr = err
- setCmdsErr(cmds, retErr)
- }
- }
-
- return retErr
-}
-
-func (hs hooks) processTxPipeline(
- ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
-) error {
- cmds = wrapMultiExec(ctx, cmds)
- return hs.processPipeline(ctx, cmds, fn)
-}
-
-//------------------------------------------------------------------------------
-
-type baseClient struct {
- opt *Options
- connPool pool.Pooler
-
- onClose func() error // hook called when client is closed
-}
-
-func newBaseClient(opt *Options, connPool pool.Pooler) *baseClient {
- return &baseClient{
- opt: opt,
- connPool: connPool,
- }
-}
-
-func (c *baseClient) clone() *baseClient {
- clone := *c
- return &clone
-}
-
-func (c *baseClient) withTimeout(timeout time.Duration) *baseClient {
- opt := c.opt.clone()
- opt.ReadTimeout = timeout
- opt.WriteTimeout = timeout
-
- clone := c.clone()
- clone.opt = opt
-
- return clone
-}
-
-func (c *baseClient) String() string {
- return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB)
-}
-
-func (c *baseClient) newConn(ctx context.Context) (*pool.Conn, error) {
- cn, err := c.connPool.NewConn(ctx)
- if err != nil {
- return nil, err
- }
-
- err = c.initConn(ctx, cn)
- if err != nil {
- _ = c.connPool.CloseConn(cn)
- return nil, err
- }
-
- return cn, nil
-}
-
-func (c *baseClient) getConn(ctx context.Context) (*pool.Conn, error) {
- if c.opt.Limiter != nil {
- err := c.opt.Limiter.Allow()
- if err != nil {
- return nil, err
- }
- }
-
- cn, err := c._getConn(ctx)
- if err != nil {
- if c.opt.Limiter != nil {
- c.opt.Limiter.ReportResult(err)
- }
- return nil, err
- }
-
- return cn, nil
-}
-
-func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) {
- cn, err := c.connPool.Get(ctx)
- if err != nil {
- return nil, err
- }
-
- if cn.Inited {
- return cn, nil
- }
-
- if err := c.initConn(ctx, cn); err != nil {
- c.connPool.Remove(ctx, cn, err)
- if err := errors.Unwrap(err); err != nil {
- return nil, err
- }
- return nil, err
- }
-
- return cn, nil
-}
-
-func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error {
- if cn.Inited {
- return nil
- }
- cn.Inited = true
-
- if c.opt.Password == "" &&
- c.opt.DB == 0 &&
- !c.opt.readOnly &&
- c.opt.OnConnect == nil {
- return nil
- }
-
- connPool := pool.NewSingleConnPool(c.connPool, cn)
- conn := newConn(ctx, c.opt, connPool)
-
- _, err := conn.Pipelined(ctx, func(pipe Pipeliner) error {
- if c.opt.Password != "" {
- if c.opt.Username != "" {
- pipe.AuthACL(ctx, c.opt.Username, c.opt.Password)
- } else {
- pipe.Auth(ctx, c.opt.Password)
- }
- }
-
- if c.opt.DB > 0 {
- pipe.Select(ctx, c.opt.DB)
- }
-
- if c.opt.readOnly {
- pipe.ReadOnly(ctx)
- }
-
- return nil
- })
- if err != nil {
- return err
- }
-
- if c.opt.OnConnect != nil {
- return c.opt.OnConnect(ctx, conn)
- }
- return nil
-}
-
-func (c *baseClient) releaseConn(ctx context.Context, cn *pool.Conn, err error) {
- if c.opt.Limiter != nil {
- c.opt.Limiter.ReportResult(err)
- }
-
- if isBadConn(err, false, c.opt.Addr) {
- c.connPool.Remove(ctx, cn, err)
- } else {
- c.connPool.Put(ctx, cn)
- }
-}
-
-func (c *baseClient) withConn(
- ctx context.Context, fn func(context.Context, *pool.Conn) error,
-) error {
- cn, err := c.getConn(ctx)
- if err != nil {
- return err
- }
-
- defer func() {
- c.releaseConn(ctx, cn, err)
- }()
-
- done := ctx.Done() //nolint:ifshort
-
- if done == nil {
- err = fn(ctx, cn)
- return err
- }
-
- errc := make(chan error, 1)
- go func() { errc <- fn(ctx, cn) }()
-
- select {
- case <-done:
- _ = cn.Close()
- // Wait for the goroutine to finish and send something.
- <-errc
-
- err = ctx.Err()
- return err
- case err = <-errc:
- return err
- }
-}
-
-func (c *baseClient) process(ctx context.Context, cmd Cmder) error {
- var lastErr error
- for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
- attempt := attempt
-
- retry, err := c._process(ctx, cmd, attempt)
- if err == nil || !retry {
- return err
- }
-
- lastErr = err
- }
- return lastErr
-}
-
-func (c *baseClient) _process(ctx context.Context, cmd Cmder, attempt int) (bool, error) {
- if attempt > 0 {
- if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
- return false, err
- }
- }
-
- retryTimeout := uint32(1)
- err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
- err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
- return writeCmd(wr, cmd)
- })
- if err != nil {
- return err
- }
-
- err = cn.WithReader(ctx, c.cmdTimeout(cmd), cmd.readReply)
- if err != nil {
- if cmd.readTimeout() == nil {
- atomic.StoreUint32(&retryTimeout, 1)
- }
- return err
- }
-
- return nil
- })
- if err == nil {
- return false, nil
- }
-
- retry := shouldRetry(err, atomic.LoadUint32(&retryTimeout) == 1)
- return retry, err
-}
-
-func (c *baseClient) retryBackoff(attempt int) time.Duration {
- return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
-}
-
-func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration {
- if timeout := cmd.readTimeout(); timeout != nil {
- t := *timeout
- if t == 0 {
- return 0
- }
- return t + 10*time.Second
- }
- return c.opt.ReadTimeout
-}
-
-// Close closes the client, releasing any open resources.
-//
-// It is rare to Close a Client, as the Client is meant to be
-// long-lived and shared between many goroutines.
-func (c *baseClient) Close() error {
- var firstErr error
- if c.onClose != nil {
- if err := c.onClose(); err != nil {
- firstErr = err
- }
- }
- if err := c.connPool.Close(); err != nil && firstErr == nil {
- firstErr = err
- }
- return firstErr
-}
-
-func (c *baseClient) getAddr() string {
- return c.opt.Addr
-}
-
-func (c *baseClient) processPipeline(ctx context.Context, cmds []Cmder) error {
- return c.generalProcessPipeline(ctx, cmds, c.pipelineProcessCmds)
-}
-
-func (c *baseClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
- return c.generalProcessPipeline(ctx, cmds, c.txPipelineProcessCmds)
-}
-
-type pipelineProcessor func(context.Context, *pool.Conn, []Cmder) (bool, error)
-
-func (c *baseClient) generalProcessPipeline(
- ctx context.Context, cmds []Cmder, p pipelineProcessor,
-) error {
- err := c._generalProcessPipeline(ctx, cmds, p)
- if err != nil {
- setCmdsErr(cmds, err)
- return err
- }
- return cmdsFirstErr(cmds)
-}
-
-func (c *baseClient) _generalProcessPipeline(
- ctx context.Context, cmds []Cmder, p pipelineProcessor,
-) error {
- var lastErr error
- for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
- if attempt > 0 {
- if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
- return err
- }
- }
-
- var canRetry bool
- lastErr = c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
- var err error
- canRetry, err = p(ctx, cn, cmds)
- return err
- })
- if lastErr == nil || !canRetry || !shouldRetry(lastErr, true) {
- return lastErr
- }
- }
- return lastErr
-}
-
-func (c *baseClient) pipelineProcessCmds(
- ctx context.Context, cn *pool.Conn, cmds []Cmder,
-) (bool, error) {
- err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
- return writeCmds(wr, cmds)
- })
- if err != nil {
- return true, err
- }
-
- err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
- return pipelineReadCmds(rd, cmds)
- })
- return true, err
-}
-
-func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error {
- for _, cmd := range cmds {
- err := cmd.readReply(rd)
- cmd.SetErr(err)
- if err != nil && !isRedisError(err) {
- return err
- }
- }
- return nil
-}
-
-func (c *baseClient) txPipelineProcessCmds(
- ctx context.Context, cn *pool.Conn, cmds []Cmder,
-) (bool, error) {
- err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
- return writeCmds(wr, cmds)
- })
- if err != nil {
- return true, err
- }
-
- err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
- statusCmd := cmds[0].(*StatusCmd)
- // Trim multi and exec.
- cmds = cmds[1 : len(cmds)-1]
-
- err := txPipelineReadQueued(rd, statusCmd, cmds)
- if err != nil {
- return err
- }
-
- return pipelineReadCmds(rd, cmds)
- })
- return false, err
-}
-
-func wrapMultiExec(ctx context.Context, cmds []Cmder) []Cmder {
- if len(cmds) == 0 {
- panic("not reached")
- }
- cmdCopy := make([]Cmder, len(cmds)+2)
- cmdCopy[0] = NewStatusCmd(ctx, "multi")
- copy(cmdCopy[1:], cmds)
- cmdCopy[len(cmdCopy)-1] = NewSliceCmd(ctx, "exec")
- return cmdCopy
-}
-
-func txPipelineReadQueued(rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) error {
- // Parse queued replies.
- if err := statusCmd.readReply(rd); err != nil {
- return err
- }
-
- for range cmds {
- if err := statusCmd.readReply(rd); err != nil && !isRedisError(err) {
- return err
- }
- }
-
- // Parse number of replies.
- line, err := rd.ReadLine()
- if err != nil {
- if err == Nil {
- err = TxFailedErr
- }
- return err
- }
-
- switch line[0] {
- case proto.ErrorReply:
- return proto.ParseErrorReply(line)
- case proto.ArrayReply:
- // ok
- default:
- err := fmt.Errorf("redis: expected '*', but got line %q", line)
- return err
- }
-
- return nil
-}
-
-//------------------------------------------------------------------------------
-
-// Client is a Redis client representing a pool of zero or more
-// underlying connections. It's safe for concurrent use by multiple
-// goroutines.
-type Client struct {
- *baseClient
- cmdable
- hooks
- ctx context.Context
-}
-
-// NewClient returns a client to the Redis Server specified by Options.
-func NewClient(opt *Options) *Client {
- opt.init()
-
- c := Client{
- baseClient: newBaseClient(opt, newConnPool(opt)),
- ctx: context.Background(),
- }
- c.cmdable = c.Process
-
- return &c
-}
-
-func (c *Client) clone() *Client {
- clone := *c
- clone.cmdable = clone.Process
- clone.hooks.lock()
- return &clone
-}
-
-func (c *Client) WithTimeout(timeout time.Duration) *Client {
- clone := c.clone()
- clone.baseClient = c.baseClient.withTimeout(timeout)
- return clone
-}
-
-func (c *Client) Context() context.Context {
- return c.ctx
-}
-
-func (c *Client) WithContext(ctx context.Context) *Client {
- if ctx == nil {
- panic("nil context")
- }
- clone := c.clone()
- clone.ctx = ctx
- return clone
-}
-
-func (c *Client) Conn(ctx context.Context) *Conn {
- return newConn(ctx, c.opt, pool.NewStickyConnPool(c.connPool))
-}
-
-// Do creates a Cmd from the args and processes the cmd.
-func (c *Client) Do(ctx context.Context, args ...interface{}) *Cmd {
- cmd := NewCmd(ctx, args...)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-func (c *Client) Process(ctx context.Context, cmd Cmder) error {
- return c.hooks.process(ctx, cmd, c.baseClient.process)
-}
-
-func (c *Client) processPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
-}
-
-func (c *Client) processTxPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
-}
-
-// Options returns read-only Options that were used to create the client.
-func (c *Client) Options() *Options {
- return c.opt
-}
-
-type PoolStats pool.Stats
-
-// PoolStats returns connection pool stats.
-func (c *Client) PoolStats() *PoolStats {
- stats := c.connPool.Stats()
- return (*PoolStats)(stats)
-}
-
-func (c *Client) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.Pipeline().Pipelined(ctx, fn)
-}
-
-func (c *Client) Pipeline() Pipeliner {
- pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processPipeline,
- }
- pipe.init()
- return &pipe
-}
-
-func (c *Client) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.TxPipeline().Pipelined(ctx, fn)
-}
-
-// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
-func (c *Client) TxPipeline() Pipeliner {
- pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processTxPipeline,
- }
- pipe.init()
- return &pipe
-}
-
-func (c *Client) pubSub() *PubSub {
- pubsub := &PubSub{
- opt: c.opt,
-
- newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
- return c.newConn(ctx)
- },
- closeConn: c.connPool.CloseConn,
- }
- pubsub.init()
- return pubsub
-}
-
-// Subscribe subscribes the client to the specified channels.
-// Channels can be omitted to create empty subscription.
-// Note that this method does not wait on a response from Redis, so the
-// subscription may not be active immediately. To force the connection to wait,
-// you may call the Receive() method on the returned *PubSub like so:
-//
-// sub := client.Subscribe(queryResp)
-// iface, err := sub.Receive()
-// if err != nil {
-// // handle error
-// }
-//
-// // Should be *Subscription, but others are possible if other actions have been
-// // taken on sub since it was created.
-// switch iface.(type) {
-// case *Subscription:
-// // subscribe succeeded
-// case *Message:
-// // received first message
-// case *Pong:
-// // pong received
-// default:
-// // handle error
-// }
-//
-// ch := sub.Channel()
-func (c *Client) Subscribe(ctx context.Context, channels ...string) *PubSub {
- pubsub := c.pubSub()
- if len(channels) > 0 {
- _ = pubsub.Subscribe(ctx, channels...)
- }
- return pubsub
-}
-
-// PSubscribe subscribes the client to the given patterns.
-// Patterns can be omitted to create empty subscription.
-func (c *Client) PSubscribe(ctx context.Context, channels ...string) *PubSub {
- pubsub := c.pubSub()
- if len(channels) > 0 {
- _ = pubsub.PSubscribe(ctx, channels...)
- }
- return pubsub
-}
-
-//------------------------------------------------------------------------------
-
-type conn struct {
- baseClient
- cmdable
- statefulCmdable
- hooks // TODO: inherit hooks
-}
-
-// Conn represents a single Redis connection rather than a pool of connections.
-// Prefer running commands from Client unless there is a specific need
-// for a continuous single Redis connection.
-type Conn struct {
- *conn
- ctx context.Context
-}
-
-func newConn(ctx context.Context, opt *Options, connPool pool.Pooler) *Conn {
- c := Conn{
- conn: &conn{
- baseClient: baseClient{
- opt: opt,
- connPool: connPool,
- },
- },
- ctx: ctx,
- }
- c.cmdable = c.Process
- c.statefulCmdable = c.Process
- return &c
-}
-
-func (c *Conn) Process(ctx context.Context, cmd Cmder) error {
- return c.hooks.process(ctx, cmd, c.baseClient.process)
-}
-
-func (c *Conn) processPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
-}
-
-func (c *Conn) processTxPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
-}
-
-func (c *Conn) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.Pipeline().Pipelined(ctx, fn)
-}
-
-func (c *Conn) Pipeline() Pipeliner {
- pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processPipeline,
- }
- pipe.init()
- return &pipe
-}
-
-func (c *Conn) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.TxPipeline().Pipelined(ctx, fn)
-}
-
-// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
-func (c *Conn) TxPipeline() Pipeliner {
- pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processTxPipeline,
- }
- pipe.init()
- return &pipe
-}
diff --git a/vendor/github.com/go-redis/redis/v8/result.go b/vendor/github.com/go-redis/redis/v8/result.go
deleted file mode 100644
index 24cfd499..00000000
--- a/vendor/github.com/go-redis/redis/v8/result.go
+++ /dev/null
@@ -1,180 +0,0 @@
-package redis
-
-import "time"
-
-// NewCmdResult returns a Cmd initialised with val and err for testing.
-func NewCmdResult(val interface{}, err error) *Cmd {
- var cmd Cmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewSliceResult returns a SliceCmd initialised with val and err for testing.
-func NewSliceResult(val []interface{}, err error) *SliceCmd {
- var cmd SliceCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewStatusResult returns a StatusCmd initialised with val and err for testing.
-func NewStatusResult(val string, err error) *StatusCmd {
- var cmd StatusCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewIntResult returns an IntCmd initialised with val and err for testing.
-func NewIntResult(val int64, err error) *IntCmd {
- var cmd IntCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewDurationResult returns a DurationCmd initialised with val and err for testing.
-func NewDurationResult(val time.Duration, err error) *DurationCmd {
- var cmd DurationCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewBoolResult returns a BoolCmd initialised with val and err for testing.
-func NewBoolResult(val bool, err error) *BoolCmd {
- var cmd BoolCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewStringResult returns a StringCmd initialised with val and err for testing.
-func NewStringResult(val string, err error) *StringCmd {
- var cmd StringCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewFloatResult returns a FloatCmd initialised with val and err for testing.
-func NewFloatResult(val float64, err error) *FloatCmd {
- var cmd FloatCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewStringSliceResult returns a StringSliceCmd initialised with val and err for testing.
-func NewStringSliceResult(val []string, err error) *StringSliceCmd {
- var cmd StringSliceCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewBoolSliceResult returns a BoolSliceCmd initialised with val and err for testing.
-func NewBoolSliceResult(val []bool, err error) *BoolSliceCmd {
- var cmd BoolSliceCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewStringStringMapResult returns a StringStringMapCmd initialised with val and err for testing.
-func NewStringStringMapResult(val map[string]string, err error) *StringStringMapCmd {
- var cmd StringStringMapCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewStringIntMapCmdResult returns a StringIntMapCmd initialised with val and err for testing.
-func NewStringIntMapCmdResult(val map[string]int64, err error) *StringIntMapCmd {
- var cmd StringIntMapCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewTimeCmdResult returns a TimeCmd initialised with val and err for testing.
-func NewTimeCmdResult(val time.Time, err error) *TimeCmd {
- var cmd TimeCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewZSliceCmdResult returns a ZSliceCmd initialised with val and err for testing.
-func NewZSliceCmdResult(val []Z, err error) *ZSliceCmd {
- var cmd ZSliceCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewZWithKeyCmdResult returns a NewZWithKeyCmd initialised with val and err for testing.
-func NewZWithKeyCmdResult(val *ZWithKey, err error) *ZWithKeyCmd {
- var cmd ZWithKeyCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewScanCmdResult returns a ScanCmd initialised with val and err for testing.
-func NewScanCmdResult(keys []string, cursor uint64, err error) *ScanCmd {
- var cmd ScanCmd
- cmd.page = keys
- cmd.cursor = cursor
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewClusterSlotsCmdResult returns a ClusterSlotsCmd initialised with val and err for testing.
-func NewClusterSlotsCmdResult(val []ClusterSlot, err error) *ClusterSlotsCmd {
- var cmd ClusterSlotsCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewGeoLocationCmdResult returns a GeoLocationCmd initialised with val and err for testing.
-func NewGeoLocationCmdResult(val []GeoLocation, err error) *GeoLocationCmd {
- var cmd GeoLocationCmd
- cmd.locations = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewGeoPosCmdResult returns a GeoPosCmd initialised with val and err for testing.
-func NewGeoPosCmdResult(val []*GeoPos, err error) *GeoPosCmd {
- var cmd GeoPosCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewCommandsInfoCmdResult returns a CommandsInfoCmd initialised with val and err for testing.
-func NewCommandsInfoCmdResult(val map[string]*CommandInfo, err error) *CommandsInfoCmd {
- var cmd CommandsInfoCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewXMessageSliceCmdResult returns a XMessageSliceCmd initialised with val and err for testing.
-func NewXMessageSliceCmdResult(val []XMessage, err error) *XMessageSliceCmd {
- var cmd XMessageSliceCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewXStreamSliceCmdResult returns a XStreamSliceCmd initialised with val and err for testing.
-func NewXStreamSliceCmdResult(val []XStream, err error) *XStreamSliceCmd {
- var cmd XStreamSliceCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
diff --git a/vendor/github.com/go-redis/redis/v8/ring.go b/vendor/github.com/go-redis/redis/v8/ring.go
deleted file mode 100644
index 4df00fc8..00000000
--- a/vendor/github.com/go-redis/redis/v8/ring.go
+++ /dev/null
@@ -1,736 +0,0 @@
-package redis
-
-import (
- "context"
- "crypto/tls"
- "errors"
- "fmt"
- "net"
- "strconv"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/cespare/xxhash/v2"
- rendezvous "github.com/dgryski/go-rendezvous" //nolint
-
- "github.com/go-redis/redis/v8/internal"
- "github.com/go-redis/redis/v8/internal/hashtag"
- "github.com/go-redis/redis/v8/internal/pool"
- "github.com/go-redis/redis/v8/internal/rand"
-)
-
-var errRingShardsDown = errors.New("redis: all ring shards are down")
-
-//------------------------------------------------------------------------------
-
-type ConsistentHash interface {
- Get(string) string
-}
-
-type rendezvousWrapper struct {
- *rendezvous.Rendezvous
-}
-
-func (w rendezvousWrapper) Get(key string) string {
- return w.Lookup(key)
-}
-
-func newRendezvous(shards []string) ConsistentHash {
- return rendezvousWrapper{rendezvous.New(shards, xxhash.Sum64String)}
-}
-
-//------------------------------------------------------------------------------
-
-// RingOptions are used to configure a ring client and should be
-// passed to NewRing.
-type RingOptions struct {
- // Map of name => host:port addresses of ring shards.
- Addrs map[string]string
-
- // NewClient creates a shard client with provided name and options.
- NewClient func(name string, opt *Options) *Client
-
- // Frequency of PING commands sent to check shards availability.
- // Shard is considered down after 3 subsequent failed checks.
- HeartbeatFrequency time.Duration
-
- // NewConsistentHash returns a consistent hash that is used
- // to distribute keys across the shards.
- //
- // See https://medium.com/@dgryski/consistent-hashing-algorithmic-tradeoffs-ef6b8e2fcae8
- // for consistent hashing algorithmic tradeoffs.
- NewConsistentHash func(shards []string) ConsistentHash
-
- // Following options are copied from Options struct.
-
- Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
- OnConnect func(ctx context.Context, cn *Conn) error
-
- Username string
- Password string
- DB int
-
- MaxRetries int
- MinRetryBackoff time.Duration
- MaxRetryBackoff time.Duration
-
- DialTimeout time.Duration
- ReadTimeout time.Duration
- WriteTimeout time.Duration
-
- // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
- PoolFIFO bool
-
- PoolSize int
- MinIdleConns int
- MaxConnAge time.Duration
- PoolTimeout time.Duration
- IdleTimeout time.Duration
- IdleCheckFrequency time.Duration
-
- TLSConfig *tls.Config
- Limiter Limiter
-}
-
-func (opt *RingOptions) init() {
- if opt.NewClient == nil {
- opt.NewClient = func(name string, opt *Options) *Client {
- return NewClient(opt)
- }
- }
-
- if opt.HeartbeatFrequency == 0 {
- opt.HeartbeatFrequency = 500 * time.Millisecond
- }
-
- if opt.NewConsistentHash == nil {
- opt.NewConsistentHash = newRendezvous
- }
-
- if opt.MaxRetries == -1 {
- opt.MaxRetries = 0
- } else if opt.MaxRetries == 0 {
- opt.MaxRetries = 3
- }
- switch opt.MinRetryBackoff {
- case -1:
- opt.MinRetryBackoff = 0
- case 0:
- opt.MinRetryBackoff = 8 * time.Millisecond
- }
- switch opt.MaxRetryBackoff {
- case -1:
- opt.MaxRetryBackoff = 0
- case 0:
- opt.MaxRetryBackoff = 512 * time.Millisecond
- }
-}
-
-func (opt *RingOptions) clientOptions() *Options {
- return &Options{
- Dialer: opt.Dialer,
- OnConnect: opt.OnConnect,
-
- Username: opt.Username,
- Password: opt.Password,
- DB: opt.DB,
-
- MaxRetries: -1,
-
- DialTimeout: opt.DialTimeout,
- ReadTimeout: opt.ReadTimeout,
- WriteTimeout: opt.WriteTimeout,
-
- PoolFIFO: opt.PoolFIFO,
- PoolSize: opt.PoolSize,
- MinIdleConns: opt.MinIdleConns,
- MaxConnAge: opt.MaxConnAge,
- PoolTimeout: opt.PoolTimeout,
- IdleTimeout: opt.IdleTimeout,
- IdleCheckFrequency: opt.IdleCheckFrequency,
-
- TLSConfig: opt.TLSConfig,
- Limiter: opt.Limiter,
- }
-}
-
-//------------------------------------------------------------------------------
-
-type ringShard struct {
- Client *Client
- down int32
-}
-
-func newRingShard(opt *RingOptions, name, addr string) *ringShard {
- clopt := opt.clientOptions()
- clopt.Addr = addr
-
- return &ringShard{
- Client: opt.NewClient(name, clopt),
- }
-}
-
-func (shard *ringShard) String() string {
- var state string
- if shard.IsUp() {
- state = "up"
- } else {
- state = "down"
- }
- return fmt.Sprintf("%s is %s", shard.Client, state)
-}
-
-func (shard *ringShard) IsDown() bool {
- const threshold = 3
- return atomic.LoadInt32(&shard.down) >= threshold
-}
-
-func (shard *ringShard) IsUp() bool {
- return !shard.IsDown()
-}
-
-// Vote votes to set shard state and returns true if state was changed.
-func (shard *ringShard) Vote(up bool) bool {
- if up {
- changed := shard.IsDown()
- atomic.StoreInt32(&shard.down, 0)
- return changed
- }
-
- if shard.IsDown() {
- return false
- }
-
- atomic.AddInt32(&shard.down, 1)
- return shard.IsDown()
-}
-
-//------------------------------------------------------------------------------
-
-type ringShards struct {
- opt *RingOptions
-
- mu sync.RWMutex
- hash ConsistentHash
- shards map[string]*ringShard // read only
- list []*ringShard // read only
- numShard int
- closed bool
-}
-
-func newRingShards(opt *RingOptions) *ringShards {
- shards := make(map[string]*ringShard, len(opt.Addrs))
- list := make([]*ringShard, 0, len(shards))
-
- for name, addr := range opt.Addrs {
- shard := newRingShard(opt, name, addr)
- shards[name] = shard
-
- list = append(list, shard)
- }
-
- c := &ringShards{
- opt: opt,
-
- shards: shards,
- list: list,
- }
- c.rebalance()
-
- return c
-}
-
-func (c *ringShards) List() []*ringShard {
- var list []*ringShard
-
- c.mu.RLock()
- if !c.closed {
- list = c.list
- }
- c.mu.RUnlock()
-
- return list
-}
-
-func (c *ringShards) Hash(key string) string {
- key = hashtag.Key(key)
-
- var hash string
-
- c.mu.RLock()
- if c.numShard > 0 {
- hash = c.hash.Get(key)
- }
- c.mu.RUnlock()
-
- return hash
-}
-
-func (c *ringShards) GetByKey(key string) (*ringShard, error) {
- key = hashtag.Key(key)
-
- c.mu.RLock()
-
- if c.closed {
- c.mu.RUnlock()
- return nil, pool.ErrClosed
- }
-
- if c.numShard == 0 {
- c.mu.RUnlock()
- return nil, errRingShardsDown
- }
-
- hash := c.hash.Get(key)
- if hash == "" {
- c.mu.RUnlock()
- return nil, errRingShardsDown
- }
-
- shard := c.shards[hash]
- c.mu.RUnlock()
-
- return shard, nil
-}
-
-func (c *ringShards) GetByName(shardName string) (*ringShard, error) {
- if shardName == "" {
- return c.Random()
- }
-
- c.mu.RLock()
- shard := c.shards[shardName]
- c.mu.RUnlock()
- return shard, nil
-}
-
-func (c *ringShards) Random() (*ringShard, error) {
- return c.GetByKey(strconv.Itoa(rand.Int()))
-}
-
-// heartbeat monitors state of each shard in the ring.
-func (c *ringShards) Heartbeat(frequency time.Duration) {
- ticker := time.NewTicker(frequency)
- defer ticker.Stop()
-
- ctx := context.Background()
- for range ticker.C {
- var rebalance bool
-
- for _, shard := range c.List() {
- err := shard.Client.Ping(ctx).Err()
- isUp := err == nil || err == pool.ErrPoolTimeout
- if shard.Vote(isUp) {
- internal.Logger.Printf(context.Background(), "ring shard state changed: %s", shard)
- rebalance = true
- }
- }
-
- if rebalance {
- c.rebalance()
- }
- }
-}
-
-// rebalance removes dead shards from the Ring.
-func (c *ringShards) rebalance() {
- c.mu.RLock()
- shards := c.shards
- c.mu.RUnlock()
-
- liveShards := make([]string, 0, len(shards))
-
- for name, shard := range shards {
- if shard.IsUp() {
- liveShards = append(liveShards, name)
- }
- }
-
- hash := c.opt.NewConsistentHash(liveShards)
-
- c.mu.Lock()
- c.hash = hash
- c.numShard = len(liveShards)
- c.mu.Unlock()
-}
-
-func (c *ringShards) Len() int {
- c.mu.RLock()
- l := c.numShard
- c.mu.RUnlock()
- return l
-}
-
-func (c *ringShards) Close() error {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- if c.closed {
- return nil
- }
- c.closed = true
-
- var firstErr error
- for _, shard := range c.shards {
- if err := shard.Client.Close(); err != nil && firstErr == nil {
- firstErr = err
- }
- }
- c.hash = nil
- c.shards = nil
- c.list = nil
-
- return firstErr
-}
-
-//------------------------------------------------------------------------------
-
-type ring struct {
- opt *RingOptions
- shards *ringShards
- cmdsInfoCache *cmdsInfoCache //nolint:structcheck
-}
-
-// Ring is a Redis client that uses consistent hashing to distribute
-// keys across multiple Redis servers (shards). It's safe for
-// concurrent use by multiple goroutines.
-//
-// Ring monitors the state of each shard and removes dead shards from
-// the ring. When a shard comes online it is added back to the ring. This
-// gives you maximum availability and partition tolerance, but no
-// consistency between different shards or even clients. Each client
-// uses shards that are available to the client and does not do any
-// coordination when shard state is changed.
-//
-// Ring should be used when you need multiple Redis servers for caching
-// and can tolerate losing data when one of the servers dies.
-// Otherwise you should use Redis Cluster.
-type Ring struct {
- *ring
- cmdable
- hooks
- ctx context.Context
-}
-
-func NewRing(opt *RingOptions) *Ring {
- opt.init()
-
- ring := Ring{
- ring: &ring{
- opt: opt,
- shards: newRingShards(opt),
- },
- ctx: context.Background(),
- }
-
- ring.cmdsInfoCache = newCmdsInfoCache(ring.cmdsInfo)
- ring.cmdable = ring.Process
-
- go ring.shards.Heartbeat(opt.HeartbeatFrequency)
-
- return &ring
-}
-
-func (c *Ring) Context() context.Context {
- return c.ctx
-}
-
-func (c *Ring) WithContext(ctx context.Context) *Ring {
- if ctx == nil {
- panic("nil context")
- }
- clone := *c
- clone.cmdable = clone.Process
- clone.hooks.lock()
- clone.ctx = ctx
- return &clone
-}
-
-// Do creates a Cmd from the args and processes the cmd.
-func (c *Ring) Do(ctx context.Context, args ...interface{}) *Cmd {
- cmd := NewCmd(ctx, args...)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-func (c *Ring) Process(ctx context.Context, cmd Cmder) error {
- return c.hooks.process(ctx, cmd, c.process)
-}
-
-// Options returns read-only Options that were used to create the client.
-func (c *Ring) Options() *RingOptions {
- return c.opt
-}
-
-func (c *Ring) retryBackoff(attempt int) time.Duration {
- return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
-}
-
-// PoolStats returns accumulated connection pool stats.
-func (c *Ring) PoolStats() *PoolStats {
- shards := c.shards.List()
- var acc PoolStats
- for _, shard := range shards {
- s := shard.Client.connPool.Stats()
- acc.Hits += s.Hits
- acc.Misses += s.Misses
- acc.Timeouts += s.Timeouts
- acc.TotalConns += s.TotalConns
- acc.IdleConns += s.IdleConns
- }
- return &acc
-}
-
-// Len returns the current number of shards in the ring.
-func (c *Ring) Len() int {
- return c.shards.Len()
-}
-
-// Subscribe subscribes the client to the specified channels.
-func (c *Ring) Subscribe(ctx context.Context, channels ...string) *PubSub {
- if len(channels) == 0 {
- panic("at least one channel is required")
- }
-
- shard, err := c.shards.GetByKey(channels[0])
- if err != nil {
- // TODO: return PubSub with sticky error
- panic(err)
- }
- return shard.Client.Subscribe(ctx, channels...)
-}
-
-// PSubscribe subscribes the client to the given patterns.
-func (c *Ring) PSubscribe(ctx context.Context, channels ...string) *PubSub {
- if len(channels) == 0 {
- panic("at least one channel is required")
- }
-
- shard, err := c.shards.GetByKey(channels[0])
- if err != nil {
- // TODO: return PubSub with sticky error
- panic(err)
- }
- return shard.Client.PSubscribe(ctx, channels...)
-}
-
-// ForEachShard concurrently calls the fn on each live shard in the ring.
-// It returns the first error if any.
-func (c *Ring) ForEachShard(
- ctx context.Context,
- fn func(ctx context.Context, client *Client) error,
-) error {
- shards := c.shards.List()
- var wg sync.WaitGroup
- errCh := make(chan error, 1)
- for _, shard := range shards {
- if shard.IsDown() {
- continue
- }
-
- wg.Add(1)
- go func(shard *ringShard) {
- defer wg.Done()
- err := fn(ctx, shard.Client)
- if err != nil {
- select {
- case errCh <- err:
- default:
- }
- }
- }(shard)
- }
- wg.Wait()
-
- select {
- case err := <-errCh:
- return err
- default:
- return nil
- }
-}
-
-func (c *Ring) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) {
- shards := c.shards.List()
- var firstErr error
- for _, shard := range shards {
- cmdsInfo, err := shard.Client.Command(ctx).Result()
- if err == nil {
- return cmdsInfo, nil
- }
- if firstErr == nil {
- firstErr = err
- }
- }
- if firstErr == nil {
- return nil, errRingShardsDown
- }
- return nil, firstErr
-}
-
-func (c *Ring) cmdInfo(ctx context.Context, name string) *CommandInfo {
- cmdsInfo, err := c.cmdsInfoCache.Get(ctx)
- if err != nil {
- return nil
- }
- info := cmdsInfo[name]
- if info == nil {
- internal.Logger.Printf(ctx, "info for cmd=%s not found", name)
- }
- return info
-}
-
-func (c *Ring) cmdShard(ctx context.Context, cmd Cmder) (*ringShard, error) {
- cmdInfo := c.cmdInfo(ctx, cmd.Name())
- pos := cmdFirstKeyPos(cmd, cmdInfo)
- if pos == 0 {
- return c.shards.Random()
- }
- firstKey := cmd.stringArg(pos)
- return c.shards.GetByKey(firstKey)
-}
-
-func (c *Ring) process(ctx context.Context, cmd Cmder) error {
- var lastErr error
- for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
- if attempt > 0 {
- if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
- return err
- }
- }
-
- shard, err := c.cmdShard(ctx, cmd)
- if err != nil {
- return err
- }
-
- lastErr = shard.Client.Process(ctx, cmd)
- if lastErr == nil || !shouldRetry(lastErr, cmd.readTimeout() == nil) {
- return lastErr
- }
- }
- return lastErr
-}
-
-func (c *Ring) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.Pipeline().Pipelined(ctx, fn)
-}
-
-func (c *Ring) Pipeline() Pipeliner {
- pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processPipeline,
- }
- pipe.init()
- return &pipe
-}
-
-func (c *Ring) processPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
- return c.generalProcessPipeline(ctx, cmds, false)
- })
-}
-
-func (c *Ring) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.TxPipeline().Pipelined(ctx, fn)
-}
-
-func (c *Ring) TxPipeline() Pipeliner {
- pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processTxPipeline,
- }
- pipe.init()
- return &pipe
-}
-
-func (c *Ring) processTxPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
- return c.generalProcessPipeline(ctx, cmds, true)
- })
-}
-
-func (c *Ring) generalProcessPipeline(
- ctx context.Context, cmds []Cmder, tx bool,
-) error {
- cmdsMap := make(map[string][]Cmder)
- for _, cmd := range cmds {
- cmdInfo := c.cmdInfo(ctx, cmd.Name())
- hash := cmd.stringArg(cmdFirstKeyPos(cmd, cmdInfo))
- if hash != "" {
- hash = c.shards.Hash(hash)
- }
- cmdsMap[hash] = append(cmdsMap[hash], cmd)
- }
-
- var wg sync.WaitGroup
- for hash, cmds := range cmdsMap {
- wg.Add(1)
- go func(hash string, cmds []Cmder) {
- defer wg.Done()
-
- _ = c.processShardPipeline(ctx, hash, cmds, tx)
- }(hash, cmds)
- }
-
- wg.Wait()
- return cmdsFirstErr(cmds)
-}
-
-func (c *Ring) processShardPipeline(
- ctx context.Context, hash string, cmds []Cmder, tx bool,
-) error {
- // TODO: retry?
- shard, err := c.shards.GetByName(hash)
- if err != nil {
- setCmdsErr(cmds, err)
- return err
- }
-
- if tx {
- return shard.Client.processTxPipeline(ctx, cmds)
- }
- return shard.Client.processPipeline(ctx, cmds)
-}
-
-func (c *Ring) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
- if len(keys) == 0 {
- return fmt.Errorf("redis: Watch requires at least one key")
- }
-
- var shards []*ringShard
- for _, key := range keys {
- if key != "" {
- shard, err := c.shards.GetByKey(hashtag.Key(key))
- if err != nil {
- return err
- }
-
- shards = append(shards, shard)
- }
- }
-
- if len(shards) == 0 {
- return fmt.Errorf("redis: Watch requires at least one shard")
- }
-
- if len(shards) > 1 {
- for _, shard := range shards[1:] {
- if shard.Client != shards[0].Client {
- err := fmt.Errorf("redis: Watch requires all keys to be in the same shard")
- return err
- }
- }
- }
-
- return shards[0].Client.Watch(ctx, fn, keys...)
-}
-
-// Close closes the ring client, releasing any open resources.
-//
-// It is rare to Close a Ring, as the Ring is meant to be long-lived
-// and shared between many goroutines.
-func (c *Ring) Close() error {
- return c.shards.Close()
-}
diff --git a/vendor/github.com/go-redis/redis/v8/script.go b/vendor/github.com/go-redis/redis/v8/script.go
deleted file mode 100644
index 5cab18d6..00000000
--- a/vendor/github.com/go-redis/redis/v8/script.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package redis
-
-import (
- "context"
- "crypto/sha1"
- "encoding/hex"
- "io"
- "strings"
-)
-
-type Scripter interface {
- Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
- EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
- ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
- ScriptLoad(ctx context.Context, script string) *StringCmd
-}
-
-var (
- _ Scripter = (*Client)(nil)
- _ Scripter = (*Ring)(nil)
- _ Scripter = (*ClusterClient)(nil)
-)
-
-type Script struct {
- src, hash string
-}
-
-func NewScript(src string) *Script {
- h := sha1.New()
- _, _ = io.WriteString(h, src)
- return &Script{
- src: src,
- hash: hex.EncodeToString(h.Sum(nil)),
- }
-}
-
-func (s *Script) Hash() string {
- return s.hash
-}
-
-func (s *Script) Load(ctx context.Context, c Scripter) *StringCmd {
- return c.ScriptLoad(ctx, s.src)
-}
-
-func (s *Script) Exists(ctx context.Context, c Scripter) *BoolSliceCmd {
- return c.ScriptExists(ctx, s.hash)
-}
-
-func (s *Script) Eval(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
- return c.Eval(ctx, s.src, keys, args...)
-}
-
-func (s *Script) EvalSha(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
- return c.EvalSha(ctx, s.hash, keys, args...)
-}
-
-// Run optimistically uses EVALSHA to run the script. If script does not exist
-// it is retried using EVAL.
-func (s *Script) Run(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
- r := s.EvalSha(ctx, c, keys, args...)
- if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") {
- return s.Eval(ctx, c, keys, args...)
- }
- return r
-}
diff --git a/vendor/github.com/go-redis/redis/v8/sentinel.go b/vendor/github.com/go-redis/redis/v8/sentinel.go
deleted file mode 100644
index ec6221dc..00000000
--- a/vendor/github.com/go-redis/redis/v8/sentinel.go
+++ /dev/null
@@ -1,796 +0,0 @@
-package redis
-
-import (
- "context"
- "crypto/tls"
- "errors"
- "net"
- "strings"
- "sync"
- "time"
-
- "github.com/go-redis/redis/v8/internal"
- "github.com/go-redis/redis/v8/internal/pool"
- "github.com/go-redis/redis/v8/internal/rand"
-)
-
-//------------------------------------------------------------------------------
-
-// FailoverOptions are used to configure a failover client and should
-// be passed to NewFailoverClient.
-type FailoverOptions struct {
- // The master name.
- MasterName string
- // A seed list of host:port addresses of sentinel nodes.
- SentinelAddrs []string
-
- // If specified with SentinelPassword, enables ACL-based authentication (via
- // AUTH ).
- SentinelUsername string
- // Sentinel password from "requirepass " (if enabled) in Sentinel
- // configuration, or, if SentinelUsername is also supplied, used for ACL-based
- // authentication.
- SentinelPassword string
-
- // Allows routing read-only commands to the closest master or slave node.
- // This option only works with NewFailoverClusterClient.
- RouteByLatency bool
- // Allows routing read-only commands to the random master or slave node.
- // This option only works with NewFailoverClusterClient.
- RouteRandomly bool
-
- // Route all commands to slave read-only nodes.
- SlaveOnly bool
-
- // Use slaves disconnected with master when cannot get connected slaves
- // Now, this option only works in RandomSlaveAddr function.
- UseDisconnectedSlaves bool
-
- // Following options are copied from Options struct.
-
- Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
- OnConnect func(ctx context.Context, cn *Conn) error
-
- Username string
- Password string
- DB int
-
- MaxRetries int
- MinRetryBackoff time.Duration
- MaxRetryBackoff time.Duration
-
- DialTimeout time.Duration
- ReadTimeout time.Duration
- WriteTimeout time.Duration
-
- // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
- PoolFIFO bool
-
- PoolSize int
- MinIdleConns int
- MaxConnAge time.Duration
- PoolTimeout time.Duration
- IdleTimeout time.Duration
- IdleCheckFrequency time.Duration
-
- TLSConfig *tls.Config
-}
-
-func (opt *FailoverOptions) clientOptions() *Options {
- return &Options{
- Addr: "FailoverClient",
-
- Dialer: opt.Dialer,
- OnConnect: opt.OnConnect,
-
- DB: opt.DB,
- Username: opt.Username,
- Password: opt.Password,
-
- MaxRetries: opt.MaxRetries,
- MinRetryBackoff: opt.MinRetryBackoff,
- MaxRetryBackoff: opt.MaxRetryBackoff,
-
- DialTimeout: opt.DialTimeout,
- ReadTimeout: opt.ReadTimeout,
- WriteTimeout: opt.WriteTimeout,
-
- PoolFIFO: opt.PoolFIFO,
- PoolSize: opt.PoolSize,
- PoolTimeout: opt.PoolTimeout,
- IdleTimeout: opt.IdleTimeout,
- IdleCheckFrequency: opt.IdleCheckFrequency,
- MinIdleConns: opt.MinIdleConns,
- MaxConnAge: opt.MaxConnAge,
-
- TLSConfig: opt.TLSConfig,
- }
-}
-
-func (opt *FailoverOptions) sentinelOptions(addr string) *Options {
- return &Options{
- Addr: addr,
-
- Dialer: opt.Dialer,
- OnConnect: opt.OnConnect,
-
- DB: 0,
- Username: opt.SentinelUsername,
- Password: opt.SentinelPassword,
-
- MaxRetries: opt.MaxRetries,
- MinRetryBackoff: opt.MinRetryBackoff,
- MaxRetryBackoff: opt.MaxRetryBackoff,
-
- DialTimeout: opt.DialTimeout,
- ReadTimeout: opt.ReadTimeout,
- WriteTimeout: opt.WriteTimeout,
-
- PoolFIFO: opt.PoolFIFO,
- PoolSize: opt.PoolSize,
- PoolTimeout: opt.PoolTimeout,
- IdleTimeout: opt.IdleTimeout,
- IdleCheckFrequency: opt.IdleCheckFrequency,
- MinIdleConns: opt.MinIdleConns,
- MaxConnAge: opt.MaxConnAge,
-
- TLSConfig: opt.TLSConfig,
- }
-}
-
-func (opt *FailoverOptions) clusterOptions() *ClusterOptions {
- return &ClusterOptions{
- Dialer: opt.Dialer,
- OnConnect: opt.OnConnect,
-
- Username: opt.Username,
- Password: opt.Password,
-
- MaxRedirects: opt.MaxRetries,
-
- RouteByLatency: opt.RouteByLatency,
- RouteRandomly: opt.RouteRandomly,
-
- MinRetryBackoff: opt.MinRetryBackoff,
- MaxRetryBackoff: opt.MaxRetryBackoff,
-
- DialTimeout: opt.DialTimeout,
- ReadTimeout: opt.ReadTimeout,
- WriteTimeout: opt.WriteTimeout,
-
- PoolFIFO: opt.PoolFIFO,
- PoolSize: opt.PoolSize,
- PoolTimeout: opt.PoolTimeout,
- IdleTimeout: opt.IdleTimeout,
- IdleCheckFrequency: opt.IdleCheckFrequency,
- MinIdleConns: opt.MinIdleConns,
- MaxConnAge: opt.MaxConnAge,
-
- TLSConfig: opt.TLSConfig,
- }
-}
-
-// NewFailoverClient returns a Redis client that uses Redis Sentinel
-// for automatic failover. It's safe for concurrent use by multiple
-// goroutines.
-func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
- if failoverOpt.RouteByLatency {
- panic("to route commands by latency, use NewFailoverClusterClient")
- }
- if failoverOpt.RouteRandomly {
- panic("to route commands randomly, use NewFailoverClusterClient")
- }
-
- sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
- copy(sentinelAddrs, failoverOpt.SentinelAddrs)
-
- rand.Shuffle(len(sentinelAddrs), func(i, j int) {
- sentinelAddrs[i], sentinelAddrs[j] = sentinelAddrs[j], sentinelAddrs[i]
- })
-
- failover := &sentinelFailover{
- opt: failoverOpt,
- sentinelAddrs: sentinelAddrs,
- }
-
- opt := failoverOpt.clientOptions()
- opt.Dialer = masterSlaveDialer(failover)
- opt.init()
-
- connPool := newConnPool(opt)
-
- failover.mu.Lock()
- failover.onFailover = func(ctx context.Context, addr string) {
- _ = connPool.Filter(func(cn *pool.Conn) bool {
- return cn.RemoteAddr().String() != addr
- })
- }
- failover.mu.Unlock()
-
- c := Client{
- baseClient: newBaseClient(opt, connPool),
- ctx: context.Background(),
- }
- c.cmdable = c.Process
- c.onClose = failover.Close
-
- return &c
-}
-
-func masterSlaveDialer(
- failover *sentinelFailover,
-) func(ctx context.Context, network, addr string) (net.Conn, error) {
- return func(ctx context.Context, network, _ string) (net.Conn, error) {
- var addr string
- var err error
-
- if failover.opt.SlaveOnly {
- addr, err = failover.RandomSlaveAddr(ctx)
- } else {
- addr, err = failover.MasterAddr(ctx)
- if err == nil {
- failover.trySwitchMaster(ctx, addr)
- }
- }
- if err != nil {
- return nil, err
- }
- if failover.opt.Dialer != nil {
- return failover.opt.Dialer(ctx, network, addr)
- }
-
- netDialer := &net.Dialer{
- Timeout: failover.opt.DialTimeout,
- KeepAlive: 5 * time.Minute,
- }
- if failover.opt.TLSConfig == nil {
- return netDialer.DialContext(ctx, network, addr)
- }
- return tls.DialWithDialer(netDialer, network, addr, failover.opt.TLSConfig)
- }
-}
-
-//------------------------------------------------------------------------------
-
-// SentinelClient is a client for a Redis Sentinel.
-type SentinelClient struct {
- *baseClient
- hooks
- ctx context.Context
-}
-
-func NewSentinelClient(opt *Options) *SentinelClient {
- opt.init()
- c := &SentinelClient{
- baseClient: &baseClient{
- opt: opt,
- connPool: newConnPool(opt),
- },
- ctx: context.Background(),
- }
- return c
-}
-
-func (c *SentinelClient) Context() context.Context {
- return c.ctx
-}
-
-func (c *SentinelClient) WithContext(ctx context.Context) *SentinelClient {
- if ctx == nil {
- panic("nil context")
- }
- clone := *c
- clone.ctx = ctx
- return &clone
-}
-
-func (c *SentinelClient) Process(ctx context.Context, cmd Cmder) error {
- return c.hooks.process(ctx, cmd, c.baseClient.process)
-}
-
-func (c *SentinelClient) pubSub() *PubSub {
- pubsub := &PubSub{
- opt: c.opt,
-
- newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
- return c.newConn(ctx)
- },
- closeConn: c.connPool.CloseConn,
- }
- pubsub.init()
- return pubsub
-}
-
-// Ping is used to test if a connection is still alive, or to
-// measure latency.
-func (c *SentinelClient) Ping(ctx context.Context) *StringCmd {
- cmd := NewStringCmd(ctx, "ping")
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-// Subscribe subscribes the client to the specified channels.
-// Channels can be omitted to create empty subscription.
-func (c *SentinelClient) Subscribe(ctx context.Context, channels ...string) *PubSub {
- pubsub := c.pubSub()
- if len(channels) > 0 {
- _ = pubsub.Subscribe(ctx, channels...)
- }
- return pubsub
-}
-
-// PSubscribe subscribes the client to the given patterns.
-// Patterns can be omitted to create empty subscription.
-func (c *SentinelClient) PSubscribe(ctx context.Context, channels ...string) *PubSub {
- pubsub := c.pubSub()
- if len(channels) > 0 {
- _ = pubsub.PSubscribe(ctx, channels...)
- }
- return pubsub
-}
-
-func (c *SentinelClient) GetMasterAddrByName(ctx context.Context, name string) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "sentinel", "get-master-addr-by-name", name)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-func (c *SentinelClient) Sentinels(ctx context.Context, name string) *SliceCmd {
- cmd := NewSliceCmd(ctx, "sentinel", "sentinels", name)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-// Failover forces a failover as if the master was not reachable, and without
-// asking for agreement to other Sentinels.
-func (c *SentinelClient) Failover(ctx context.Context, name string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "sentinel", "failover", name)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-// Reset resets all the masters with matching name. The pattern argument is a
-// glob-style pattern. The reset process clears any previous state in a master
-// (including a failover in progress), and removes every slave and sentinel
-// already discovered and associated with the master.
-func (c *SentinelClient) Reset(ctx context.Context, pattern string) *IntCmd {
- cmd := NewIntCmd(ctx, "sentinel", "reset", pattern)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-// FlushConfig forces Sentinel to rewrite its configuration on disk, including
-// the current Sentinel state.
-func (c *SentinelClient) FlushConfig(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "sentinel", "flushconfig")
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-// Master shows the state and info of the specified master.
-func (c *SentinelClient) Master(ctx context.Context, name string) *StringStringMapCmd {
- cmd := NewStringStringMapCmd(ctx, "sentinel", "master", name)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-// Masters shows a list of monitored masters and their state.
-func (c *SentinelClient) Masters(ctx context.Context) *SliceCmd {
- cmd := NewSliceCmd(ctx, "sentinel", "masters")
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-// Slaves shows a list of slaves for the specified master and their state.
-func (c *SentinelClient) Slaves(ctx context.Context, name string) *SliceCmd {
- cmd := NewSliceCmd(ctx, "sentinel", "slaves", name)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-// CkQuorum checks if the current Sentinel configuration is able to reach the
-// quorum needed to failover a master, and the majority needed to authorize the
-// failover. This command should be used in monitoring systems to check if a
-// Sentinel deployment is ok.
-func (c *SentinelClient) CkQuorum(ctx context.Context, name string) *StringCmd {
- cmd := NewStringCmd(ctx, "sentinel", "ckquorum", name)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-// Monitor tells the Sentinel to start monitoring a new master with the specified
-// name, ip, port, and quorum.
-func (c *SentinelClient) Monitor(ctx context.Context, name, ip, port, quorum string) *StringCmd {
- cmd := NewStringCmd(ctx, "sentinel", "monitor", name, ip, port, quorum)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-// Set is used in order to change configuration parameters of a specific master.
-func (c *SentinelClient) Set(ctx context.Context, name, option, value string) *StringCmd {
- cmd := NewStringCmd(ctx, "sentinel", "set", name, option, value)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-// Remove is used in order to remove the specified master: the master will no
-// longer be monitored, and will totally be removed from the internal state of
-// the Sentinel.
-func (c *SentinelClient) Remove(ctx context.Context, name string) *StringCmd {
- cmd := NewStringCmd(ctx, "sentinel", "remove", name)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-type sentinelFailover struct {
- opt *FailoverOptions
-
- sentinelAddrs []string
-
- onFailover func(ctx context.Context, addr string)
- onUpdate func(ctx context.Context)
-
- mu sync.RWMutex
- _masterAddr string
- sentinel *SentinelClient
- pubsub *PubSub
-}
-
-func (c *sentinelFailover) Close() error {
- c.mu.Lock()
- defer c.mu.Unlock()
- if c.sentinel != nil {
- return c.closeSentinel()
- }
- return nil
-}
-
-func (c *sentinelFailover) closeSentinel() error {
- firstErr := c.pubsub.Close()
- c.pubsub = nil
-
- err := c.sentinel.Close()
- if err != nil && firstErr == nil {
- firstErr = err
- }
- c.sentinel = nil
-
- return firstErr
-}
-
-func (c *sentinelFailover) RandomSlaveAddr(ctx context.Context) (string, error) {
- if c.opt == nil {
- return "", errors.New("opt is nil")
- }
-
- addresses, err := c.slaveAddrs(ctx, false)
- if err != nil {
- return "", err
- }
-
- if len(addresses) == 0 && c.opt.UseDisconnectedSlaves {
- addresses, err = c.slaveAddrs(ctx, true)
- if err != nil {
- return "", err
- }
- }
-
- if len(addresses) == 0 {
- return c.MasterAddr(ctx)
- }
- return addresses[rand.Intn(len(addresses))], nil
-}
-
-func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) {
- c.mu.RLock()
- sentinel := c.sentinel
- c.mu.RUnlock()
-
- if sentinel != nil {
- addr := c.getMasterAddr(ctx, sentinel)
- if addr != "" {
- return addr, nil
- }
- }
-
- c.mu.Lock()
- defer c.mu.Unlock()
-
- if c.sentinel != nil {
- addr := c.getMasterAddr(ctx, c.sentinel)
- if addr != "" {
- return addr, nil
- }
- _ = c.closeSentinel()
- }
-
- for i, sentinelAddr := range c.sentinelAddrs {
- sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))
-
- masterAddr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
- if err != nil {
- internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName master=%q failed: %s",
- c.opt.MasterName, err)
- _ = sentinel.Close()
- continue
- }
-
- // Push working sentinel to the top.
- c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
- c.setSentinel(ctx, sentinel)
-
- addr := net.JoinHostPort(masterAddr[0], masterAddr[1])
- return addr, nil
- }
-
- return "", errors.New("redis: all sentinels specified in configuration are unreachable")
-}
-
-func (c *sentinelFailover) slaveAddrs(ctx context.Context, useDisconnected bool) ([]string, error) {
- c.mu.RLock()
- sentinel := c.sentinel
- c.mu.RUnlock()
-
- if sentinel != nil {
- addrs := c.getSlaveAddrs(ctx, sentinel)
- if len(addrs) > 0 {
- return addrs, nil
- }
- }
-
- c.mu.Lock()
- defer c.mu.Unlock()
-
- if c.sentinel != nil {
- addrs := c.getSlaveAddrs(ctx, c.sentinel)
- if len(addrs) > 0 {
- return addrs, nil
- }
- _ = c.closeSentinel()
- }
-
- var sentinelReachable bool
-
- for i, sentinelAddr := range c.sentinelAddrs {
- sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))
-
- slaves, err := sentinel.Slaves(ctx, c.opt.MasterName).Result()
- if err != nil {
- internal.Logger.Printf(ctx, "sentinel: Slaves master=%q failed: %s",
- c.opt.MasterName, err)
- _ = sentinel.Close()
- continue
- }
- sentinelReachable = true
- addrs := parseSlaveAddrs(slaves, useDisconnected)
- if len(addrs) == 0 {
- continue
- }
- // Push working sentinel to the top.
- c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
- c.setSentinel(ctx, sentinel)
-
- return addrs, nil
- }
-
- if sentinelReachable {
- return []string{}, nil
- }
- return []string{}, errors.New("redis: all sentinels specified in configuration are unreachable")
-}
-
-func (c *sentinelFailover) getMasterAddr(ctx context.Context, sentinel *SentinelClient) string {
- addr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
- if err != nil {
- internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName name=%q failed: %s",
- c.opt.MasterName, err)
- return ""
- }
- return net.JoinHostPort(addr[0], addr[1])
-}
-
-func (c *sentinelFailover) getSlaveAddrs(ctx context.Context, sentinel *SentinelClient) []string {
- addrs, err := sentinel.Slaves(ctx, c.opt.MasterName).Result()
- if err != nil {
- internal.Logger.Printf(ctx, "sentinel: Slaves name=%q failed: %s",
- c.opt.MasterName, err)
- return []string{}
- }
- return parseSlaveAddrs(addrs, false)
-}
-
-func parseSlaveAddrs(addrs []interface{}, keepDisconnected bool) []string {
- nodes := make([]string, 0, len(addrs))
- for _, node := range addrs {
- ip := ""
- port := ""
- flags := []string{}
- lastkey := ""
- isDown := false
-
- for _, key := range node.([]interface{}) {
- switch lastkey {
- case "ip":
- ip = key.(string)
- case "port":
- port = key.(string)
- case "flags":
- flags = strings.Split(key.(string), ",")
- }
- lastkey = key.(string)
- }
-
- for _, flag := range flags {
- switch flag {
- case "s_down", "o_down":
- isDown = true
- case "disconnected":
- if !keepDisconnected {
- isDown = true
- }
- }
- }
-
- if !isDown {
- nodes = append(nodes, net.JoinHostPort(ip, port))
- }
- }
-
- return nodes
-}
-
-func (c *sentinelFailover) trySwitchMaster(ctx context.Context, addr string) {
- c.mu.RLock()
- currentAddr := c._masterAddr //nolint:ifshort
- c.mu.RUnlock()
-
- if addr == currentAddr {
- return
- }
-
- c.mu.Lock()
- defer c.mu.Unlock()
-
- if addr == c._masterAddr {
- return
- }
- c._masterAddr = addr
-
- internal.Logger.Printf(ctx, "sentinel: new master=%q addr=%q",
- c.opt.MasterName, addr)
- if c.onFailover != nil {
- c.onFailover(ctx, addr)
- }
-}
-
-func (c *sentinelFailover) setSentinel(ctx context.Context, sentinel *SentinelClient) {
- if c.sentinel != nil {
- panic("not reached")
- }
- c.sentinel = sentinel
- c.discoverSentinels(ctx)
-
- c.pubsub = sentinel.Subscribe(ctx, "+switch-master", "+slave-reconf-done")
- go c.listen(c.pubsub)
-}
-
-func (c *sentinelFailover) discoverSentinels(ctx context.Context) {
- sentinels, err := c.sentinel.Sentinels(ctx, c.opt.MasterName).Result()
- if err != nil {
- internal.Logger.Printf(ctx, "sentinel: Sentinels master=%q failed: %s", c.opt.MasterName, err)
- return
- }
- for _, sentinel := range sentinels {
- vals := sentinel.([]interface{})
- var ip, port string
- for i := 0; i < len(vals); i += 2 {
- key := vals[i].(string)
- switch key {
- case "ip":
- ip = vals[i+1].(string)
- case "port":
- port = vals[i+1].(string)
- }
- }
- if ip != "" && port != "" {
- sentinelAddr := net.JoinHostPort(ip, port)
- if !contains(c.sentinelAddrs, sentinelAddr) {
- internal.Logger.Printf(ctx, "sentinel: discovered new sentinel=%q for master=%q",
- sentinelAddr, c.opt.MasterName)
- c.sentinelAddrs = append(c.sentinelAddrs, sentinelAddr)
- }
- }
- }
-}
-
-func (c *sentinelFailover) listen(pubsub *PubSub) {
- ctx := context.TODO()
-
- if c.onUpdate != nil {
- c.onUpdate(ctx)
- }
-
- ch := pubsub.Channel()
- for msg := range ch {
- if msg.Channel == "+switch-master" {
- parts := strings.Split(msg.Payload, " ")
- if parts[0] != c.opt.MasterName {
- internal.Logger.Printf(pubsub.getContext(), "sentinel: ignore addr for master=%q", parts[0])
- continue
- }
- addr := net.JoinHostPort(parts[3], parts[4])
- c.trySwitchMaster(pubsub.getContext(), addr)
- }
-
- if c.onUpdate != nil {
- c.onUpdate(ctx)
- }
- }
-}
-
-func contains(slice []string, str string) bool {
- for _, s := range slice {
- if s == str {
- return true
- }
- }
- return false
-}
-
-//------------------------------------------------------------------------------
-
-// NewFailoverClusterClient returns a client that supports routing read-only commands
-// to a slave node.
-func NewFailoverClusterClient(failoverOpt *FailoverOptions) *ClusterClient {
- sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
- copy(sentinelAddrs, failoverOpt.SentinelAddrs)
-
- failover := &sentinelFailover{
- opt: failoverOpt,
- sentinelAddrs: sentinelAddrs,
- }
-
- opt := failoverOpt.clusterOptions()
- opt.ClusterSlots = func(ctx context.Context) ([]ClusterSlot, error) {
- masterAddr, err := failover.MasterAddr(ctx)
- if err != nil {
- return nil, err
- }
-
- nodes := []ClusterNode{{
- Addr: masterAddr,
- }}
-
- slaveAddrs, err := failover.slaveAddrs(ctx, false)
- if err != nil {
- return nil, err
- }
-
- for _, slaveAddr := range slaveAddrs {
- nodes = append(nodes, ClusterNode{
- Addr: slaveAddr,
- })
- }
-
- slots := []ClusterSlot{
- {
- Start: 0,
- End: 16383,
- Nodes: nodes,
- },
- }
- return slots, nil
- }
-
- c := NewClusterClient(opt)
-
- failover.mu.Lock()
- failover.onUpdate = func(ctx context.Context) {
- c.ReloadState(ctx)
- }
- failover.mu.Unlock()
-
- return c
-}
diff --git a/vendor/github.com/go-redis/redis/v8/tx.go b/vendor/github.com/go-redis/redis/v8/tx.go
deleted file mode 100644
index 8c9d8720..00000000
--- a/vendor/github.com/go-redis/redis/v8/tx.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package redis
-
-import (
- "context"
-
- "github.com/go-redis/redis/v8/internal/pool"
- "github.com/go-redis/redis/v8/internal/proto"
-)
-
-// TxFailedErr transaction redis failed.
-const TxFailedErr = proto.RedisError("redis: transaction failed")
-
-// Tx implements Redis transactions as described in
-// http://redis.io/topics/transactions. It's NOT safe for concurrent use
-// by multiple goroutines, because Exec resets list of watched keys.
-//
-// If you don't need WATCH, use Pipeline instead.
-type Tx struct {
- baseClient
- cmdable
- statefulCmdable
- hooks
- ctx context.Context
-}
-
-func (c *Client) newTx(ctx context.Context) *Tx {
- tx := Tx{
- baseClient: baseClient{
- opt: c.opt,
- connPool: pool.NewStickyConnPool(c.connPool),
- },
- hooks: c.hooks.clone(),
- ctx: ctx,
- }
- tx.init()
- return &tx
-}
-
-func (c *Tx) init() {
- c.cmdable = c.Process
- c.statefulCmdable = c.Process
-}
-
-func (c *Tx) Context() context.Context {
- return c.ctx
-}
-
-func (c *Tx) WithContext(ctx context.Context) *Tx {
- if ctx == nil {
- panic("nil context")
- }
- clone := *c
- clone.init()
- clone.hooks.lock()
- clone.ctx = ctx
- return &clone
-}
-
-func (c *Tx) Process(ctx context.Context, cmd Cmder) error {
- return c.hooks.process(ctx, cmd, c.baseClient.process)
-}
-
-// Watch prepares a transaction and marks the keys to be watched
-// for conditional execution if there are any keys.
-//
-// The transaction is automatically closed when fn exits.
-func (c *Client) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
- tx := c.newTx(ctx)
- defer tx.Close(ctx)
- if len(keys) > 0 {
- if err := tx.Watch(ctx, keys...).Err(); err != nil {
- return err
- }
- }
- return fn(tx)
-}
-
-// Close closes the transaction, releasing any open resources.
-func (c *Tx) Close(ctx context.Context) error {
- _ = c.Unwatch(ctx).Err()
- return c.baseClient.Close()
-}
-
-// Watch marks the keys to be watched for conditional execution
-// of a transaction.
-func (c *Tx) Watch(ctx context.Context, keys ...string) *StatusCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "watch"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewStatusCmd(ctx, args...)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-// Unwatch flushes all the previously watched keys for a transaction.
-func (c *Tx) Unwatch(ctx context.Context, keys ...string) *StatusCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "unwatch"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewStatusCmd(ctx, args...)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-// Pipeline creates a pipeline. Usually it is more convenient to use Pipelined.
-func (c *Tx) Pipeline() Pipeliner {
- pipe := Pipeline{
- ctx: c.ctx,
- exec: func(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
- },
- }
- pipe.init()
- return &pipe
-}
-
-// Pipelined executes commands queued in the fn outside of the transaction.
-// Use TxPipelined if you need transactional behavior.
-func (c *Tx) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.Pipeline().Pipelined(ctx, fn)
-}
-
-// TxPipelined executes commands queued in the fn in the transaction.
-//
-// When using WATCH, EXEC will execute commands only if the watched keys
-// were not modified, allowing for a check-and-set mechanism.
-//
-// Exec always returns list of commands. If transaction fails
-// TxFailedErr is returned. Otherwise Exec returns an error of the first
-// failed command or nil.
-func (c *Tx) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.TxPipeline().Pipelined(ctx, fn)
-}
-
-// TxPipeline creates a pipeline. Usually it is more convenient to use TxPipelined.
-func (c *Tx) TxPipeline() Pipeliner {
- pipe := Pipeline{
- ctx: c.ctx,
- exec: func(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
- },
- }
- pipe.init()
- return &pipe
-}
diff --git a/vendor/github.com/go-redis/redis/v8/universal.go b/vendor/github.com/go-redis/redis/v8/universal.go
deleted file mode 100644
index c89b3e5d..00000000
--- a/vendor/github.com/go-redis/redis/v8/universal.go
+++ /dev/null
@@ -1,215 +0,0 @@
-package redis
-
-import (
- "context"
- "crypto/tls"
- "net"
- "time"
-)
-
-// UniversalOptions information is required by UniversalClient to establish
-// connections.
-type UniversalOptions struct {
- // Either a single address or a seed list of host:port addresses
- // of cluster/sentinel nodes.
- Addrs []string
-
- // Database to be selected after connecting to the server.
- // Only single-node and failover clients.
- DB int
-
- // Common options.
-
- Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
- OnConnect func(ctx context.Context, cn *Conn) error
-
- Username string
- Password string
- SentinelUsername string
- SentinelPassword string
-
- MaxRetries int
- MinRetryBackoff time.Duration
- MaxRetryBackoff time.Duration
-
- DialTimeout time.Duration
- ReadTimeout time.Duration
- WriteTimeout time.Duration
-
- // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
- PoolFIFO bool
-
- PoolSize int
- MinIdleConns int
- MaxConnAge time.Duration
- PoolTimeout time.Duration
- IdleTimeout time.Duration
- IdleCheckFrequency time.Duration
-
- TLSConfig *tls.Config
-
- // Only cluster clients.
-
- MaxRedirects int
- ReadOnly bool
- RouteByLatency bool
- RouteRandomly bool
-
- // The sentinel master name.
- // Only failover clients.
-
- MasterName string
-}
-
-// Cluster returns cluster options created from the universal options.
-func (o *UniversalOptions) Cluster() *ClusterOptions {
- if len(o.Addrs) == 0 {
- o.Addrs = []string{"127.0.0.1:6379"}
- }
-
- return &ClusterOptions{
- Addrs: o.Addrs,
- Dialer: o.Dialer,
- OnConnect: o.OnConnect,
-
- Username: o.Username,
- Password: o.Password,
-
- MaxRedirects: o.MaxRedirects,
- ReadOnly: o.ReadOnly,
- RouteByLatency: o.RouteByLatency,
- RouteRandomly: o.RouteRandomly,
-
- MaxRetries: o.MaxRetries,
- MinRetryBackoff: o.MinRetryBackoff,
- MaxRetryBackoff: o.MaxRetryBackoff,
-
- DialTimeout: o.DialTimeout,
- ReadTimeout: o.ReadTimeout,
- WriteTimeout: o.WriteTimeout,
- PoolFIFO: o.PoolFIFO,
- PoolSize: o.PoolSize,
- MinIdleConns: o.MinIdleConns,
- MaxConnAge: o.MaxConnAge,
- PoolTimeout: o.PoolTimeout,
- IdleTimeout: o.IdleTimeout,
- IdleCheckFrequency: o.IdleCheckFrequency,
-
- TLSConfig: o.TLSConfig,
- }
-}
-
-// Failover returns failover options created from the universal options.
-func (o *UniversalOptions) Failover() *FailoverOptions {
- if len(o.Addrs) == 0 {
- o.Addrs = []string{"127.0.0.1:26379"}
- }
-
- return &FailoverOptions{
- SentinelAddrs: o.Addrs,
- MasterName: o.MasterName,
-
- Dialer: o.Dialer,
- OnConnect: o.OnConnect,
-
- DB: o.DB,
- Username: o.Username,
- Password: o.Password,
- SentinelUsername: o.SentinelUsername,
- SentinelPassword: o.SentinelPassword,
-
- MaxRetries: o.MaxRetries,
- MinRetryBackoff: o.MinRetryBackoff,
- MaxRetryBackoff: o.MaxRetryBackoff,
-
- DialTimeout: o.DialTimeout,
- ReadTimeout: o.ReadTimeout,
- WriteTimeout: o.WriteTimeout,
-
- PoolFIFO: o.PoolFIFO,
- PoolSize: o.PoolSize,
- MinIdleConns: o.MinIdleConns,
- MaxConnAge: o.MaxConnAge,
- PoolTimeout: o.PoolTimeout,
- IdleTimeout: o.IdleTimeout,
- IdleCheckFrequency: o.IdleCheckFrequency,
-
- TLSConfig: o.TLSConfig,
- }
-}
-
-// Simple returns basic options created from the universal options.
-func (o *UniversalOptions) Simple() *Options {
- addr := "127.0.0.1:6379"
- if len(o.Addrs) > 0 {
- addr = o.Addrs[0]
- }
-
- return &Options{
- Addr: addr,
- Dialer: o.Dialer,
- OnConnect: o.OnConnect,
-
- DB: o.DB,
- Username: o.Username,
- Password: o.Password,
-
- MaxRetries: o.MaxRetries,
- MinRetryBackoff: o.MinRetryBackoff,
- MaxRetryBackoff: o.MaxRetryBackoff,
-
- DialTimeout: o.DialTimeout,
- ReadTimeout: o.ReadTimeout,
- WriteTimeout: o.WriteTimeout,
-
- PoolFIFO: o.PoolFIFO,
- PoolSize: o.PoolSize,
- MinIdleConns: o.MinIdleConns,
- MaxConnAge: o.MaxConnAge,
- PoolTimeout: o.PoolTimeout,
- IdleTimeout: o.IdleTimeout,
- IdleCheckFrequency: o.IdleCheckFrequency,
-
- TLSConfig: o.TLSConfig,
- }
-}
-
-// --------------------------------------------------------------------
-
-// UniversalClient is an abstract client which - based on the provided options -
-// represents either a ClusterClient, a FailoverClient, or a single-node Client.
-// This can be useful for testing cluster-specific applications locally or having different
-// clients in different environments.
-type UniversalClient interface {
- Cmdable
- Context() context.Context
- AddHook(Hook)
- Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error
- Do(ctx context.Context, args ...interface{}) *Cmd
- Process(ctx context.Context, cmd Cmder) error
- Subscribe(ctx context.Context, channels ...string) *PubSub
- PSubscribe(ctx context.Context, channels ...string) *PubSub
- Close() error
- PoolStats() *PoolStats
-}
-
-var (
- _ UniversalClient = (*Client)(nil)
- _ UniversalClient = (*ClusterClient)(nil)
- _ UniversalClient = (*Ring)(nil)
-)
-
-// NewUniversalClient returns a new multi client. The type of the returned client depends
-// on the following conditions:
-//
-// 1. If the MasterName option is specified, a sentinel-backed FailoverClient is returned.
-// 2. if the number of Addrs is two or more, a ClusterClient is returned.
-// 3. Otherwise, a single-node Client is returned.
-func NewUniversalClient(opts *UniversalOptions) UniversalClient {
- if opts.MasterName != "" {
- return NewFailoverClient(opts.Failover())
- } else if len(opts.Addrs) > 1 {
- return NewClusterClient(opts.Cluster())
- }
- return NewClient(opts.Simple())
-}
diff --git a/vendor/github.com/go-redis/redis/v8/version.go b/vendor/github.com/go-redis/redis/v8/version.go
deleted file mode 100644
index 112c9a2d..00000000
--- a/vendor/github.com/go-redis/redis/v8/version.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package redis
-
-// Version is the current release version.
-func Version() string {
- return "8.11.5"
-}
diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS
deleted file mode 100644
index 15167cd7..00000000
--- a/vendor/github.com/golang/protobuf/AUTHORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code refers to The Go Authors for copyright purposes.
-# The master list of authors is in the main Go distribution,
-# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS
deleted file mode 100644
index 1c4577e9..00000000
--- a/vendor/github.com/golang/protobuf/CONTRIBUTORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code was written by the Go contributors.
-# The master list of contributors is in the main Go distribution,
-# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
deleted file mode 100644
index 0f646931..00000000
--- a/vendor/github.com/golang/protobuf/LICENSE
+++ /dev/null
@@ -1,28 +0,0 @@
-Copyright 2010 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
diff --git a/vendor/github.com/golang/protobuf/proto/buffer.go b/vendor/github.com/golang/protobuf/proto/buffer.go
deleted file mode 100644
index e810e6fe..00000000
--- a/vendor/github.com/golang/protobuf/proto/buffer.go
+++ /dev/null
@@ -1,324 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "errors"
- "fmt"
-
- "google.golang.org/protobuf/encoding/prototext"
- "google.golang.org/protobuf/encoding/protowire"
- "google.golang.org/protobuf/runtime/protoimpl"
-)
-
-const (
- WireVarint = 0
- WireFixed32 = 5
- WireFixed64 = 1
- WireBytes = 2
- WireStartGroup = 3
- WireEndGroup = 4
-)
-
-// EncodeVarint returns the varint encoded bytes of v.
-func EncodeVarint(v uint64) []byte {
- return protowire.AppendVarint(nil, v)
-}
-
-// SizeVarint returns the length of the varint encoded bytes of v.
-// This is equal to len(EncodeVarint(v)).
-func SizeVarint(v uint64) int {
- return protowire.SizeVarint(v)
-}
-
-// DecodeVarint parses a varint encoded integer from b,
-// returning the integer value and the length of the varint.
-// It returns (0, 0) if there is a parse error.
-func DecodeVarint(b []byte) (uint64, int) {
- v, n := protowire.ConsumeVarint(b)
- if n < 0 {
- return 0, 0
- }
- return v, n
-}
-
-// Buffer is a buffer for encoding and decoding the protobuf wire format.
-// It may be reused between invocations to reduce memory usage.
-type Buffer struct {
- buf []byte
- idx int
- deterministic bool
-}
-
-// NewBuffer allocates a new Buffer initialized with buf,
-// where the contents of buf are considered the unread portion of the buffer.
-func NewBuffer(buf []byte) *Buffer {
- return &Buffer{buf: buf}
-}
-
-// SetDeterministic specifies whether to use deterministic serialization.
-//
-// Deterministic serialization guarantees that for a given binary, equal
-// messages will always be serialized to the same bytes. This implies:
-//
-// - Repeated serialization of a message will return the same bytes.
-// - Different processes of the same binary (which may be executing on
-// different machines) will serialize equal messages to the same bytes.
-//
-// Note that the deterministic serialization is NOT canonical across
-// languages. It is not guaranteed to remain stable over time. It is unstable
-// across different builds with schema changes due to unknown fields.
-// Users who need canonical serialization (e.g., persistent storage in a
-// canonical form, fingerprinting, etc.) should define their own
-// canonicalization specification and implement their own serializer rather
-// than relying on this API.
-//
-// If deterministic serialization is requested, map entries will be sorted
-// by keys in lexographical order. This is an implementation detail and
-// subject to change.
-func (b *Buffer) SetDeterministic(deterministic bool) {
- b.deterministic = deterministic
-}
-
-// SetBuf sets buf as the internal buffer,
-// where the contents of buf are considered the unread portion of the buffer.
-func (b *Buffer) SetBuf(buf []byte) {
- b.buf = buf
- b.idx = 0
-}
-
-// Reset clears the internal buffer of all written and unread data.
-func (b *Buffer) Reset() {
- b.buf = b.buf[:0]
- b.idx = 0
-}
-
-// Bytes returns the internal buffer.
-func (b *Buffer) Bytes() []byte {
- return b.buf
-}
-
-// Unread returns the unread portion of the buffer.
-func (b *Buffer) Unread() []byte {
- return b.buf[b.idx:]
-}
-
-// Marshal appends the wire-format encoding of m to the buffer.
-func (b *Buffer) Marshal(m Message) error {
- var err error
- b.buf, err = marshalAppend(b.buf, m, b.deterministic)
- return err
-}
-
-// Unmarshal parses the wire-format message in the buffer and
-// places the decoded results in m.
-// It does not reset m before unmarshaling.
-func (b *Buffer) Unmarshal(m Message) error {
- err := UnmarshalMerge(b.Unread(), m)
- b.idx = len(b.buf)
- return err
-}
-
-type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields }
-
-func (m *unknownFields) String() string { panic("not implemented") }
-func (m *unknownFields) Reset() { panic("not implemented") }
-func (m *unknownFields) ProtoMessage() { panic("not implemented") }
-
-// DebugPrint dumps the encoded bytes of b with a header and footer including s
-// to stdout. This is only intended for debugging.
-func (*Buffer) DebugPrint(s string, b []byte) {
- m := MessageReflect(new(unknownFields))
- m.SetUnknown(b)
- b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface())
- fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s)
-}
-
-// EncodeVarint appends an unsigned varint encoding to the buffer.
-func (b *Buffer) EncodeVarint(v uint64) error {
- b.buf = protowire.AppendVarint(b.buf, v)
- return nil
-}
-
-// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer.
-func (b *Buffer) EncodeZigzag32(v uint64) error {
- return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
-}
-
-// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer.
-func (b *Buffer) EncodeZigzag64(v uint64) error {
- return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63))))
-}
-
-// EncodeFixed32 appends a 32-bit little-endian integer to the buffer.
-func (b *Buffer) EncodeFixed32(v uint64) error {
- b.buf = protowire.AppendFixed32(b.buf, uint32(v))
- return nil
-}
-
-// EncodeFixed64 appends a 64-bit little-endian integer to the buffer.
-func (b *Buffer) EncodeFixed64(v uint64) error {
- b.buf = protowire.AppendFixed64(b.buf, uint64(v))
- return nil
-}
-
-// EncodeRawBytes appends a length-prefixed raw bytes to the buffer.
-func (b *Buffer) EncodeRawBytes(v []byte) error {
- b.buf = protowire.AppendBytes(b.buf, v)
- return nil
-}
-
-// EncodeStringBytes appends a length-prefixed raw bytes to the buffer.
-// It does not validate whether v contains valid UTF-8.
-func (b *Buffer) EncodeStringBytes(v string) error {
- b.buf = protowire.AppendString(b.buf, v)
- return nil
-}
-
-// EncodeMessage appends a length-prefixed encoded message to the buffer.
-func (b *Buffer) EncodeMessage(m Message) error {
- var err error
- b.buf = protowire.AppendVarint(b.buf, uint64(Size(m)))
- b.buf, err = marshalAppend(b.buf, m, b.deterministic)
- return err
-}
-
-// DecodeVarint consumes an encoded unsigned varint from the buffer.
-func (b *Buffer) DecodeVarint() (uint64, error) {
- v, n := protowire.ConsumeVarint(b.buf[b.idx:])
- if n < 0 {
- return 0, protowire.ParseError(n)
- }
- b.idx += n
- return uint64(v), nil
-}
-
-// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer.
-func (b *Buffer) DecodeZigzag32() (uint64, error) {
- v, err := b.DecodeVarint()
- if err != nil {
- return 0, err
- }
- return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil
-}
-
-// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer.
-func (b *Buffer) DecodeZigzag64() (uint64, error) {
- v, err := b.DecodeVarint()
- if err != nil {
- return 0, err
- }
- return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil
-}
-
-// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer.
-func (b *Buffer) DecodeFixed32() (uint64, error) {
- v, n := protowire.ConsumeFixed32(b.buf[b.idx:])
- if n < 0 {
- return 0, protowire.ParseError(n)
- }
- b.idx += n
- return uint64(v), nil
-}
-
-// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer.
-func (b *Buffer) DecodeFixed64() (uint64, error) {
- v, n := protowire.ConsumeFixed64(b.buf[b.idx:])
- if n < 0 {
- return 0, protowire.ParseError(n)
- }
- b.idx += n
- return uint64(v), nil
-}
-
-// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer.
-// If alloc is specified, it returns a copy the raw bytes
-// rather than a sub-slice of the buffer.
-func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) {
- v, n := protowire.ConsumeBytes(b.buf[b.idx:])
- if n < 0 {
- return nil, protowire.ParseError(n)
- }
- b.idx += n
- if alloc {
- v = append([]byte(nil), v...)
- }
- return v, nil
-}
-
-// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer.
-// It does not validate whether the raw bytes contain valid UTF-8.
-func (b *Buffer) DecodeStringBytes() (string, error) {
- v, n := protowire.ConsumeString(b.buf[b.idx:])
- if n < 0 {
- return "", protowire.ParseError(n)
- }
- b.idx += n
- return v, nil
-}
-
-// DecodeMessage consumes a length-prefixed message from the buffer.
-// It does not reset m before unmarshaling.
-func (b *Buffer) DecodeMessage(m Message) error {
- v, err := b.DecodeRawBytes(false)
- if err != nil {
- return err
- }
- return UnmarshalMerge(v, m)
-}
-
-// DecodeGroup consumes a message group from the buffer.
-// It assumes that the start group marker has already been consumed and
-// consumes all bytes until (and including the end group marker).
-// It does not reset m before unmarshaling.
-func (b *Buffer) DecodeGroup(m Message) error {
- v, n, err := consumeGroup(b.buf[b.idx:])
- if err != nil {
- return err
- }
- b.idx += n
- return UnmarshalMerge(v, m)
-}
-
-// consumeGroup parses b until it finds an end group marker, returning
-// the raw bytes of the message (excluding the end group marker) and the
-// the total length of the message (including the end group marker).
-func consumeGroup(b []byte) ([]byte, int, error) {
- b0 := b
- depth := 1 // assume this follows a start group marker
- for {
- _, wtyp, tagLen := protowire.ConsumeTag(b)
- if tagLen < 0 {
- return nil, 0, protowire.ParseError(tagLen)
- }
- b = b[tagLen:]
-
- var valLen int
- switch wtyp {
- case protowire.VarintType:
- _, valLen = protowire.ConsumeVarint(b)
- case protowire.Fixed32Type:
- _, valLen = protowire.ConsumeFixed32(b)
- case protowire.Fixed64Type:
- _, valLen = protowire.ConsumeFixed64(b)
- case protowire.BytesType:
- _, valLen = protowire.ConsumeBytes(b)
- case protowire.StartGroupType:
- depth++
- case protowire.EndGroupType:
- depth--
- default:
- return nil, 0, errors.New("proto: cannot parse reserved wire type")
- }
- if valLen < 0 {
- return nil, 0, protowire.ParseError(valLen)
- }
- b = b[valLen:]
-
- if depth == 0 {
- return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil
- }
- }
-}
diff --git a/vendor/github.com/golang/protobuf/proto/defaults.go b/vendor/github.com/golang/protobuf/proto/defaults.go
deleted file mode 100644
index d399bf06..00000000
--- a/vendor/github.com/golang/protobuf/proto/defaults.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "google.golang.org/protobuf/reflect/protoreflect"
-)
-
-// SetDefaults sets unpopulated scalar fields to their default values.
-// Fields within a oneof are not set even if they have a default value.
-// SetDefaults is recursively called upon any populated message fields.
-func SetDefaults(m Message) {
- if m != nil {
- setDefaults(MessageReflect(m))
- }
-}
-
-func setDefaults(m protoreflect.Message) {
- fds := m.Descriptor().Fields()
- for i := 0; i < fds.Len(); i++ {
- fd := fds.Get(i)
- if !m.Has(fd) {
- if fd.HasDefault() && fd.ContainingOneof() == nil {
- v := fd.Default()
- if fd.Kind() == protoreflect.BytesKind {
- v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes
- }
- m.Set(fd, v)
- }
- continue
- }
- }
-
- m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
- switch {
- // Handle singular message.
- case fd.Cardinality() != protoreflect.Repeated:
- if fd.Message() != nil {
- setDefaults(m.Get(fd).Message())
- }
- // Handle list of messages.
- case fd.IsList():
- if fd.Message() != nil {
- ls := m.Get(fd).List()
- for i := 0; i < ls.Len(); i++ {
- setDefaults(ls.Get(i).Message())
- }
- }
- // Handle map of messages.
- case fd.IsMap():
- if fd.MapValue().Message() != nil {
- ms := m.Get(fd).Map()
- ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
- setDefaults(v.Message())
- return true
- })
- }
- }
- return true
- })
-}
diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go
deleted file mode 100644
index e8db57e0..00000000
--- a/vendor/github.com/golang/protobuf/proto/deprecated.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "strconv"
-
- protoV2 "google.golang.org/protobuf/proto"
-)
-
-var (
- // Deprecated: No longer returned.
- ErrNil = errors.New("proto: Marshal called with nil")
-
- // Deprecated: No longer returned.
- ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
-
- // Deprecated: No longer returned.
- ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
-)
-
-// Deprecated: Do not use.
-type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
-
-// Deprecated: Do not use.
-func GetStats() Stats { return Stats{} }
-
-// Deprecated: Do not use.
-func MarshalMessageSet(interface{}) ([]byte, error) {
- return nil, errors.New("proto: not implemented")
-}
-
-// Deprecated: Do not use.
-func UnmarshalMessageSet([]byte, interface{}) error {
- return errors.New("proto: not implemented")
-}
-
-// Deprecated: Do not use.
-func MarshalMessageSetJSON(interface{}) ([]byte, error) {
- return nil, errors.New("proto: not implemented")
-}
-
-// Deprecated: Do not use.
-func UnmarshalMessageSetJSON([]byte, interface{}) error {
- return errors.New("proto: not implemented")
-}
-
-// Deprecated: Do not use.
-func RegisterMessageSetType(Message, int32, string) {}
-
-// Deprecated: Do not use.
-func EnumName(m map[int32]string, v int32) string {
- s, ok := m[v]
- if ok {
- return s
- }
- return strconv.Itoa(int(v))
-}
-
-// Deprecated: Do not use.
-func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
- if data[0] == '"' {
- // New style: enums are strings.
- var repr string
- if err := json.Unmarshal(data, &repr); err != nil {
- return -1, err
- }
- val, ok := m[repr]
- if !ok {
- return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
- }
- return val, nil
- }
- // Old style: enums are ints.
- var val int32
- if err := json.Unmarshal(data, &val); err != nil {
- return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
- }
- return val, nil
-}
-
-// Deprecated: Do not use; this type existed for intenal-use only.
-type InternalMessageInfo struct{}
-
-// Deprecated: Do not use; this method existed for intenal-use only.
-func (*InternalMessageInfo) DiscardUnknown(m Message) {
- DiscardUnknown(m)
-}
-
-// Deprecated: Do not use; this method existed for intenal-use only.
-func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) {
- return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m))
-}
-
-// Deprecated: Do not use; this method existed for intenal-use only.
-func (*InternalMessageInfo) Merge(dst, src Message) {
- protoV2.Merge(MessageV2(dst), MessageV2(src))
-}
-
-// Deprecated: Do not use; this method existed for intenal-use only.
-func (*InternalMessageInfo) Size(m Message) int {
- return protoV2.Size(MessageV2(m))
-}
-
-// Deprecated: Do not use; this method existed for intenal-use only.
-func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error {
- return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m))
-}
diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go
deleted file mode 100644
index 2187e877..00000000
--- a/vendor/github.com/golang/protobuf/proto/discard.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "google.golang.org/protobuf/reflect/protoreflect"
-)
-
-// DiscardUnknown recursively discards all unknown fields from this message
-// and all embedded messages.
-//
-// When unmarshaling a message with unrecognized fields, the tags and values
-// of such fields are preserved in the Message. This allows a later call to
-// marshal to be able to produce a message that continues to have those
-// unrecognized fields. To avoid this, DiscardUnknown is used to
-// explicitly clear the unknown fields after unmarshaling.
-func DiscardUnknown(m Message) {
- if m != nil {
- discardUnknown(MessageReflect(m))
- }
-}
-
-func discardUnknown(m protoreflect.Message) {
- m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool {
- switch {
- // Handle singular message.
- case fd.Cardinality() != protoreflect.Repeated:
- if fd.Message() != nil {
- discardUnknown(m.Get(fd).Message())
- }
- // Handle list of messages.
- case fd.IsList():
- if fd.Message() != nil {
- ls := m.Get(fd).List()
- for i := 0; i < ls.Len(); i++ {
- discardUnknown(ls.Get(i).Message())
- }
- }
- // Handle map of messages.
- case fd.IsMap():
- if fd.MapValue().Message() != nil {
- ms := m.Get(fd).Map()
- ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
- discardUnknown(v.Message())
- return true
- })
- }
- }
- return true
- })
-
- // Discard unknown fields.
- if len(m.GetUnknown()) > 0 {
- m.SetUnknown(nil)
- }
-}
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
deleted file mode 100644
index 42fc120c..00000000
--- a/vendor/github.com/golang/protobuf/proto/extensions.go
+++ /dev/null
@@ -1,356 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "errors"
- "fmt"
- "reflect"
-
- "google.golang.org/protobuf/encoding/protowire"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
- "google.golang.org/protobuf/runtime/protoiface"
- "google.golang.org/protobuf/runtime/protoimpl"
-)
-
-type (
- // ExtensionDesc represents an extension descriptor and
- // is used to interact with an extension field in a message.
- //
- // Variables of this type are generated in code by protoc-gen-go.
- ExtensionDesc = protoimpl.ExtensionInfo
-
- // ExtensionRange represents a range of message extensions.
- // Used in code generated by protoc-gen-go.
- ExtensionRange = protoiface.ExtensionRangeV1
-
- // Deprecated: Do not use; this is an internal type.
- Extension = protoimpl.ExtensionFieldV1
-
- // Deprecated: Do not use; this is an internal type.
- XXX_InternalExtensions = protoimpl.ExtensionFields
-)
-
-// ErrMissingExtension reports whether the extension was not present.
-var ErrMissingExtension = errors.New("proto: missing extension")
-
-var errNotExtendable = errors.New("proto: not an extendable proto.Message")
-
-// HasExtension reports whether the extension field is present in m
-// either as an explicitly populated field or as an unknown field.
-func HasExtension(m Message, xt *ExtensionDesc) (has bool) {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() {
- return false
- }
-
- // Check whether any populated known field matches the field number.
- xtd := xt.TypeDescriptor()
- if isValidExtension(mr.Descriptor(), xtd) {
- has = mr.Has(xtd)
- } else {
- mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
- has = int32(fd.Number()) == xt.Field
- return !has
- })
- }
-
- // Check whether any unknown field matches the field number.
- for b := mr.GetUnknown(); !has && len(b) > 0; {
- num, _, n := protowire.ConsumeField(b)
- has = int32(num) == xt.Field
- b = b[n:]
- }
- return has
-}
-
-// ClearExtension removes the extension field from m
-// either as an explicitly populated field or as an unknown field.
-func ClearExtension(m Message, xt *ExtensionDesc) {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() {
- return
- }
-
- xtd := xt.TypeDescriptor()
- if isValidExtension(mr.Descriptor(), xtd) {
- mr.Clear(xtd)
- } else {
- mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
- if int32(fd.Number()) == xt.Field {
- mr.Clear(fd)
- return false
- }
- return true
- })
- }
- clearUnknown(mr, fieldNum(xt.Field))
-}
-
-// ClearAllExtensions clears all extensions from m.
-// This includes populated fields and unknown fields in the extension range.
-func ClearAllExtensions(m Message) {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() {
- return
- }
-
- mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
- if fd.IsExtension() {
- mr.Clear(fd)
- }
- return true
- })
- clearUnknown(mr, mr.Descriptor().ExtensionRanges())
-}
-
-// GetExtension retrieves a proto2 extended field from m.
-//
-// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
-// then GetExtension parses the encoded field and returns a Go value of the specified type.
-// If the field is not present, then the default value is returned (if one is specified),
-// otherwise ErrMissingExtension is reported.
-//
-// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil),
-// then GetExtension returns the raw encoded bytes for the extension field.
-func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
- return nil, errNotExtendable
- }
-
- // Retrieve the unknown fields for this extension field.
- var bo protoreflect.RawFields
- for bi := mr.GetUnknown(); len(bi) > 0; {
- num, _, n := protowire.ConsumeField(bi)
- if int32(num) == xt.Field {
- bo = append(bo, bi[:n]...)
- }
- bi = bi[n:]
- }
-
- // For type incomplete descriptors, only retrieve the unknown fields.
- if xt.ExtensionType == nil {
- return []byte(bo), nil
- }
-
- // If the extension field only exists as unknown fields, unmarshal it.
- // This is rarely done since proto.Unmarshal eagerly unmarshals extensions.
- xtd := xt.TypeDescriptor()
- if !isValidExtension(mr.Descriptor(), xtd) {
- return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
- }
- if !mr.Has(xtd) && len(bo) > 0 {
- m2 := mr.New()
- if err := (proto.UnmarshalOptions{
- Resolver: extensionResolver{xt},
- }.Unmarshal(bo, m2.Interface())); err != nil {
- return nil, err
- }
- if m2.Has(xtd) {
- mr.Set(xtd, m2.Get(xtd))
- clearUnknown(mr, fieldNum(xt.Field))
- }
- }
-
- // Check whether the message has the extension field set or a default.
- var pv protoreflect.Value
- switch {
- case mr.Has(xtd):
- pv = mr.Get(xtd)
- case xtd.HasDefault():
- pv = xtd.Default()
- default:
- return nil, ErrMissingExtension
- }
-
- v := xt.InterfaceOf(pv)
- rv := reflect.ValueOf(v)
- if isScalarKind(rv.Kind()) {
- rv2 := reflect.New(rv.Type())
- rv2.Elem().Set(rv)
- v = rv2.Interface()
- }
- return v, nil
-}
-
-// extensionResolver is a custom extension resolver that stores a single
-// extension type that takes precedence over the global registry.
-type extensionResolver struct{ xt protoreflect.ExtensionType }
-
-func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
- if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field {
- return r.xt, nil
- }
- return protoregistry.GlobalTypes.FindExtensionByName(field)
-}
-
-func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
- if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field {
- return r.xt, nil
- }
- return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
-}
-
-// GetExtensions returns a list of the extensions values present in m,
-// corresponding with the provided list of extension descriptors, xts.
-// If an extension is missing in m, the corresponding value is nil.
-func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() {
- return nil, errNotExtendable
- }
-
- vs := make([]interface{}, len(xts))
- for i, xt := range xts {
- v, err := GetExtension(m, xt)
- if err != nil {
- if err == ErrMissingExtension {
- continue
- }
- return vs, err
- }
- vs[i] = v
- }
- return vs, nil
-}
-
-// SetExtension sets an extension field in m to the provided value.
-func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
- return errNotExtendable
- }
-
- rv := reflect.ValueOf(v)
- if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) {
- return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType)
- }
- if rv.Kind() == reflect.Ptr {
- if rv.IsNil() {
- return fmt.Errorf("proto: SetExtension called with nil value of type %T", v)
- }
- if isScalarKind(rv.Elem().Kind()) {
- v = rv.Elem().Interface()
- }
- }
-
- xtd := xt.TypeDescriptor()
- if !isValidExtension(mr.Descriptor(), xtd) {
- return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
- }
- mr.Set(xtd, xt.ValueOf(v))
- clearUnknown(mr, fieldNum(xt.Field))
- return nil
-}
-
-// SetRawExtension inserts b into the unknown fields of m.
-//
-// Deprecated: Use Message.ProtoReflect.SetUnknown instead.
-func SetRawExtension(m Message, fnum int32, b []byte) {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() {
- return
- }
-
- // Verify that the raw field is valid.
- for b0 := b; len(b0) > 0; {
- num, _, n := protowire.ConsumeField(b0)
- if int32(num) != fnum {
- panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum))
- }
- b0 = b0[n:]
- }
-
- ClearExtension(m, &ExtensionDesc{Field: fnum})
- mr.SetUnknown(append(mr.GetUnknown(), b...))
-}
-
-// ExtensionDescs returns a list of extension descriptors found in m,
-// containing descriptors for both populated extension fields in m and
-// also unknown fields of m that are in the extension range.
-// For the later case, an type incomplete descriptor is provided where only
-// the ExtensionDesc.Field field is populated.
-// The order of the extension descriptors is undefined.
-func ExtensionDescs(m Message) ([]*ExtensionDesc, error) {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
- return nil, errNotExtendable
- }
-
- // Collect a set of known extension descriptors.
- extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc)
- mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
- if fd.IsExtension() {
- xt := fd.(protoreflect.ExtensionTypeDescriptor)
- if xd, ok := xt.Type().(*ExtensionDesc); ok {
- extDescs[fd.Number()] = xd
- }
- }
- return true
- })
-
- // Collect a set of unknown extension descriptors.
- extRanges := mr.Descriptor().ExtensionRanges()
- for b := mr.GetUnknown(); len(b) > 0; {
- num, _, n := protowire.ConsumeField(b)
- if extRanges.Has(num) && extDescs[num] == nil {
- extDescs[num] = nil
- }
- b = b[n:]
- }
-
- // Transpose the set of descriptors into a list.
- var xts []*ExtensionDesc
- for num, xt := range extDescs {
- if xt == nil {
- xt = &ExtensionDesc{Field: int32(num)}
- }
- xts = append(xts, xt)
- }
- return xts, nil
-}
-
-// isValidExtension reports whether xtd is a valid extension descriptor for md.
-func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool {
- return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number())
-}
-
-// isScalarKind reports whether k is a protobuf scalar kind (except bytes).
-// This function exists for historical reasons since the representation of
-// scalars differs between v1 and v2, where v1 uses *T and v2 uses T.
-func isScalarKind(k reflect.Kind) bool {
- switch k {
- case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
- return true
- default:
- return false
- }
-}
-
-// clearUnknown removes unknown fields from m where remover.Has reports true.
-func clearUnknown(m protoreflect.Message, remover interface {
- Has(protoreflect.FieldNumber) bool
-}) {
- var bo protoreflect.RawFields
- for bi := m.GetUnknown(); len(bi) > 0; {
- num, _, n := protowire.ConsumeField(bi)
- if !remover.Has(num) {
- bo = append(bo, bi[:n]...)
- }
- bi = bi[n:]
- }
- if bi := m.GetUnknown(); len(bi) != len(bo) {
- m.SetUnknown(bo)
- }
-}
-
-type fieldNum protoreflect.FieldNumber
-
-func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool {
- return protoreflect.FieldNumber(n1) == n2
-}
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
deleted file mode 100644
index dcdc2202..00000000
--- a/vendor/github.com/golang/protobuf/proto/properties.go
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "fmt"
- "reflect"
- "strconv"
- "strings"
- "sync"
-
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/runtime/protoimpl"
-)
-
-// StructProperties represents protocol buffer type information for a
-// generated protobuf message in the open-struct API.
-//
-// Deprecated: Do not use.
-type StructProperties struct {
- // Prop are the properties for each field.
- //
- // Fields belonging to a oneof are stored in OneofTypes instead, with a
- // single Properties representing the parent oneof held here.
- //
- // The order of Prop matches the order of fields in the Go struct.
- // Struct fields that are not related to protobufs have a "XXX_" prefix
- // in the Properties.Name and must be ignored by the user.
- Prop []*Properties
-
- // OneofTypes contains information about the oneof fields in this message.
- // It is keyed by the protobuf field name.
- OneofTypes map[string]*OneofProperties
-}
-
-// Properties represents the type information for a protobuf message field.
-//
-// Deprecated: Do not use.
-type Properties struct {
- // Name is a placeholder name with little meaningful semantic value.
- // If the name has an "XXX_" prefix, the entire Properties must be ignored.
- Name string
- // OrigName is the protobuf field name or oneof name.
- OrigName string
- // JSONName is the JSON name for the protobuf field.
- JSONName string
- // Enum is a placeholder name for enums.
- // For historical reasons, this is neither the Go name for the enum,
- // nor the protobuf name for the enum.
- Enum string // Deprecated: Do not use.
- // Weak contains the full name of the weakly referenced message.
- Weak string
- // Wire is a string representation of the wire type.
- Wire string
- // WireType is the protobuf wire type for the field.
- WireType int
- // Tag is the protobuf field number.
- Tag int
- // Required reports whether this is a required field.
- Required bool
- // Optional reports whether this is a optional field.
- Optional bool
- // Repeated reports whether this is a repeated field.
- Repeated bool
- // Packed reports whether this is a packed repeated field of scalars.
- Packed bool
- // Proto3 reports whether this field operates under the proto3 syntax.
- Proto3 bool
- // Oneof reports whether this field belongs within a oneof.
- Oneof bool
-
- // Default is the default value in string form.
- Default string
- // HasDefault reports whether the field has a default value.
- HasDefault bool
-
- // MapKeyProp is the properties for the key field for a map field.
- MapKeyProp *Properties
- // MapValProp is the properties for the value field for a map field.
- MapValProp *Properties
-}
-
-// OneofProperties represents the type information for a protobuf oneof.
-//
-// Deprecated: Do not use.
-type OneofProperties struct {
- // Type is a pointer to the generated wrapper type for the field value.
- // This is nil for messages that are not in the open-struct API.
- Type reflect.Type
- // Field is the index into StructProperties.Prop for the containing oneof.
- Field int
- // Prop is the properties for the field.
- Prop *Properties
-}
-
-// String formats the properties in the protobuf struct field tag style.
-func (p *Properties) String() string {
- s := p.Wire
- s += "," + strconv.Itoa(p.Tag)
- if p.Required {
- s += ",req"
- }
- if p.Optional {
- s += ",opt"
- }
- if p.Repeated {
- s += ",rep"
- }
- if p.Packed {
- s += ",packed"
- }
- s += ",name=" + p.OrigName
- if p.JSONName != "" {
- s += ",json=" + p.JSONName
- }
- if len(p.Enum) > 0 {
- s += ",enum=" + p.Enum
- }
- if len(p.Weak) > 0 {
- s += ",weak=" + p.Weak
- }
- if p.Proto3 {
- s += ",proto3"
- }
- if p.Oneof {
- s += ",oneof"
- }
- if p.HasDefault {
- s += ",def=" + p.Default
- }
- return s
-}
-
-// Parse populates p by parsing a string in the protobuf struct field tag style.
-func (p *Properties) Parse(tag string) {
- // For example: "bytes,49,opt,name=foo,def=hello!"
- for len(tag) > 0 {
- i := strings.IndexByte(tag, ',')
- if i < 0 {
- i = len(tag)
- }
- switch s := tag[:i]; {
- case strings.HasPrefix(s, "name="):
- p.OrigName = s[len("name="):]
- case strings.HasPrefix(s, "json="):
- p.JSONName = s[len("json="):]
- case strings.HasPrefix(s, "enum="):
- p.Enum = s[len("enum="):]
- case strings.HasPrefix(s, "weak="):
- p.Weak = s[len("weak="):]
- case strings.Trim(s, "0123456789") == "":
- n, _ := strconv.ParseUint(s, 10, 32)
- p.Tag = int(n)
- case s == "opt":
- p.Optional = true
- case s == "req":
- p.Required = true
- case s == "rep":
- p.Repeated = true
- case s == "varint" || s == "zigzag32" || s == "zigzag64":
- p.Wire = s
- p.WireType = WireVarint
- case s == "fixed32":
- p.Wire = s
- p.WireType = WireFixed32
- case s == "fixed64":
- p.Wire = s
- p.WireType = WireFixed64
- case s == "bytes":
- p.Wire = s
- p.WireType = WireBytes
- case s == "group":
- p.Wire = s
- p.WireType = WireStartGroup
- case s == "packed":
- p.Packed = true
- case s == "proto3":
- p.Proto3 = true
- case s == "oneof":
- p.Oneof = true
- case strings.HasPrefix(s, "def="):
- // The default tag is special in that everything afterwards is the
- // default regardless of the presence of commas.
- p.HasDefault = true
- p.Default, i = tag[len("def="):], len(tag)
- }
- tag = strings.TrimPrefix(tag[i:], ",")
- }
-}
-
-// Init populates the properties from a protocol buffer struct tag.
-//
-// Deprecated: Do not use.
-func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
- p.Name = name
- p.OrigName = name
- if tag == "" {
- return
- }
- p.Parse(tag)
-
- if typ != nil && typ.Kind() == reflect.Map {
- p.MapKeyProp = new(Properties)
- p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil)
- p.MapValProp = new(Properties)
- p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil)
- }
-}
-
-var propertiesCache sync.Map // map[reflect.Type]*StructProperties
-
-// GetProperties returns the list of properties for the type represented by t,
-// which must be a generated protocol buffer message in the open-struct API,
-// where protobuf message fields are represented by exported Go struct fields.
-//
-// Deprecated: Use protobuf reflection instead.
-func GetProperties(t reflect.Type) *StructProperties {
- if p, ok := propertiesCache.Load(t); ok {
- return p.(*StructProperties)
- }
- p, _ := propertiesCache.LoadOrStore(t, newProperties(t))
- return p.(*StructProperties)
-}
-
-func newProperties(t reflect.Type) *StructProperties {
- if t.Kind() != reflect.Struct {
- panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
- }
-
- var hasOneof bool
- prop := new(StructProperties)
-
- // Construct a list of properties for each field in the struct.
- for i := 0; i < t.NumField(); i++ {
- p := new(Properties)
- f := t.Field(i)
- tagField := f.Tag.Get("protobuf")
- p.Init(f.Type, f.Name, tagField, &f)
-
- tagOneof := f.Tag.Get("protobuf_oneof")
- if tagOneof != "" {
- hasOneof = true
- p.OrigName = tagOneof
- }
-
- // Rename unrelated struct fields with the "XXX_" prefix since so much
- // user code simply checks for this to exclude special fields.
- if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") {
- p.Name = "XXX_" + p.Name
- p.OrigName = "XXX_" + p.OrigName
- } else if p.Weak != "" {
- p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field
- }
-
- prop.Prop = append(prop.Prop, p)
- }
-
- // Construct a mapping of oneof field names to properties.
- if hasOneof {
- var oneofWrappers []interface{}
- if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok {
- oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{})
- }
- if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok {
- oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{})
- }
- if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok {
- if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok {
- oneofWrappers = m.ProtoMessageInfo().OneofWrappers
- }
- }
-
- prop.OneofTypes = make(map[string]*OneofProperties)
- for _, wrapper := range oneofWrappers {
- p := &OneofProperties{
- Type: reflect.ValueOf(wrapper).Type(), // *T
- Prop: new(Properties),
- }
- f := p.Type.Elem().Field(0)
- p.Prop.Name = f.Name
- p.Prop.Parse(f.Tag.Get("protobuf"))
-
- // Determine the struct field that contains this oneof.
- // Each wrapper is assignable to exactly one parent field.
- var foundOneof bool
- for i := 0; i < t.NumField() && !foundOneof; i++ {
- if p.Type.AssignableTo(t.Field(i).Type) {
- p.Field = i
- foundOneof = true
- }
- }
- if !foundOneof {
- panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
- }
- prop.OneofTypes[p.Prop.OrigName] = p
- }
- }
-
- return prop
-}
-
-func (sp *StructProperties) Len() int { return len(sp.Prop) }
-func (sp *StructProperties) Less(i, j int) bool { return false }
-func (sp *StructProperties) Swap(i, j int) { return }
diff --git a/vendor/github.com/golang/protobuf/proto/proto.go b/vendor/github.com/golang/protobuf/proto/proto.go
deleted file mode 100644
index 5aee89c3..00000000
--- a/vendor/github.com/golang/protobuf/proto/proto.go
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package proto provides functionality for handling protocol buffer messages.
-// In particular, it provides marshaling and unmarshaling between a protobuf
-// message and the binary wire format.
-//
-// See https://developers.google.com/protocol-buffers/docs/gotutorial for
-// more information.
-//
-// Deprecated: Use the "google.golang.org/protobuf/proto" package instead.
-package proto
-
-import (
- protoV2 "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/runtime/protoiface"
- "google.golang.org/protobuf/runtime/protoimpl"
-)
-
-const (
- ProtoPackageIsVersion1 = true
- ProtoPackageIsVersion2 = true
- ProtoPackageIsVersion3 = true
- ProtoPackageIsVersion4 = true
-)
-
-// GeneratedEnum is any enum type generated by protoc-gen-go
-// which is a named int32 kind.
-// This type exists for documentation purposes.
-type GeneratedEnum interface{}
-
-// GeneratedMessage is any message type generated by protoc-gen-go
-// which is a pointer to a named struct kind.
-// This type exists for documentation purposes.
-type GeneratedMessage interface{}
-
-// Message is a protocol buffer message.
-//
-// This is the v1 version of the message interface and is marginally better
-// than an empty interface as it lacks any method to programatically interact
-// with the contents of the message.
-//
-// A v2 message is declared in "google.golang.org/protobuf/proto".Message and
-// exposes protobuf reflection as a first-class feature of the interface.
-//
-// To convert a v1 message to a v2 message, use the MessageV2 function.
-// To convert a v2 message to a v1 message, use the MessageV1 function.
-type Message = protoiface.MessageV1
-
-// MessageV1 converts either a v1 or v2 message to a v1 message.
-// It returns nil if m is nil.
-func MessageV1(m GeneratedMessage) protoiface.MessageV1 {
- return protoimpl.X.ProtoMessageV1Of(m)
-}
-
-// MessageV2 converts either a v1 or v2 message to a v2 message.
-// It returns nil if m is nil.
-func MessageV2(m GeneratedMessage) protoV2.Message {
- return protoimpl.X.ProtoMessageV2Of(m)
-}
-
-// MessageReflect returns a reflective view for a message.
-// It returns nil if m is nil.
-func MessageReflect(m Message) protoreflect.Message {
- return protoimpl.X.MessageOf(m)
-}
-
-// Marshaler is implemented by messages that can marshal themselves.
-// This interface is used by the following functions: Size, Marshal,
-// Buffer.Marshal, and Buffer.EncodeMessage.
-//
-// Deprecated: Do not implement.
-type Marshaler interface {
- // Marshal formats the encoded bytes of the message.
- // It should be deterministic and emit valid protobuf wire data.
- // The caller takes ownership of the returned buffer.
- Marshal() ([]byte, error)
-}
-
-// Unmarshaler is implemented by messages that can unmarshal themselves.
-// This interface is used by the following functions: Unmarshal, UnmarshalMerge,
-// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup.
-//
-// Deprecated: Do not implement.
-type Unmarshaler interface {
- // Unmarshal parses the encoded bytes of the protobuf wire input.
- // The provided buffer is only valid for during method call.
- // It should not reset the receiver message.
- Unmarshal([]byte) error
-}
-
-// Merger is implemented by messages that can merge themselves.
-// This interface is used by the following functions: Clone and Merge.
-//
-// Deprecated: Do not implement.
-type Merger interface {
- // Merge merges the contents of src into the receiver message.
- // It clones all data structures in src such that it aliases no mutable
- // memory referenced by src.
- Merge(src Message)
-}
-
-// RequiredNotSetError is an error type returned when
-// marshaling or unmarshaling a message with missing required fields.
-type RequiredNotSetError struct {
- err error
-}
-
-func (e *RequiredNotSetError) Error() string {
- if e.err != nil {
- return e.err.Error()
- }
- return "proto: required field not set"
-}
-func (e *RequiredNotSetError) RequiredNotSet() bool {
- return true
-}
-
-func checkRequiredNotSet(m protoV2.Message) error {
- if err := protoV2.CheckInitialized(m); err != nil {
- return &RequiredNotSetError{err: err}
- }
- return nil
-}
-
-// Clone returns a deep copy of src.
-func Clone(src Message) Message {
- return MessageV1(protoV2.Clone(MessageV2(src)))
-}
-
-// Merge merges src into dst, which must be messages of the same type.
-//
-// Populated scalar fields in src are copied to dst, while populated
-// singular messages in src are merged into dst by recursively calling Merge.
-// The elements of every list field in src is appended to the corresponded
-// list fields in dst. The entries of every map field in src is copied into
-// the corresponding map field in dst, possibly replacing existing entries.
-// The unknown fields of src are appended to the unknown fields of dst.
-func Merge(dst, src Message) {
- protoV2.Merge(MessageV2(dst), MessageV2(src))
-}
-
-// Equal reports whether two messages are equal.
-// If two messages marshal to the same bytes under deterministic serialization,
-// then Equal is guaranteed to report true.
-//
-// Two messages are equal if they are the same protobuf message type,
-// have the same set of populated known and extension field values,
-// and the same set of unknown fields values.
-//
-// Scalar values are compared with the equivalent of the == operator in Go,
-// except bytes values which are compared using bytes.Equal and
-// floating point values which specially treat NaNs as equal.
-// Message values are compared by recursively calling Equal.
-// Lists are equal if each element value is also equal.
-// Maps are equal if they have the same set of keys, where the pair of values
-// for each key is also equal.
-func Equal(x, y Message) bool {
- return protoV2.Equal(MessageV2(x), MessageV2(y))
-}
-
-func isMessageSet(md protoreflect.MessageDescriptor) bool {
- ms, ok := md.(interface{ IsMessageSet() bool })
- return ok && ms.IsMessageSet()
-}
diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go
deleted file mode 100644
index 066b4323..00000000
--- a/vendor/github.com/golang/protobuf/proto/registry.go
+++ /dev/null
@@ -1,317 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "bytes"
- "compress/gzip"
- "fmt"
- "io/ioutil"
- "reflect"
- "strings"
- "sync"
-
- "google.golang.org/protobuf/reflect/protodesc"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
- "google.golang.org/protobuf/runtime/protoimpl"
-)
-
-// filePath is the path to the proto source file.
-type filePath = string // e.g., "google/protobuf/descriptor.proto"
-
-// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto.
-type fileDescGZIP = []byte
-
-var fileCache sync.Map // map[filePath]fileDescGZIP
-
-// RegisterFile is called from generated code to register the compressed
-// FileDescriptorProto with the file path for a proto source file.
-//
-// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead.
-func RegisterFile(s filePath, d fileDescGZIP) {
- // Decompress the descriptor.
- zr, err := gzip.NewReader(bytes.NewReader(d))
- if err != nil {
- panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
- }
- b, err := ioutil.ReadAll(zr)
- if err != nil {
- panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
- }
-
- // Construct a protoreflect.FileDescriptor from the raw descriptor.
- // Note that DescBuilder.Build automatically registers the constructed
- // file descriptor with the v2 registry.
- protoimpl.DescBuilder{RawDescriptor: b}.Build()
-
- // Locally cache the raw descriptor form for the file.
- fileCache.Store(s, d)
-}
-
-// FileDescriptor returns the compressed FileDescriptorProto given the file path
-// for a proto source file. It returns nil if not found.
-//
-// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead.
-func FileDescriptor(s filePath) fileDescGZIP {
- if v, ok := fileCache.Load(s); ok {
- return v.(fileDescGZIP)
- }
-
- // Find the descriptor in the v2 registry.
- var b []byte
- if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil {
- b, _ = Marshal(protodesc.ToFileDescriptorProto(fd))
- }
-
- // Locally cache the raw descriptor form for the file.
- if len(b) > 0 {
- v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b))
- return v.(fileDescGZIP)
- }
- return nil
-}
-
-// enumName is the name of an enum. For historical reasons, the enum name is
-// neither the full Go name nor the full protobuf name of the enum.
-// The name is the dot-separated combination of just the proto package that the
-// enum is declared within followed by the Go type name of the generated enum.
-type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum"
-
-// enumsByName maps enum values by name to their numeric counterpart.
-type enumsByName = map[string]int32
-
-// enumsByNumber maps enum values by number to their name counterpart.
-type enumsByNumber = map[int32]string
-
-var enumCache sync.Map // map[enumName]enumsByName
-var numFilesCache sync.Map // map[protoreflect.FullName]int
-
-// RegisterEnum is called from the generated code to register the mapping of
-// enum value names to enum numbers for the enum identified by s.
-//
-// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead.
-func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) {
- if _, ok := enumCache.Load(s); ok {
- panic("proto: duplicate enum registered: " + s)
- }
- enumCache.Store(s, m)
-
- // This does not forward registration to the v2 registry since this API
- // lacks sufficient information to construct a complete v2 enum descriptor.
-}
-
-// EnumValueMap returns the mapping from enum value names to enum numbers for
-// the enum of the given name. It returns nil if not found.
-//
-// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead.
-func EnumValueMap(s enumName) enumsByName {
- if v, ok := enumCache.Load(s); ok {
- return v.(enumsByName)
- }
-
- // Check whether the cache is stale. If the number of files in the current
- // package differs, then it means that some enums may have been recently
- // registered upstream that we do not know about.
- var protoPkg protoreflect.FullName
- if i := strings.LastIndexByte(s, '.'); i >= 0 {
- protoPkg = protoreflect.FullName(s[:i])
- }
- v, _ := numFilesCache.Load(protoPkg)
- numFiles, _ := v.(int)
- if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles {
- return nil // cache is up-to-date; was not found earlier
- }
-
- // Update the enum cache for all enums declared in the given proto package.
- numFiles = 0
- protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool {
- walkEnums(fd, func(ed protoreflect.EnumDescriptor) {
- name := protoimpl.X.LegacyEnumName(ed)
- if _, ok := enumCache.Load(name); !ok {
- m := make(enumsByName)
- evs := ed.Values()
- for i := evs.Len() - 1; i >= 0; i-- {
- ev := evs.Get(i)
- m[string(ev.Name())] = int32(ev.Number())
- }
- enumCache.LoadOrStore(name, m)
- }
- })
- numFiles++
- return true
- })
- numFilesCache.Store(protoPkg, numFiles)
-
- // Check cache again for enum map.
- if v, ok := enumCache.Load(s); ok {
- return v.(enumsByName)
- }
- return nil
-}
-
-// walkEnums recursively walks all enums declared in d.
-func walkEnums(d interface {
- Enums() protoreflect.EnumDescriptors
- Messages() protoreflect.MessageDescriptors
-}, f func(protoreflect.EnumDescriptor)) {
- eds := d.Enums()
- for i := eds.Len() - 1; i >= 0; i-- {
- f(eds.Get(i))
- }
- mds := d.Messages()
- for i := mds.Len() - 1; i >= 0; i-- {
- walkEnums(mds.Get(i), f)
- }
-}
-
-// messageName is the full name of protobuf message.
-type messageName = string
-
-var messageTypeCache sync.Map // map[messageName]reflect.Type
-
-// RegisterType is called from generated code to register the message Go type
-// for a message of the given name.
-//
-// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead.
-func RegisterType(m Message, s messageName) {
- mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s))
- if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil {
- panic(err)
- }
- messageTypeCache.Store(s, reflect.TypeOf(m))
-}
-
-// RegisterMapType is called from generated code to register the Go map type
-// for a protobuf message representing a map entry.
-//
-// Deprecated: Do not use.
-func RegisterMapType(m interface{}, s messageName) {
- t := reflect.TypeOf(m)
- if t.Kind() != reflect.Map {
- panic(fmt.Sprintf("invalid map kind: %v", t))
- }
- if _, ok := messageTypeCache.Load(s); ok {
- panic(fmt.Errorf("proto: duplicate proto message registered: %s", s))
- }
- messageTypeCache.Store(s, t)
-}
-
-// MessageType returns the message type for a named message.
-// It returns nil if not found.
-//
-// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead.
-func MessageType(s messageName) reflect.Type {
- if v, ok := messageTypeCache.Load(s); ok {
- return v.(reflect.Type)
- }
-
- // Derive the message type from the v2 registry.
- var t reflect.Type
- if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil {
- t = messageGoType(mt)
- }
-
- // If we could not get a concrete type, it is possible that it is a
- // pseudo-message for a map entry.
- if t == nil {
- d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s))
- if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() {
- kt := goTypeForField(md.Fields().ByNumber(1))
- vt := goTypeForField(md.Fields().ByNumber(2))
- t = reflect.MapOf(kt, vt)
- }
- }
-
- // Locally cache the message type for the given name.
- if t != nil {
- v, _ := messageTypeCache.LoadOrStore(s, t)
- return v.(reflect.Type)
- }
- return nil
-}
-
-func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type {
- switch k := fd.Kind(); k {
- case protoreflect.EnumKind:
- if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil {
- return enumGoType(et)
- }
- return reflect.TypeOf(protoreflect.EnumNumber(0))
- case protoreflect.MessageKind, protoreflect.GroupKind:
- if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil {
- return messageGoType(mt)
- }
- return reflect.TypeOf((*protoreflect.Message)(nil)).Elem()
- default:
- return reflect.TypeOf(fd.Default().Interface())
- }
-}
-
-func enumGoType(et protoreflect.EnumType) reflect.Type {
- return reflect.TypeOf(et.New(0))
-}
-
-func messageGoType(mt protoreflect.MessageType) reflect.Type {
- return reflect.TypeOf(MessageV1(mt.Zero().Interface()))
-}
-
-// MessageName returns the full protobuf name for the given message type.
-//
-// Deprecated: Use protoreflect.MessageDescriptor.FullName instead.
-func MessageName(m Message) messageName {
- if m == nil {
- return ""
- }
- if m, ok := m.(interface{ XXX_MessageName() messageName }); ok {
- return m.XXX_MessageName()
- }
- return messageName(protoimpl.X.MessageDescriptorOf(m).FullName())
-}
-
-// RegisterExtension is called from the generated code to register
-// the extension descriptor.
-//
-// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead.
-func RegisterExtension(d *ExtensionDesc) {
- if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil {
- panic(err)
- }
-}
-
-type extensionsByNumber = map[int32]*ExtensionDesc
-
-var extensionCache sync.Map // map[messageName]extensionsByNumber
-
-// RegisteredExtensions returns a map of the registered extensions for the
-// provided protobuf message, indexed by the extension field number.
-//
-// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead.
-func RegisteredExtensions(m Message) extensionsByNumber {
- // Check whether the cache is stale. If the number of extensions for
- // the given message differs, then it means that some extensions were
- // recently registered upstream that we do not know about.
- s := MessageName(m)
- v, _ := extensionCache.Load(s)
- xs, _ := v.(extensionsByNumber)
- if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) {
- return xs // cache is up-to-date
- }
-
- // Cache is stale, re-compute the extensions map.
- xs = make(extensionsByNumber)
- protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool {
- if xd, ok := xt.(*ExtensionDesc); ok {
- xs[int32(xt.TypeDescriptor().Number())] = xd
- } else {
- // TODO: This implies that the protoreflect.ExtensionType is a
- // custom type not generated by protoc-gen-go. We could try and
- // convert the type to an ExtensionDesc.
- }
- return true
- })
- extensionCache.Store(s, xs)
- return xs
-}
diff --git a/vendor/github.com/golang/protobuf/proto/text_decode.go b/vendor/github.com/golang/protobuf/proto/text_decode.go
deleted file mode 100644
index 47eb3e44..00000000
--- a/vendor/github.com/golang/protobuf/proto/text_decode.go
+++ /dev/null
@@ -1,801 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "encoding"
- "errors"
- "fmt"
- "reflect"
- "strconv"
- "strings"
- "unicode/utf8"
-
- "google.golang.org/protobuf/encoding/prototext"
- protoV2 "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
-)
-
-const wrapTextUnmarshalV2 = false
-
-// ParseError is returned by UnmarshalText.
-type ParseError struct {
- Message string
-
- // Deprecated: Do not use.
- Line, Offset int
-}
-
-func (e *ParseError) Error() string {
- if wrapTextUnmarshalV2 {
- return e.Message
- }
- if e.Line == 1 {
- return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message)
- }
- return fmt.Sprintf("line %d: %v", e.Line, e.Message)
-}
-
-// UnmarshalText parses a proto text formatted string into m.
-func UnmarshalText(s string, m Message) error {
- if u, ok := m.(encoding.TextUnmarshaler); ok {
- return u.UnmarshalText([]byte(s))
- }
-
- m.Reset()
- mi := MessageV2(m)
-
- if wrapTextUnmarshalV2 {
- err := prototext.UnmarshalOptions{
- AllowPartial: true,
- }.Unmarshal([]byte(s), mi)
- if err != nil {
- return &ParseError{Message: err.Error()}
- }
- return checkRequiredNotSet(mi)
- } else {
- if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil {
- return err
- }
- return checkRequiredNotSet(mi)
- }
-}
-
-type textParser struct {
- s string // remaining input
- done bool // whether the parsing is finished (success or error)
- backed bool // whether back() was called
- offset, line int
- cur token
-}
-
-type token struct {
- value string
- err *ParseError
- line int // line number
- offset int // byte number from start of input, not start of line
- unquoted string // the unquoted version of value, if it was a quoted string
-}
-
-func newTextParser(s string) *textParser {
- p := new(textParser)
- p.s = s
- p.line = 1
- p.cur.line = 1
- return p
-}
-
-func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) {
- md := m.Descriptor()
- fds := md.Fields()
-
- // A struct is a sequence of "name: value", terminated by one of
- // '>' or '}', or the end of the input. A name may also be
- // "[extension]" or "[type/url]".
- //
- // The whole struct can also be an expanded Any message, like:
- // [type/url] < ... struct contents ... >
- seen := make(map[protoreflect.FieldNumber]bool)
- for {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value == terminator {
- break
- }
- if tok.value == "[" {
- if err := p.unmarshalExtensionOrAny(m, seen); err != nil {
- return err
- }
- continue
- }
-
- // This is a normal, non-extension field.
- name := protoreflect.Name(tok.value)
- fd := fds.ByName(name)
- switch {
- case fd == nil:
- gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name))))
- if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name {
- fd = gd
- }
- case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name:
- fd = nil
- case fd.IsWeak() && fd.Message().IsPlaceholder():
- fd = nil
- }
- if fd == nil {
- typeName := string(md.FullName())
- if m, ok := m.Interface().(Message); ok {
- t := reflect.TypeOf(m)
- if t.Kind() == reflect.Ptr {
- typeName = t.Elem().String()
- }
- }
- return p.errorf("unknown field name %q in %v", name, typeName)
- }
- if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil {
- return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name())
- }
- if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] {
- return p.errorf("non-repeated field %q was repeated", fd.Name())
- }
- seen[fd.Number()] = true
-
- // Consume any colon.
- if err := p.checkForColon(fd); err != nil {
- return err
- }
-
- // Parse into the field.
- v := m.Get(fd)
- if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
- v = m.Mutable(fd)
- }
- if v, err = p.unmarshalValue(v, fd); err != nil {
- return err
- }
- m.Set(fd, v)
-
- if err := p.consumeOptionalSeparator(); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error {
- name, err := p.consumeExtensionOrAnyName()
- if err != nil {
- return err
- }
-
- // If it contains a slash, it's an Any type URL.
- if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- // consume an optional colon
- if tok.value == ":" {
- tok = p.next()
- if tok.err != nil {
- return tok.err
- }
- }
-
- var terminator string
- switch tok.value {
- case "<":
- terminator = ">"
- case "{":
- terminator = "}"
- default:
- return p.errorf("expected '{' or '<', found %q", tok.value)
- }
-
- mt, err := protoregistry.GlobalTypes.FindMessageByURL(name)
- if err != nil {
- return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):])
- }
- m2 := mt.New()
- if err := p.unmarshalMessage(m2, terminator); err != nil {
- return err
- }
- b, err := protoV2.Marshal(m2.Interface())
- if err != nil {
- return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err)
- }
-
- urlFD := m.Descriptor().Fields().ByName("type_url")
- valFD := m.Descriptor().Fields().ByName("value")
- if seen[urlFD.Number()] {
- return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name())
- }
- if seen[valFD.Number()] {
- return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name())
- }
- m.Set(urlFD, protoreflect.ValueOfString(name))
- m.Set(valFD, protoreflect.ValueOfBytes(b))
- seen[urlFD.Number()] = true
- seen[valFD.Number()] = true
- return nil
- }
-
- xname := protoreflect.FullName(name)
- xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
- if xt == nil && isMessageSet(m.Descriptor()) {
- xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
- }
- if xt == nil {
- return p.errorf("unrecognized extension %q", name)
- }
- fd := xt.TypeDescriptor()
- if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
- return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName())
- }
-
- if err := p.checkForColon(fd); err != nil {
- return err
- }
-
- v := m.Get(fd)
- if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
- v = m.Mutable(fd)
- }
- v, err = p.unmarshalValue(v, fd)
- if err != nil {
- return err
- }
- m.Set(fd, v)
- return p.consumeOptionalSeparator()
-}
-
-func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
- tok := p.next()
- if tok.err != nil {
- return v, tok.err
- }
- if tok.value == "" {
- return v, p.errorf("unexpected EOF")
- }
-
- switch {
- case fd.IsList():
- lv := v.List()
- var err error
- if tok.value == "[" {
- // Repeated field with list notation, like [1,2,3].
- for {
- vv := lv.NewElement()
- vv, err = p.unmarshalSingularValue(vv, fd)
- if err != nil {
- return v, err
- }
- lv.Append(vv)
-
- tok := p.next()
- if tok.err != nil {
- return v, tok.err
- }
- if tok.value == "]" {
- break
- }
- if tok.value != "," {
- return v, p.errorf("Expected ']' or ',' found %q", tok.value)
- }
- }
- return v, nil
- }
-
- // One value of the repeated field.
- p.back()
- vv := lv.NewElement()
- vv, err = p.unmarshalSingularValue(vv, fd)
- if err != nil {
- return v, err
- }
- lv.Append(vv)
- return v, nil
- case fd.IsMap():
- // The map entry should be this sequence of tokens:
- // < key : KEY value : VALUE >
- // However, implementations may omit key or value, and technically
- // we should support them in any order.
- var terminator string
- switch tok.value {
- case "<":
- terminator = ">"
- case "{":
- terminator = "}"
- default:
- return v, p.errorf("expected '{' or '<', found %q", tok.value)
- }
-
- keyFD := fd.MapKey()
- valFD := fd.MapValue()
-
- mv := v.Map()
- kv := keyFD.Default()
- vv := mv.NewValue()
- for {
- tok := p.next()
- if tok.err != nil {
- return v, tok.err
- }
- if tok.value == terminator {
- break
- }
- var err error
- switch tok.value {
- case "key":
- if err := p.consumeToken(":"); err != nil {
- return v, err
- }
- if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil {
- return v, err
- }
- if err := p.consumeOptionalSeparator(); err != nil {
- return v, err
- }
- case "value":
- if err := p.checkForColon(valFD); err != nil {
- return v, err
- }
- if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil {
- return v, err
- }
- if err := p.consumeOptionalSeparator(); err != nil {
- return v, err
- }
- default:
- p.back()
- return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
- }
- }
- mv.Set(kv.MapKey(), vv)
- return v, nil
- default:
- p.back()
- return p.unmarshalSingularValue(v, fd)
- }
-}
-
-func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
- tok := p.next()
- if tok.err != nil {
- return v, tok.err
- }
- if tok.value == "" {
- return v, p.errorf("unexpected EOF")
- }
-
- switch fd.Kind() {
- case protoreflect.BoolKind:
- switch tok.value {
- case "true", "1", "t", "True":
- return protoreflect.ValueOfBool(true), nil
- case "false", "0", "f", "False":
- return protoreflect.ValueOfBool(false), nil
- }
- case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
- if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
- return protoreflect.ValueOfInt32(int32(x)), nil
- }
-
- // The C++ parser accepts large positive hex numbers that uses
- // two's complement arithmetic to represent negative numbers.
- // This feature is here for backwards compatibility with C++.
- if strings.HasPrefix(tok.value, "0x") {
- if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
- return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil
- }
- }
- case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
- if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
- return protoreflect.ValueOfInt64(int64(x)), nil
- }
-
- // The C++ parser accepts large positive hex numbers that uses
- // two's complement arithmetic to represent negative numbers.
- // This feature is here for backwards compatibility with C++.
- if strings.HasPrefix(tok.value, "0x") {
- if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
- return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil
- }
- }
- case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
- if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
- return protoreflect.ValueOfUint32(uint32(x)), nil
- }
- case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
- if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
- return protoreflect.ValueOfUint64(uint64(x)), nil
- }
- case protoreflect.FloatKind:
- // Ignore 'f' for compatibility with output generated by C++,
- // but don't remove 'f' when the value is "-inf" or "inf".
- v := tok.value
- if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
- v = v[:len(v)-len("f")]
- }
- if x, err := strconv.ParseFloat(v, 32); err == nil {
- return protoreflect.ValueOfFloat32(float32(x)), nil
- }
- case protoreflect.DoubleKind:
- // Ignore 'f' for compatibility with output generated by C++,
- // but don't remove 'f' when the value is "-inf" or "inf".
- v := tok.value
- if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
- v = v[:len(v)-len("f")]
- }
- if x, err := strconv.ParseFloat(v, 64); err == nil {
- return protoreflect.ValueOfFloat64(float64(x)), nil
- }
- case protoreflect.StringKind:
- if isQuote(tok.value[0]) {
- return protoreflect.ValueOfString(tok.unquoted), nil
- }
- case protoreflect.BytesKind:
- if isQuote(tok.value[0]) {
- return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil
- }
- case protoreflect.EnumKind:
- if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
- return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil
- }
- vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value))
- if vd != nil {
- return protoreflect.ValueOfEnum(vd.Number()), nil
- }
- case protoreflect.MessageKind, protoreflect.GroupKind:
- var terminator string
- switch tok.value {
- case "{":
- terminator = "}"
- case "<":
- terminator = ">"
- default:
- return v, p.errorf("expected '{' or '<', found %q", tok.value)
- }
- err := p.unmarshalMessage(v.Message(), terminator)
- return v, err
- default:
- panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
- }
- return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value)
-}
-
-// Consume a ':' from the input stream (if the next token is a colon),
-// returning an error if a colon is needed but not present.
-func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != ":" {
- if fd.Message() == nil {
- return p.errorf("expected ':', found %q", tok.value)
- }
- p.back()
- }
- return nil
-}
-
-// consumeExtensionOrAnyName consumes an extension name or an Any type URL and
-// the following ']'. It returns the name or URL consumed.
-func (p *textParser) consumeExtensionOrAnyName() (string, error) {
- tok := p.next()
- if tok.err != nil {
- return "", tok.err
- }
-
- // If extension name or type url is quoted, it's a single token.
- if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
- name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
- if err != nil {
- return "", err
- }
- return name, p.consumeToken("]")
- }
-
- // Consume everything up to "]"
- var parts []string
- for tok.value != "]" {
- parts = append(parts, tok.value)
- tok = p.next()
- if tok.err != nil {
- return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
- }
- if p.done && tok.value != "]" {
- return "", p.errorf("unclosed type_url or extension name")
- }
- }
- return strings.Join(parts, ""), nil
-}
-
-// consumeOptionalSeparator consumes an optional semicolon or comma.
-// It is used in unmarshalMessage to provide backward compatibility.
-func (p *textParser) consumeOptionalSeparator() error {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != ";" && tok.value != "," {
- p.back()
- }
- return nil
-}
-
-func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
- pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
- p.cur.err = pe
- p.done = true
- return pe
-}
-
-func (p *textParser) skipWhitespace() {
- i := 0
- for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
- if p.s[i] == '#' {
- // comment; skip to end of line or input
- for i < len(p.s) && p.s[i] != '\n' {
- i++
- }
- if i == len(p.s) {
- break
- }
- }
- if p.s[i] == '\n' {
- p.line++
- }
- i++
- }
- p.offset += i
- p.s = p.s[i:len(p.s)]
- if len(p.s) == 0 {
- p.done = true
- }
-}
-
-func (p *textParser) advance() {
- // Skip whitespace
- p.skipWhitespace()
- if p.done {
- return
- }
-
- // Start of non-whitespace
- p.cur.err = nil
- p.cur.offset, p.cur.line = p.offset, p.line
- p.cur.unquoted = ""
- switch p.s[0] {
- case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
- // Single symbol
- p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
- case '"', '\'':
- // Quoted string
- i := 1
- for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
- if p.s[i] == '\\' && i+1 < len(p.s) {
- // skip escaped char
- i++
- }
- i++
- }
- if i >= len(p.s) || p.s[i] != p.s[0] {
- p.errorf("unmatched quote")
- return
- }
- unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
- if err != nil {
- p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
- return
- }
- p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
- p.cur.unquoted = unq
- default:
- i := 0
- for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
- i++
- }
- if i == 0 {
- p.errorf("unexpected byte %#x", p.s[0])
- return
- }
- p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
- }
- p.offset += len(p.cur.value)
-}
-
-// Back off the parser by one token. Can only be done between calls to next().
-// It makes the next advance() a no-op.
-func (p *textParser) back() { p.backed = true }
-
-// Advances the parser and returns the new current token.
-func (p *textParser) next() *token {
- if p.backed || p.done {
- p.backed = false
- return &p.cur
- }
- p.advance()
- if p.done {
- p.cur.value = ""
- } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
- // Look for multiple quoted strings separated by whitespace,
- // and concatenate them.
- cat := p.cur
- for {
- p.skipWhitespace()
- if p.done || !isQuote(p.s[0]) {
- break
- }
- p.advance()
- if p.cur.err != nil {
- return &p.cur
- }
- cat.value += " " + p.cur.value
- cat.unquoted += p.cur.unquoted
- }
- p.done = false // parser may have seen EOF, but we want to return cat
- p.cur = cat
- }
- return &p.cur
-}
-
-func (p *textParser) consumeToken(s string) error {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != s {
- p.back()
- return p.errorf("expected %q, found %q", s, tok.value)
- }
- return nil
-}
-
-var errBadUTF8 = errors.New("proto: bad UTF-8")
-
-func unquoteC(s string, quote rune) (string, error) {
- // This is based on C++'s tokenizer.cc.
- // Despite its name, this is *not* parsing C syntax.
- // For instance, "\0" is an invalid quoted string.
-
- // Avoid allocation in trivial cases.
- simple := true
- for _, r := range s {
- if r == '\\' || r == quote {
- simple = false
- break
- }
- }
- if simple {
- return s, nil
- }
-
- buf := make([]byte, 0, 3*len(s)/2)
- for len(s) > 0 {
- r, n := utf8.DecodeRuneInString(s)
- if r == utf8.RuneError && n == 1 {
- return "", errBadUTF8
- }
- s = s[n:]
- if r != '\\' {
- if r < utf8.RuneSelf {
- buf = append(buf, byte(r))
- } else {
- buf = append(buf, string(r)...)
- }
- continue
- }
-
- ch, tail, err := unescape(s)
- if err != nil {
- return "", err
- }
- buf = append(buf, ch...)
- s = tail
- }
- return string(buf), nil
-}
-
-func unescape(s string) (ch string, tail string, err error) {
- r, n := utf8.DecodeRuneInString(s)
- if r == utf8.RuneError && n == 1 {
- return "", "", errBadUTF8
- }
- s = s[n:]
- switch r {
- case 'a':
- return "\a", s, nil
- case 'b':
- return "\b", s, nil
- case 'f':
- return "\f", s, nil
- case 'n':
- return "\n", s, nil
- case 'r':
- return "\r", s, nil
- case 't':
- return "\t", s, nil
- case 'v':
- return "\v", s, nil
- case '?':
- return "?", s, nil // trigraph workaround
- case '\'', '"', '\\':
- return string(r), s, nil
- case '0', '1', '2', '3', '4', '5', '6', '7':
- if len(s) < 2 {
- return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
- }
- ss := string(r) + s[:2]
- s = s[2:]
- i, err := strconv.ParseUint(ss, 8, 8)
- if err != nil {
- return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
- }
- return string([]byte{byte(i)}), s, nil
- case 'x', 'X', 'u', 'U':
- var n int
- switch r {
- case 'x', 'X':
- n = 2
- case 'u':
- n = 4
- case 'U':
- n = 8
- }
- if len(s) < n {
- return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
- }
- ss := s[:n]
- s = s[n:]
- i, err := strconv.ParseUint(ss, 16, 64)
- if err != nil {
- return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
- }
- if r == 'x' || r == 'X' {
- return string([]byte{byte(i)}), s, nil
- }
- if i > utf8.MaxRune {
- return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
- }
- return string(rune(i)), s, nil
- }
- return "", "", fmt.Errorf(`unknown escape \%c`, r)
-}
-
-func isIdentOrNumberChar(c byte) bool {
- switch {
- case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
- return true
- case '0' <= c && c <= '9':
- return true
- }
- switch c {
- case '-', '+', '.', '_':
- return true
- }
- return false
-}
-
-func isWhitespace(c byte) bool {
- switch c {
- case ' ', '\t', '\n', '\r':
- return true
- }
- return false
-}
-
-func isQuote(c byte) bool {
- switch c {
- case '"', '\'':
- return true
- }
- return false
-}
diff --git a/vendor/github.com/golang/protobuf/proto/text_encode.go b/vendor/github.com/golang/protobuf/proto/text_encode.go
deleted file mode 100644
index a31134ee..00000000
--- a/vendor/github.com/golang/protobuf/proto/text_encode.go
+++ /dev/null
@@ -1,560 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "bytes"
- "encoding"
- "fmt"
- "io"
- "math"
- "sort"
- "strings"
-
- "google.golang.org/protobuf/encoding/prototext"
- "google.golang.org/protobuf/encoding/protowire"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
-)
-
-const wrapTextMarshalV2 = false
-
-// TextMarshaler is a configurable text format marshaler.
-type TextMarshaler struct {
- Compact bool // use compact text format (one line)
- ExpandAny bool // expand google.protobuf.Any messages of known types
-}
-
-// Marshal writes the proto text format of m to w.
-func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error {
- b, err := tm.marshal(m)
- if len(b) > 0 {
- if _, err := w.Write(b); err != nil {
- return err
- }
- }
- return err
-}
-
-// Text returns a proto text formatted string of m.
-func (tm *TextMarshaler) Text(m Message) string {
- b, _ := tm.marshal(m)
- return string(b)
-}
-
-func (tm *TextMarshaler) marshal(m Message) ([]byte, error) {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() {
- return []byte(""), nil
- }
-
- if wrapTextMarshalV2 {
- if m, ok := m.(encoding.TextMarshaler); ok {
- return m.MarshalText()
- }
-
- opts := prototext.MarshalOptions{
- AllowPartial: true,
- EmitUnknown: true,
- }
- if !tm.Compact {
- opts.Indent = " "
- }
- if !tm.ExpandAny {
- opts.Resolver = (*protoregistry.Types)(nil)
- }
- return opts.Marshal(mr.Interface())
- } else {
- w := &textWriter{
- compact: tm.Compact,
- expandAny: tm.ExpandAny,
- complete: true,
- }
-
- if m, ok := m.(encoding.TextMarshaler); ok {
- b, err := m.MarshalText()
- if err != nil {
- return nil, err
- }
- w.Write(b)
- return w.buf, nil
- }
-
- err := w.writeMessage(mr)
- return w.buf, err
- }
-}
-
-var (
- defaultTextMarshaler = TextMarshaler{}
- compactTextMarshaler = TextMarshaler{Compact: true}
-)
-
-// MarshalText writes the proto text format of m to w.
-func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) }
-
-// MarshalTextString returns a proto text formatted string of m.
-func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) }
-
-// CompactText writes the compact proto text format of m to w.
-func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) }
-
-// CompactTextString returns a compact proto text formatted string of m.
-func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) }
-
-var (
- newline = []byte("\n")
- endBraceNewline = []byte("}\n")
- posInf = []byte("inf")
- negInf = []byte("-inf")
- nan = []byte("nan")
-)
-
-// textWriter is an io.Writer that tracks its indentation level.
-type textWriter struct {
- compact bool // same as TextMarshaler.Compact
- expandAny bool // same as TextMarshaler.ExpandAny
- complete bool // whether the current position is a complete line
- indent int // indentation level; never negative
- buf []byte
-}
-
-func (w *textWriter) Write(p []byte) (n int, _ error) {
- newlines := bytes.Count(p, newline)
- if newlines == 0 {
- if !w.compact && w.complete {
- w.writeIndent()
- }
- w.buf = append(w.buf, p...)
- w.complete = false
- return len(p), nil
- }
-
- frags := bytes.SplitN(p, newline, newlines+1)
- if w.compact {
- for i, frag := range frags {
- if i > 0 {
- w.buf = append(w.buf, ' ')
- n++
- }
- w.buf = append(w.buf, frag...)
- n += len(frag)
- }
- return n, nil
- }
-
- for i, frag := range frags {
- if w.complete {
- w.writeIndent()
- }
- w.buf = append(w.buf, frag...)
- n += len(frag)
- if i+1 < len(frags) {
- w.buf = append(w.buf, '\n')
- n++
- }
- }
- w.complete = len(frags[len(frags)-1]) == 0
- return n, nil
-}
-
-func (w *textWriter) WriteByte(c byte) error {
- if w.compact && c == '\n' {
- c = ' '
- }
- if !w.compact && w.complete {
- w.writeIndent()
- }
- w.buf = append(w.buf, c)
- w.complete = c == '\n'
- return nil
-}
-
-func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) {
- if !w.compact && w.complete {
- w.writeIndent()
- }
- w.complete = false
-
- if fd.Kind() != protoreflect.GroupKind {
- w.buf = append(w.buf, fd.Name()...)
- w.WriteByte(':')
- } else {
- // Use message type name for group field name.
- w.buf = append(w.buf, fd.Message().Name()...)
- }
-
- if !w.compact {
- w.WriteByte(' ')
- }
-}
-
-func requiresQuotes(u string) bool {
- // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
- for _, ch := range u {
- switch {
- case ch == '.' || ch == '/' || ch == '_':
- continue
- case '0' <= ch && ch <= '9':
- continue
- case 'A' <= ch && ch <= 'Z':
- continue
- case 'a' <= ch && ch <= 'z':
- continue
- default:
- return true
- }
- }
- return false
-}
-
-// writeProto3Any writes an expanded google.protobuf.Any message.
-//
-// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
-// required messages are not linked in).
-//
-// It returns (true, error) when sv was written in expanded format or an error
-// was encountered.
-func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) {
- md := m.Descriptor()
- fdURL := md.Fields().ByName("type_url")
- fdVal := md.Fields().ByName("value")
-
- url := m.Get(fdURL).String()
- mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
- if err != nil {
- return false, nil
- }
-
- b := m.Get(fdVal).Bytes()
- m2 := mt.New()
- if err := proto.Unmarshal(b, m2.Interface()); err != nil {
- return false, nil
- }
- w.Write([]byte("["))
- if requiresQuotes(url) {
- w.writeQuotedString(url)
- } else {
- w.Write([]byte(url))
- }
- if w.compact {
- w.Write([]byte("]:<"))
- } else {
- w.Write([]byte("]: <\n"))
- w.indent++
- }
- if err := w.writeMessage(m2); err != nil {
- return true, err
- }
- if w.compact {
- w.Write([]byte("> "))
- } else {
- w.indent--
- w.Write([]byte(">\n"))
- }
- return true, nil
-}
-
-func (w *textWriter) writeMessage(m protoreflect.Message) error {
- md := m.Descriptor()
- if w.expandAny && md.FullName() == "google.protobuf.Any" {
- if canExpand, err := w.writeProto3Any(m); canExpand {
- return err
- }
- }
-
- fds := md.Fields()
- for i := 0; i < fds.Len(); {
- fd := fds.Get(i)
- if od := fd.ContainingOneof(); od != nil {
- fd = m.WhichOneof(od)
- i += od.Fields().Len()
- } else {
- i++
- }
- if fd == nil || !m.Has(fd) {
- continue
- }
-
- switch {
- case fd.IsList():
- lv := m.Get(fd).List()
- for j := 0; j < lv.Len(); j++ {
- w.writeName(fd)
- v := lv.Get(j)
- if err := w.writeSingularValue(v, fd); err != nil {
- return err
- }
- w.WriteByte('\n')
- }
- case fd.IsMap():
- kfd := fd.MapKey()
- vfd := fd.MapValue()
- mv := m.Get(fd).Map()
-
- type entry struct{ key, val protoreflect.Value }
- var entries []entry
- mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
- entries = append(entries, entry{k.Value(), v})
- return true
- })
- sort.Slice(entries, func(i, j int) bool {
- switch kfd.Kind() {
- case protoreflect.BoolKind:
- return !entries[i].key.Bool() && entries[j].key.Bool()
- case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
- return entries[i].key.Int() < entries[j].key.Int()
- case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
- return entries[i].key.Uint() < entries[j].key.Uint()
- case protoreflect.StringKind:
- return entries[i].key.String() < entries[j].key.String()
- default:
- panic("invalid kind")
- }
- })
- for _, entry := range entries {
- w.writeName(fd)
- w.WriteByte('<')
- if !w.compact {
- w.WriteByte('\n')
- }
- w.indent++
- w.writeName(kfd)
- if err := w.writeSingularValue(entry.key, kfd); err != nil {
- return err
- }
- w.WriteByte('\n')
- w.writeName(vfd)
- if err := w.writeSingularValue(entry.val, vfd); err != nil {
- return err
- }
- w.WriteByte('\n')
- w.indent--
- w.WriteByte('>')
- w.WriteByte('\n')
- }
- default:
- w.writeName(fd)
- if err := w.writeSingularValue(m.Get(fd), fd); err != nil {
- return err
- }
- w.WriteByte('\n')
- }
- }
-
- if b := m.GetUnknown(); len(b) > 0 {
- w.writeUnknownFields(b)
- }
- return w.writeExtensions(m)
-}
-
-func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
- switch fd.Kind() {
- case protoreflect.FloatKind, protoreflect.DoubleKind:
- switch vf := v.Float(); {
- case math.IsInf(vf, +1):
- w.Write(posInf)
- case math.IsInf(vf, -1):
- w.Write(negInf)
- case math.IsNaN(vf):
- w.Write(nan)
- default:
- fmt.Fprint(w, v.Interface())
- }
- case protoreflect.StringKind:
- // NOTE: This does not validate UTF-8 for historical reasons.
- w.writeQuotedString(string(v.String()))
- case protoreflect.BytesKind:
- w.writeQuotedString(string(v.Bytes()))
- case protoreflect.MessageKind, protoreflect.GroupKind:
- var bra, ket byte = '<', '>'
- if fd.Kind() == protoreflect.GroupKind {
- bra, ket = '{', '}'
- }
- w.WriteByte(bra)
- if !w.compact {
- w.WriteByte('\n')
- }
- w.indent++
- m := v.Message()
- if m2, ok := m.Interface().(encoding.TextMarshaler); ok {
- b, err := m2.MarshalText()
- if err != nil {
- return err
- }
- w.Write(b)
- } else {
- w.writeMessage(m)
- }
- w.indent--
- w.WriteByte(ket)
- case protoreflect.EnumKind:
- if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil {
- fmt.Fprint(w, ev.Name())
- } else {
- fmt.Fprint(w, v.Enum())
- }
- default:
- fmt.Fprint(w, v.Interface())
- }
- return nil
-}
-
-// writeQuotedString writes a quoted string in the protocol buffer text format.
-func (w *textWriter) writeQuotedString(s string) {
- w.WriteByte('"')
- for i := 0; i < len(s); i++ {
- switch c := s[i]; c {
- case '\n':
- w.buf = append(w.buf, `\n`...)
- case '\r':
- w.buf = append(w.buf, `\r`...)
- case '\t':
- w.buf = append(w.buf, `\t`...)
- case '"':
- w.buf = append(w.buf, `\"`...)
- case '\\':
- w.buf = append(w.buf, `\\`...)
- default:
- if isPrint := c >= 0x20 && c < 0x7f; isPrint {
- w.buf = append(w.buf, c)
- } else {
- w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...)
- }
- }
- }
- w.WriteByte('"')
-}
-
-func (w *textWriter) writeUnknownFields(b []byte) {
- if !w.compact {
- fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b))
- }
-
- for len(b) > 0 {
- num, wtyp, n := protowire.ConsumeTag(b)
- if n < 0 {
- return
- }
- b = b[n:]
-
- if wtyp == protowire.EndGroupType {
- w.indent--
- w.Write(endBraceNewline)
- continue
- }
- fmt.Fprint(w, num)
- if wtyp != protowire.StartGroupType {
- w.WriteByte(':')
- }
- if !w.compact || wtyp == protowire.StartGroupType {
- w.WriteByte(' ')
- }
- switch wtyp {
- case protowire.VarintType:
- v, n := protowire.ConsumeVarint(b)
- if n < 0 {
- return
- }
- b = b[n:]
- fmt.Fprint(w, v)
- case protowire.Fixed32Type:
- v, n := protowire.ConsumeFixed32(b)
- if n < 0 {
- return
- }
- b = b[n:]
- fmt.Fprint(w, v)
- case protowire.Fixed64Type:
- v, n := protowire.ConsumeFixed64(b)
- if n < 0 {
- return
- }
- b = b[n:]
- fmt.Fprint(w, v)
- case protowire.BytesType:
- v, n := protowire.ConsumeBytes(b)
- if n < 0 {
- return
- }
- b = b[n:]
- fmt.Fprintf(w, "%q", v)
- case protowire.StartGroupType:
- w.WriteByte('{')
- w.indent++
- default:
- fmt.Fprintf(w, "/* unknown wire type %d */", wtyp)
- }
- w.WriteByte('\n')
- }
-}
-
-// writeExtensions writes all the extensions in m.
-func (w *textWriter) writeExtensions(m protoreflect.Message) error {
- md := m.Descriptor()
- if md.ExtensionRanges().Len() == 0 {
- return nil
- }
-
- type ext struct {
- desc protoreflect.FieldDescriptor
- val protoreflect.Value
- }
- var exts []ext
- m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
- if fd.IsExtension() {
- exts = append(exts, ext{fd, v})
- }
- return true
- })
- sort.Slice(exts, func(i, j int) bool {
- return exts[i].desc.Number() < exts[j].desc.Number()
- })
-
- for _, ext := range exts {
- // For message set, use the name of the message as the extension name.
- name := string(ext.desc.FullName())
- if isMessageSet(ext.desc.ContainingMessage()) {
- name = strings.TrimSuffix(name, ".message_set_extension")
- }
-
- if !ext.desc.IsList() {
- if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil {
- return err
- }
- } else {
- lv := ext.val.List()
- for i := 0; i < lv.Len(); i++ {
- if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil {
- return err
- }
- }
- }
- }
- return nil
-}
-
-func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
- fmt.Fprintf(w, "[%s]:", name)
- if !w.compact {
- w.WriteByte(' ')
- }
- if err := w.writeSingularValue(v, fd); err != nil {
- return err
- }
- w.WriteByte('\n')
- return nil
-}
-
-func (w *textWriter) writeIndent() {
- if !w.complete {
- return
- }
- for i := 0; i < w.indent*2; i++ {
- w.buf = append(w.buf, ' ')
- }
- w.complete = false
-}
diff --git a/vendor/github.com/golang/protobuf/proto/wire.go b/vendor/github.com/golang/protobuf/proto/wire.go
deleted file mode 100644
index d7c28da5..00000000
--- a/vendor/github.com/golang/protobuf/proto/wire.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- protoV2 "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/runtime/protoiface"
-)
-
-// Size returns the size in bytes of the wire-format encoding of m.
-func Size(m Message) int {
- if m == nil {
- return 0
- }
- mi := MessageV2(m)
- return protoV2.Size(mi)
-}
-
-// Marshal returns the wire-format encoding of m.
-func Marshal(m Message) ([]byte, error) {
- b, err := marshalAppend(nil, m, false)
- if b == nil {
- b = zeroBytes
- }
- return b, err
-}
-
-var zeroBytes = make([]byte, 0, 0)
-
-func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) {
- if m == nil {
- return nil, ErrNil
- }
- mi := MessageV2(m)
- nbuf, err := protoV2.MarshalOptions{
- Deterministic: deterministic,
- AllowPartial: true,
- }.MarshalAppend(buf, mi)
- if err != nil {
- return buf, err
- }
- if len(buf) == len(nbuf) {
- if !mi.ProtoReflect().IsValid() {
- return buf, ErrNil
- }
- }
- return nbuf, checkRequiredNotSet(mi)
-}
-
-// Unmarshal parses a wire-format message in b and places the decoded results in m.
-//
-// Unmarshal resets m before starting to unmarshal, so any existing data in m is always
-// removed. Use UnmarshalMerge to preserve and append to existing data.
-func Unmarshal(b []byte, m Message) error {
- m.Reset()
- return UnmarshalMerge(b, m)
-}
-
-// UnmarshalMerge parses a wire-format message in b and places the decoded results in m.
-func UnmarshalMerge(b []byte, m Message) error {
- mi := MessageV2(m)
- out, err := protoV2.UnmarshalOptions{
- AllowPartial: true,
- Merge: true,
- }.UnmarshalState(protoiface.UnmarshalInput{
- Buf: b,
- Message: mi.ProtoReflect(),
- })
- if err != nil {
- return err
- }
- if out.Flags&protoiface.UnmarshalInitialized > 0 {
- return nil
- }
- return checkRequiredNotSet(mi)
-}
diff --git a/vendor/github.com/golang/protobuf/proto/wrappers.go b/vendor/github.com/golang/protobuf/proto/wrappers.go
deleted file mode 100644
index 398e3485..00000000
--- a/vendor/github.com/golang/protobuf/proto/wrappers.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-// Bool stores v in a new bool value and returns a pointer to it.
-func Bool(v bool) *bool { return &v }
-
-// Int stores v in a new int32 value and returns a pointer to it.
-//
-// Deprecated: Use Int32 instead.
-func Int(v int) *int32 { return Int32(int32(v)) }
-
-// Int32 stores v in a new int32 value and returns a pointer to it.
-func Int32(v int32) *int32 { return &v }
-
-// Int64 stores v in a new int64 value and returns a pointer to it.
-func Int64(v int64) *int64 { return &v }
-
-// Uint32 stores v in a new uint32 value and returns a pointer to it.
-func Uint32(v uint32) *uint32 { return &v }
-
-// Uint64 stores v in a new uint64 value and returns a pointer to it.
-func Uint64(v uint64) *uint64 { return &v }
-
-// Float32 stores v in a new float32 value and returns a pointer to it.
-func Float32(v float32) *float32 { return &v }
-
-// Float64 stores v in a new float64 value and returns a pointer to it.
-func Float64(v float64) *float64 { return &v }
-
-// String stores v in a new string value and returns a pointer to it.
-func String(v string) *string { return &v }
diff --git a/vendor/github.com/gorilla/websocket/.gitignore b/vendor/github.com/gorilla/websocket/.gitignore
deleted file mode 100644
index cd3fcd1e..00000000
--- a/vendor/github.com/gorilla/websocket/.gitignore
+++ /dev/null
@@ -1,25 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-
-.idea/
-*.iml
diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS
deleted file mode 100644
index 1931f400..00000000
--- a/vendor/github.com/gorilla/websocket/AUTHORS
+++ /dev/null
@@ -1,9 +0,0 @@
-# This is the official list of Gorilla WebSocket authors for copyright
-# purposes.
-#
-# Please keep the list sorted.
-
-Gary Burd
-Google LLC (https://opensource.google.com/)
-Joachim Bauch
-
diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE
deleted file mode 100644
index 9171c972..00000000
--- a/vendor/github.com/gorilla/websocket/LICENSE
+++ /dev/null
@@ -1,22 +0,0 @@
-Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-
- Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md
deleted file mode 100644
index 2517a287..00000000
--- a/vendor/github.com/gorilla/websocket/README.md
+++ /dev/null
@@ -1,39 +0,0 @@
-# Gorilla WebSocket
-
-[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket)
-[![CircleCI](https://circleci.com/gh/gorilla/websocket.svg?style=svg)](https://circleci.com/gh/gorilla/websocket)
-
-Gorilla WebSocket is a [Go](http://golang.org/) implementation of the
-[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol.
-
-
----
-
-⚠️ **[The Gorilla WebSocket Package is looking for a new maintainer](https://github.com/gorilla/websocket/issues/370)**
-
----
-
-### Documentation
-
-* [API Reference](https://pkg.go.dev/github.com/gorilla/websocket?tab=doc)
-* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat)
-* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command)
-* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo)
-* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch)
-
-### Status
-
-The Gorilla WebSocket package provides a complete and tested implementation of
-the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The
-package API is stable.
-
-### Installation
-
- go get github.com/gorilla/websocket
-
-### Protocol Compliance
-
-The Gorilla WebSocket package passes the server tests in the [Autobahn Test
-Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn
-subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn).
-
diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go
deleted file mode 100644
index 2efd8355..00000000
--- a/vendor/github.com/gorilla/websocket/client.go
+++ /dev/null
@@ -1,422 +0,0 @@
-// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package websocket
-
-import (
- "bytes"
- "context"
- "crypto/tls"
- "errors"
- "io"
- "io/ioutil"
- "net"
- "net/http"
- "net/http/httptrace"
- "net/url"
- "strings"
- "time"
-)
-
-// ErrBadHandshake is returned when the server response to opening handshake is
-// invalid.
-var ErrBadHandshake = errors.New("websocket: bad handshake")
-
-var errInvalidCompression = errors.New("websocket: invalid compression negotiation")
-
-// NewClient creates a new client connection using the given net connection.
-// The URL u specifies the host and request URI. Use requestHeader to specify
-// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies
-// (Cookie). Use the response.Header to get the selected subprotocol
-// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
-//
-// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
-// non-nil *http.Response so that callers can handle redirects, authentication,
-// etc.
-//
-// Deprecated: Use Dialer instead.
-func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) {
- d := Dialer{
- ReadBufferSize: readBufSize,
- WriteBufferSize: writeBufSize,
- NetDial: func(net, addr string) (net.Conn, error) {
- return netConn, nil
- },
- }
- return d.Dial(u.String(), requestHeader)
-}
-
-// A Dialer contains options for connecting to WebSocket server.
-//
-// It is safe to call Dialer's methods concurrently.
-type Dialer struct {
- // NetDial specifies the dial function for creating TCP connections. If
- // NetDial is nil, net.Dial is used.
- NetDial func(network, addr string) (net.Conn, error)
-
- // NetDialContext specifies the dial function for creating TCP connections. If
- // NetDialContext is nil, NetDial is used.
- NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error)
-
- // NetDialTLSContext specifies the dial function for creating TLS/TCP connections. If
- // NetDialTLSContext is nil, NetDialContext is used.
- // If NetDialTLSContext is set, Dial assumes the TLS handshake is done there and
- // TLSClientConfig is ignored.
- NetDialTLSContext func(ctx context.Context, network, addr string) (net.Conn, error)
-
- // Proxy specifies a function to return a proxy for a given
- // Request. If the function returns a non-nil error, the
- // request is aborted with the provided error.
- // If Proxy is nil or returns a nil *URL, no proxy is used.
- Proxy func(*http.Request) (*url.URL, error)
-
- // TLSClientConfig specifies the TLS configuration to use with tls.Client.
- // If nil, the default configuration is used.
- // If either NetDialTLS or NetDialTLSContext are set, Dial assumes the TLS handshake
- // is done there and TLSClientConfig is ignored.
- TLSClientConfig *tls.Config
-
- // HandshakeTimeout specifies the duration for the handshake to complete.
- HandshakeTimeout time.Duration
-
- // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer
- // size is zero, then a useful default size is used. The I/O buffer sizes
- // do not limit the size of the messages that can be sent or received.
- ReadBufferSize, WriteBufferSize int
-
- // WriteBufferPool is a pool of buffers for write operations. If the value
- // is not set, then write buffers are allocated to the connection for the
- // lifetime of the connection.
- //
- // A pool is most useful when the application has a modest volume of writes
- // across a large number of connections.
- //
- // Applications should use a single pool for each unique value of
- // WriteBufferSize.
- WriteBufferPool BufferPool
-
- // Subprotocols specifies the client's requested subprotocols.
- Subprotocols []string
-
- // EnableCompression specifies if the client should attempt to negotiate
- // per message compression (RFC 7692). Setting this value to true does not
- // guarantee that compression will be supported. Currently only "no context
- // takeover" modes are supported.
- EnableCompression bool
-
- // Jar specifies the cookie jar.
- // If Jar is nil, cookies are not sent in requests and ignored
- // in responses.
- Jar http.CookieJar
-}
-
-// Dial creates a new client connection by calling DialContext with a background context.
-func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
- return d.DialContext(context.Background(), urlStr, requestHeader)
-}
-
-var errMalformedURL = errors.New("malformed ws or wss URL")
-
-func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
- hostPort = u.Host
- hostNoPort = u.Host
- if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") {
- hostNoPort = hostNoPort[:i]
- } else {
- switch u.Scheme {
- case "wss":
- hostPort += ":443"
- case "https":
- hostPort += ":443"
- default:
- hostPort += ":80"
- }
- }
- return hostPort, hostNoPort
-}
-
-// DefaultDialer is a dialer with all fields set to the default values.
-var DefaultDialer = &Dialer{
- Proxy: http.ProxyFromEnvironment,
- HandshakeTimeout: 45 * time.Second,
-}
-
-// nilDialer is dialer to use when receiver is nil.
-var nilDialer = *DefaultDialer
-
-// DialContext creates a new client connection. Use requestHeader to specify the
-// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie).
-// Use the response.Header to get the selected subprotocol
-// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
-//
-// The context will be used in the request and in the Dialer.
-//
-// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
-// non-nil *http.Response so that callers can handle redirects, authentication,
-// etcetera. The response body may not contain the entire response and does not
-// need to be closed by the application.
-func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
- if d == nil {
- d = &nilDialer
- }
-
- challengeKey, err := generateChallengeKey()
- if err != nil {
- return nil, nil, err
- }
-
- u, err := url.Parse(urlStr)
- if err != nil {
- return nil, nil, err
- }
-
- switch u.Scheme {
- case "ws":
- u.Scheme = "http"
- case "wss":
- u.Scheme = "https"
- default:
- return nil, nil, errMalformedURL
- }
-
- if u.User != nil {
- // User name and password are not allowed in websocket URIs.
- return nil, nil, errMalformedURL
- }
-
- req := &http.Request{
- Method: http.MethodGet,
- URL: u,
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- Header: make(http.Header),
- Host: u.Host,
- }
- req = req.WithContext(ctx)
-
- // Set the cookies present in the cookie jar of the dialer
- if d.Jar != nil {
- for _, cookie := range d.Jar.Cookies(u) {
- req.AddCookie(cookie)
- }
- }
-
- // Set the request headers using the capitalization for names and values in
- // RFC examples. Although the capitalization shouldn't matter, there are
- // servers that depend on it. The Header.Set method is not used because the
- // method canonicalizes the header names.
- req.Header["Upgrade"] = []string{"websocket"}
- req.Header["Connection"] = []string{"Upgrade"}
- req.Header["Sec-WebSocket-Key"] = []string{challengeKey}
- req.Header["Sec-WebSocket-Version"] = []string{"13"}
- if len(d.Subprotocols) > 0 {
- req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")}
- }
- for k, vs := range requestHeader {
- switch {
- case k == "Host":
- if len(vs) > 0 {
- req.Host = vs[0]
- }
- case k == "Upgrade" ||
- k == "Connection" ||
- k == "Sec-Websocket-Key" ||
- k == "Sec-Websocket-Version" ||
- k == "Sec-Websocket-Extensions" ||
- (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0):
- return nil, nil, errors.New("websocket: duplicate header not allowed: " + k)
- case k == "Sec-Websocket-Protocol":
- req.Header["Sec-WebSocket-Protocol"] = vs
- default:
- req.Header[k] = vs
- }
- }
-
- if d.EnableCompression {
- req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"}
- }
-
- if d.HandshakeTimeout != 0 {
- var cancel func()
- ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout)
- defer cancel()
- }
-
- // Get network dial function.
- var netDial func(network, add string) (net.Conn, error)
-
- switch u.Scheme {
- case "http":
- if d.NetDialContext != nil {
- netDial = func(network, addr string) (net.Conn, error) {
- return d.NetDialContext(ctx, network, addr)
- }
- } else if d.NetDial != nil {
- netDial = d.NetDial
- }
- case "https":
- if d.NetDialTLSContext != nil {
- netDial = func(network, addr string) (net.Conn, error) {
- return d.NetDialTLSContext(ctx, network, addr)
- }
- } else if d.NetDialContext != nil {
- netDial = func(network, addr string) (net.Conn, error) {
- return d.NetDialContext(ctx, network, addr)
- }
- } else if d.NetDial != nil {
- netDial = d.NetDial
- }
- default:
- return nil, nil, errMalformedURL
- }
-
- if netDial == nil {
- netDialer := &net.Dialer{}
- netDial = func(network, addr string) (net.Conn, error) {
- return netDialer.DialContext(ctx, network, addr)
- }
- }
-
- // If needed, wrap the dial function to set the connection deadline.
- if deadline, ok := ctx.Deadline(); ok {
- forwardDial := netDial
- netDial = func(network, addr string) (net.Conn, error) {
- c, err := forwardDial(network, addr)
- if err != nil {
- return nil, err
- }
- err = c.SetDeadline(deadline)
- if err != nil {
- c.Close()
- return nil, err
- }
- return c, nil
- }
- }
-
- // If needed, wrap the dial function to connect through a proxy.
- if d.Proxy != nil {
- proxyURL, err := d.Proxy(req)
- if err != nil {
- return nil, nil, err
- }
- if proxyURL != nil {
- dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial))
- if err != nil {
- return nil, nil, err
- }
- netDial = dialer.Dial
- }
- }
-
- hostPort, hostNoPort := hostPortNoPort(u)
- trace := httptrace.ContextClientTrace(ctx)
- if trace != nil && trace.GetConn != nil {
- trace.GetConn(hostPort)
- }
-
- netConn, err := netDial("tcp", hostPort)
- if trace != nil && trace.GotConn != nil {
- trace.GotConn(httptrace.GotConnInfo{
- Conn: netConn,
- })
- }
- if err != nil {
- return nil, nil, err
- }
-
- defer func() {
- if netConn != nil {
- netConn.Close()
- }
- }()
-
- if u.Scheme == "https" && d.NetDialTLSContext == nil {
- // If NetDialTLSContext is set, assume that the TLS handshake has already been done
-
- cfg := cloneTLSConfig(d.TLSClientConfig)
- if cfg.ServerName == "" {
- cfg.ServerName = hostNoPort
- }
- tlsConn := tls.Client(netConn, cfg)
- netConn = tlsConn
-
- if trace != nil && trace.TLSHandshakeStart != nil {
- trace.TLSHandshakeStart()
- }
- err := doHandshake(ctx, tlsConn, cfg)
- if trace != nil && trace.TLSHandshakeDone != nil {
- trace.TLSHandshakeDone(tlsConn.ConnectionState(), err)
- }
-
- if err != nil {
- return nil, nil, err
- }
- }
-
- conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil)
-
- if err := req.Write(netConn); err != nil {
- return nil, nil, err
- }
-
- if trace != nil && trace.GotFirstResponseByte != nil {
- if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 {
- trace.GotFirstResponseByte()
- }
- }
-
- resp, err := http.ReadResponse(conn.br, req)
- if err != nil {
- return nil, nil, err
- }
-
- if d.Jar != nil {
- if rc := resp.Cookies(); len(rc) > 0 {
- d.Jar.SetCookies(u, rc)
- }
- }
-
- if resp.StatusCode != 101 ||
- !tokenListContainsValue(resp.Header, "Upgrade", "websocket") ||
- !tokenListContainsValue(resp.Header, "Connection", "upgrade") ||
- resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) {
- // Before closing the network connection on return from this
- // function, slurp up some of the response to aid application
- // debugging.
- buf := make([]byte, 1024)
- n, _ := io.ReadFull(resp.Body, buf)
- resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n]))
- return nil, resp, ErrBadHandshake
- }
-
- for _, ext := range parseExtensions(resp.Header) {
- if ext[""] != "permessage-deflate" {
- continue
- }
- _, snct := ext["server_no_context_takeover"]
- _, cnct := ext["client_no_context_takeover"]
- if !snct || !cnct {
- return nil, resp, errInvalidCompression
- }
- conn.newCompressionWriter = compressNoContextTakeover
- conn.newDecompressionReader = decompressNoContextTakeover
- break
- }
-
- resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
- conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol")
-
- netConn.SetDeadline(time.Time{})
- netConn = nil // to avoid close in defer.
- return conn, resp, nil
-}
-
-func cloneTLSConfig(cfg *tls.Config) *tls.Config {
- if cfg == nil {
- return &tls.Config{}
- }
- return cfg.Clone()
-}
diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go
deleted file mode 100644
index 813ffb1e..00000000
--- a/vendor/github.com/gorilla/websocket/compression.go
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package websocket
-
-import (
- "compress/flate"
- "errors"
- "io"
- "strings"
- "sync"
-)
-
-const (
- minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6
- maxCompressionLevel = flate.BestCompression
- defaultCompressionLevel = 1
-)
-
-var (
- flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool
- flateReaderPool = sync.Pool{New: func() interface{} {
- return flate.NewReader(nil)
- }}
-)
-
-func decompressNoContextTakeover(r io.Reader) io.ReadCloser {
- const tail =
- // Add four bytes as specified in RFC
- "\x00\x00\xff\xff" +
- // Add final block to squelch unexpected EOF error from flate reader.
- "\x01\x00\x00\xff\xff"
-
- fr, _ := flateReaderPool.Get().(io.ReadCloser)
- fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil)
- return &flateReadWrapper{fr}
-}
-
-func isValidCompressionLevel(level int) bool {
- return minCompressionLevel <= level && level <= maxCompressionLevel
-}
-
-func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser {
- p := &flateWriterPools[level-minCompressionLevel]
- tw := &truncWriter{w: w}
- fw, _ := p.Get().(*flate.Writer)
- if fw == nil {
- fw, _ = flate.NewWriter(tw, level)
- } else {
- fw.Reset(tw)
- }
- return &flateWriteWrapper{fw: fw, tw: tw, p: p}
-}
-
-// truncWriter is an io.Writer that writes all but the last four bytes of the
-// stream to another io.Writer.
-type truncWriter struct {
- w io.WriteCloser
- n int
- p [4]byte
-}
-
-func (w *truncWriter) Write(p []byte) (int, error) {
- n := 0
-
- // fill buffer first for simplicity.
- if w.n < len(w.p) {
- n = copy(w.p[w.n:], p)
- p = p[n:]
- w.n += n
- if len(p) == 0 {
- return n, nil
- }
- }
-
- m := len(p)
- if m > len(w.p) {
- m = len(w.p)
- }
-
- if nn, err := w.w.Write(w.p[:m]); err != nil {
- return n + nn, err
- }
-
- copy(w.p[:], w.p[m:])
- copy(w.p[len(w.p)-m:], p[len(p)-m:])
- nn, err := w.w.Write(p[:len(p)-m])
- return n + nn, err
-}
-
-type flateWriteWrapper struct {
- fw *flate.Writer
- tw *truncWriter
- p *sync.Pool
-}
-
-func (w *flateWriteWrapper) Write(p []byte) (int, error) {
- if w.fw == nil {
- return 0, errWriteClosed
- }
- return w.fw.Write(p)
-}
-
-func (w *flateWriteWrapper) Close() error {
- if w.fw == nil {
- return errWriteClosed
- }
- err1 := w.fw.Flush()
- w.p.Put(w.fw)
- w.fw = nil
- if w.tw.p != [4]byte{0, 0, 0xff, 0xff} {
- return errors.New("websocket: internal error, unexpected bytes at end of flate stream")
- }
- err2 := w.tw.w.Close()
- if err1 != nil {
- return err1
- }
- return err2
-}
-
-type flateReadWrapper struct {
- fr io.ReadCloser
-}
-
-func (r *flateReadWrapper) Read(p []byte) (int, error) {
- if r.fr == nil {
- return 0, io.ErrClosedPipe
- }
- n, err := r.fr.Read(p)
- if err == io.EOF {
- // Preemptively place the reader back in the pool. This helps with
- // scenarios where the application does not call NextReader() soon after
- // this final read.
- r.Close()
- }
- return n, err
-}
-
-func (r *flateReadWrapper) Close() error {
- if r.fr == nil {
- return io.ErrClosedPipe
- }
- err := r.fr.Close()
- flateReaderPool.Put(r.fr)
- r.fr = nil
- return err
-}
diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go
deleted file mode 100644
index 331eebc8..00000000
--- a/vendor/github.com/gorilla/websocket/conn.go
+++ /dev/null
@@ -1,1230 +0,0 @@
-// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package websocket
-
-import (
- "bufio"
- "encoding/binary"
- "errors"
- "io"
- "io/ioutil"
- "math/rand"
- "net"
- "strconv"
- "strings"
- "sync"
- "time"
- "unicode/utf8"
-)
-
-const (
- // Frame header byte 0 bits from Section 5.2 of RFC 6455
- finalBit = 1 << 7
- rsv1Bit = 1 << 6
- rsv2Bit = 1 << 5
- rsv3Bit = 1 << 4
-
- // Frame header byte 1 bits from Section 5.2 of RFC 6455
- maskBit = 1 << 7
-
- maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask
- maxControlFramePayloadSize = 125
-
- writeWait = time.Second
-
- defaultReadBufferSize = 4096
- defaultWriteBufferSize = 4096
-
- continuationFrame = 0
- noFrame = -1
-)
-
-// Close codes defined in RFC 6455, section 11.7.
-const (
- CloseNormalClosure = 1000
- CloseGoingAway = 1001
- CloseProtocolError = 1002
- CloseUnsupportedData = 1003
- CloseNoStatusReceived = 1005
- CloseAbnormalClosure = 1006
- CloseInvalidFramePayloadData = 1007
- ClosePolicyViolation = 1008
- CloseMessageTooBig = 1009
- CloseMandatoryExtension = 1010
- CloseInternalServerErr = 1011
- CloseServiceRestart = 1012
- CloseTryAgainLater = 1013
- CloseTLSHandshake = 1015
-)
-
-// The message types are defined in RFC 6455, section 11.8.
-const (
- // TextMessage denotes a text data message. The text message payload is
- // interpreted as UTF-8 encoded text data.
- TextMessage = 1
-
- // BinaryMessage denotes a binary data message.
- BinaryMessage = 2
-
- // CloseMessage denotes a close control message. The optional message
- // payload contains a numeric code and text. Use the FormatCloseMessage
- // function to format a close message payload.
- CloseMessage = 8
-
- // PingMessage denotes a ping control message. The optional message payload
- // is UTF-8 encoded text.
- PingMessage = 9
-
- // PongMessage denotes a pong control message. The optional message payload
- // is UTF-8 encoded text.
- PongMessage = 10
-)
-
-// ErrCloseSent is returned when the application writes a message to the
-// connection after sending a close message.
-var ErrCloseSent = errors.New("websocket: close sent")
-
-// ErrReadLimit is returned when reading a message that is larger than the
-// read limit set for the connection.
-var ErrReadLimit = errors.New("websocket: read limit exceeded")
-
-// netError satisfies the net Error interface.
-type netError struct {
- msg string
- temporary bool
- timeout bool
-}
-
-func (e *netError) Error() string { return e.msg }
-func (e *netError) Temporary() bool { return e.temporary }
-func (e *netError) Timeout() bool { return e.timeout }
-
-// CloseError represents a close message.
-type CloseError struct {
- // Code is defined in RFC 6455, section 11.7.
- Code int
-
- // Text is the optional text payload.
- Text string
-}
-
-func (e *CloseError) Error() string {
- s := []byte("websocket: close ")
- s = strconv.AppendInt(s, int64(e.Code), 10)
- switch e.Code {
- case CloseNormalClosure:
- s = append(s, " (normal)"...)
- case CloseGoingAway:
- s = append(s, " (going away)"...)
- case CloseProtocolError:
- s = append(s, " (protocol error)"...)
- case CloseUnsupportedData:
- s = append(s, " (unsupported data)"...)
- case CloseNoStatusReceived:
- s = append(s, " (no status)"...)
- case CloseAbnormalClosure:
- s = append(s, " (abnormal closure)"...)
- case CloseInvalidFramePayloadData:
- s = append(s, " (invalid payload data)"...)
- case ClosePolicyViolation:
- s = append(s, " (policy violation)"...)
- case CloseMessageTooBig:
- s = append(s, " (message too big)"...)
- case CloseMandatoryExtension:
- s = append(s, " (mandatory extension missing)"...)
- case CloseInternalServerErr:
- s = append(s, " (internal server error)"...)
- case CloseTLSHandshake:
- s = append(s, " (TLS handshake error)"...)
- }
- if e.Text != "" {
- s = append(s, ": "...)
- s = append(s, e.Text...)
- }
- return string(s)
-}
-
-// IsCloseError returns boolean indicating whether the error is a *CloseError
-// with one of the specified codes.
-func IsCloseError(err error, codes ...int) bool {
- if e, ok := err.(*CloseError); ok {
- for _, code := range codes {
- if e.Code == code {
- return true
- }
- }
- }
- return false
-}
-
-// IsUnexpectedCloseError returns boolean indicating whether the error is a
-// *CloseError with a code not in the list of expected codes.
-func IsUnexpectedCloseError(err error, expectedCodes ...int) bool {
- if e, ok := err.(*CloseError); ok {
- for _, code := range expectedCodes {
- if e.Code == code {
- return false
- }
- }
- return true
- }
- return false
-}
-
-var (
- errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true, temporary: true}
- errUnexpectedEOF = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()}
- errBadWriteOpCode = errors.New("websocket: bad write message type")
- errWriteClosed = errors.New("websocket: write closed")
- errInvalidControlFrame = errors.New("websocket: invalid control frame")
-)
-
-func newMaskKey() [4]byte {
- n := rand.Uint32()
- return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)}
-}
-
-func hideTempErr(err error) error {
- if e, ok := err.(net.Error); ok && e.Temporary() {
- err = &netError{msg: e.Error(), timeout: e.Timeout()}
- }
- return err
-}
-
-func isControl(frameType int) bool {
- return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage
-}
-
-func isData(frameType int) bool {
- return frameType == TextMessage || frameType == BinaryMessage
-}
-
-var validReceivedCloseCodes = map[int]bool{
- // see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number
-
- CloseNormalClosure: true,
- CloseGoingAway: true,
- CloseProtocolError: true,
- CloseUnsupportedData: true,
- CloseNoStatusReceived: false,
- CloseAbnormalClosure: false,
- CloseInvalidFramePayloadData: true,
- ClosePolicyViolation: true,
- CloseMessageTooBig: true,
- CloseMandatoryExtension: true,
- CloseInternalServerErr: true,
- CloseServiceRestart: true,
- CloseTryAgainLater: true,
- CloseTLSHandshake: false,
-}
-
-func isValidReceivedCloseCode(code int) bool {
- return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999)
-}
-
-// BufferPool represents a pool of buffers. The *sync.Pool type satisfies this
-// interface. The type of the value stored in a pool is not specified.
-type BufferPool interface {
- // Get gets a value from the pool or returns nil if the pool is empty.
- Get() interface{}
- // Put adds a value to the pool.
- Put(interface{})
-}
-
-// writePoolData is the type added to the write buffer pool. This wrapper is
-// used to prevent applications from peeking at and depending on the values
-// added to the pool.
-type writePoolData struct{ buf []byte }
-
-// The Conn type represents a WebSocket connection.
-type Conn struct {
- conn net.Conn
- isServer bool
- subprotocol string
-
- // Write fields
- mu chan struct{} // used as mutex to protect write to conn
- writeBuf []byte // frame is constructed in this buffer.
- writePool BufferPool
- writeBufSize int
- writeDeadline time.Time
- writer io.WriteCloser // the current writer returned to the application
- isWriting bool // for best-effort concurrent write detection
-
- writeErrMu sync.Mutex
- writeErr error
-
- enableWriteCompression bool
- compressionLevel int
- newCompressionWriter func(io.WriteCloser, int) io.WriteCloser
-
- // Read fields
- reader io.ReadCloser // the current reader returned to the application
- readErr error
- br *bufio.Reader
- // bytes remaining in current frame.
- // set setReadRemaining to safely update this value and prevent overflow
- readRemaining int64
- readFinal bool // true the current message has more frames.
- readLength int64 // Message size.
- readLimit int64 // Maximum message size.
- readMaskPos int
- readMaskKey [4]byte
- handlePong func(string) error
- handlePing func(string) error
- handleClose func(int, string) error
- readErrCount int
- messageReader *messageReader // the current low-level reader
-
- readDecompress bool // whether last read frame had RSV1 set
- newDecompressionReader func(io.Reader) io.ReadCloser
-}
-
-func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, writeBufferPool BufferPool, br *bufio.Reader, writeBuf []byte) *Conn {
-
- if br == nil {
- if readBufferSize == 0 {
- readBufferSize = defaultReadBufferSize
- } else if readBufferSize < maxControlFramePayloadSize {
- // must be large enough for control frame
- readBufferSize = maxControlFramePayloadSize
- }
- br = bufio.NewReaderSize(conn, readBufferSize)
- }
-
- if writeBufferSize <= 0 {
- writeBufferSize = defaultWriteBufferSize
- }
- writeBufferSize += maxFrameHeaderSize
-
- if writeBuf == nil && writeBufferPool == nil {
- writeBuf = make([]byte, writeBufferSize)
- }
-
- mu := make(chan struct{}, 1)
- mu <- struct{}{}
- c := &Conn{
- isServer: isServer,
- br: br,
- conn: conn,
- mu: mu,
- readFinal: true,
- writeBuf: writeBuf,
- writePool: writeBufferPool,
- writeBufSize: writeBufferSize,
- enableWriteCompression: true,
- compressionLevel: defaultCompressionLevel,
- }
- c.SetCloseHandler(nil)
- c.SetPingHandler(nil)
- c.SetPongHandler(nil)
- return c
-}
-
-// setReadRemaining tracks the number of bytes remaining on the connection. If n
-// overflows, an ErrReadLimit is returned.
-func (c *Conn) setReadRemaining(n int64) error {
- if n < 0 {
- return ErrReadLimit
- }
-
- c.readRemaining = n
- return nil
-}
-
-// Subprotocol returns the negotiated protocol for the connection.
-func (c *Conn) Subprotocol() string {
- return c.subprotocol
-}
-
-// Close closes the underlying network connection without sending or waiting
-// for a close message.
-func (c *Conn) Close() error {
- return c.conn.Close()
-}
-
-// LocalAddr returns the local network address.
-func (c *Conn) LocalAddr() net.Addr {
- return c.conn.LocalAddr()
-}
-
-// RemoteAddr returns the remote network address.
-func (c *Conn) RemoteAddr() net.Addr {
- return c.conn.RemoteAddr()
-}
-
-// Write methods
-
-func (c *Conn) writeFatal(err error) error {
- err = hideTempErr(err)
- c.writeErrMu.Lock()
- if c.writeErr == nil {
- c.writeErr = err
- }
- c.writeErrMu.Unlock()
- return err
-}
-
-func (c *Conn) read(n int) ([]byte, error) {
- p, err := c.br.Peek(n)
- if err == io.EOF {
- err = errUnexpectedEOF
- }
- c.br.Discard(len(p))
- return p, err
-}
-
-func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error {
- <-c.mu
- defer func() { c.mu <- struct{}{} }()
-
- c.writeErrMu.Lock()
- err := c.writeErr
- c.writeErrMu.Unlock()
- if err != nil {
- return err
- }
-
- c.conn.SetWriteDeadline(deadline)
- if len(buf1) == 0 {
- _, err = c.conn.Write(buf0)
- } else {
- err = c.writeBufs(buf0, buf1)
- }
- if err != nil {
- return c.writeFatal(err)
- }
- if frameType == CloseMessage {
- c.writeFatal(ErrCloseSent)
- }
- return nil
-}
-
-func (c *Conn) writeBufs(bufs ...[]byte) error {
- b := net.Buffers(bufs)
- _, err := b.WriteTo(c.conn)
- return err
-}
-
-// WriteControl writes a control message with the given deadline. The allowed
-// message types are CloseMessage, PingMessage and PongMessage.
-func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error {
- if !isControl(messageType) {
- return errBadWriteOpCode
- }
- if len(data) > maxControlFramePayloadSize {
- return errInvalidControlFrame
- }
-
- b0 := byte(messageType) | finalBit
- b1 := byte(len(data))
- if !c.isServer {
- b1 |= maskBit
- }
-
- buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize)
- buf = append(buf, b0, b1)
-
- if c.isServer {
- buf = append(buf, data...)
- } else {
- key := newMaskKey()
- buf = append(buf, key[:]...)
- buf = append(buf, data...)
- maskBytes(key, 0, buf[6:])
- }
-
- d := 1000 * time.Hour
- if !deadline.IsZero() {
- d = deadline.Sub(time.Now())
- if d < 0 {
- return errWriteTimeout
- }
- }
-
- timer := time.NewTimer(d)
- select {
- case <-c.mu:
- timer.Stop()
- case <-timer.C:
- return errWriteTimeout
- }
- defer func() { c.mu <- struct{}{} }()
-
- c.writeErrMu.Lock()
- err := c.writeErr
- c.writeErrMu.Unlock()
- if err != nil {
- return err
- }
-
- c.conn.SetWriteDeadline(deadline)
- _, err = c.conn.Write(buf)
- if err != nil {
- return c.writeFatal(err)
- }
- if messageType == CloseMessage {
- c.writeFatal(ErrCloseSent)
- }
- return err
-}
-
-// beginMessage prepares a connection and message writer for a new message.
-func (c *Conn) beginMessage(mw *messageWriter, messageType int) error {
- // Close previous writer if not already closed by the application. It's
- // probably better to return an error in this situation, but we cannot
- // change this without breaking existing applications.
- if c.writer != nil {
- c.writer.Close()
- c.writer = nil
- }
-
- if !isControl(messageType) && !isData(messageType) {
- return errBadWriteOpCode
- }
-
- c.writeErrMu.Lock()
- err := c.writeErr
- c.writeErrMu.Unlock()
- if err != nil {
- return err
- }
-
- mw.c = c
- mw.frameType = messageType
- mw.pos = maxFrameHeaderSize
-
- if c.writeBuf == nil {
- wpd, ok := c.writePool.Get().(writePoolData)
- if ok {
- c.writeBuf = wpd.buf
- } else {
- c.writeBuf = make([]byte, c.writeBufSize)
- }
- }
- return nil
-}
-
-// NextWriter returns a writer for the next message to send. The writer's Close
-// method flushes the complete message to the network.
-//
-// There can be at most one open writer on a connection. NextWriter closes the
-// previous writer if the application has not already done so.
-//
-// All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and
-// PongMessage) are supported.
-func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) {
- var mw messageWriter
- if err := c.beginMessage(&mw, messageType); err != nil {
- return nil, err
- }
- c.writer = &mw
- if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) {
- w := c.newCompressionWriter(c.writer, c.compressionLevel)
- mw.compress = true
- c.writer = w
- }
- return c.writer, nil
-}
-
-type messageWriter struct {
- c *Conn
- compress bool // whether next call to flushFrame should set RSV1
- pos int // end of data in writeBuf.
- frameType int // type of the current frame.
- err error
-}
-
-func (w *messageWriter) endMessage(err error) error {
- if w.err != nil {
- return err
- }
- c := w.c
- w.err = err
- c.writer = nil
- if c.writePool != nil {
- c.writePool.Put(writePoolData{buf: c.writeBuf})
- c.writeBuf = nil
- }
- return err
-}
-
-// flushFrame writes buffered data and extra as a frame to the network. The
-// final argument indicates that this is the last frame in the message.
-func (w *messageWriter) flushFrame(final bool, extra []byte) error {
- c := w.c
- length := w.pos - maxFrameHeaderSize + len(extra)
-
- // Check for invalid control frames.
- if isControl(w.frameType) &&
- (!final || length > maxControlFramePayloadSize) {
- return w.endMessage(errInvalidControlFrame)
- }
-
- b0 := byte(w.frameType)
- if final {
- b0 |= finalBit
- }
- if w.compress {
- b0 |= rsv1Bit
- }
- w.compress = false
-
- b1 := byte(0)
- if !c.isServer {
- b1 |= maskBit
- }
-
- // Assume that the frame starts at beginning of c.writeBuf.
- framePos := 0
- if c.isServer {
- // Adjust up if mask not included in the header.
- framePos = 4
- }
-
- switch {
- case length >= 65536:
- c.writeBuf[framePos] = b0
- c.writeBuf[framePos+1] = b1 | 127
- binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length))
- case length > 125:
- framePos += 6
- c.writeBuf[framePos] = b0
- c.writeBuf[framePos+1] = b1 | 126
- binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length))
- default:
- framePos += 8
- c.writeBuf[framePos] = b0
- c.writeBuf[framePos+1] = b1 | byte(length)
- }
-
- if !c.isServer {
- key := newMaskKey()
- copy(c.writeBuf[maxFrameHeaderSize-4:], key[:])
- maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos])
- if len(extra) > 0 {
- return w.endMessage(c.writeFatal(errors.New("websocket: internal error, extra used in client mode")))
- }
- }
-
- // Write the buffers to the connection with best-effort detection of
- // concurrent writes. See the concurrency section in the package
- // documentation for more info.
-
- if c.isWriting {
- panic("concurrent write to websocket connection")
- }
- c.isWriting = true
-
- err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra)
-
- if !c.isWriting {
- panic("concurrent write to websocket connection")
- }
- c.isWriting = false
-
- if err != nil {
- return w.endMessage(err)
- }
-
- if final {
- w.endMessage(errWriteClosed)
- return nil
- }
-
- // Setup for next frame.
- w.pos = maxFrameHeaderSize
- w.frameType = continuationFrame
- return nil
-}
-
-func (w *messageWriter) ncopy(max int) (int, error) {
- n := len(w.c.writeBuf) - w.pos
- if n <= 0 {
- if err := w.flushFrame(false, nil); err != nil {
- return 0, err
- }
- n = len(w.c.writeBuf) - w.pos
- }
- if n > max {
- n = max
- }
- return n, nil
-}
-
-func (w *messageWriter) Write(p []byte) (int, error) {
- if w.err != nil {
- return 0, w.err
- }
-
- if len(p) > 2*len(w.c.writeBuf) && w.c.isServer {
- // Don't buffer large messages.
- err := w.flushFrame(false, p)
- if err != nil {
- return 0, err
- }
- return len(p), nil
- }
-
- nn := len(p)
- for len(p) > 0 {
- n, err := w.ncopy(len(p))
- if err != nil {
- return 0, err
- }
- copy(w.c.writeBuf[w.pos:], p[:n])
- w.pos += n
- p = p[n:]
- }
- return nn, nil
-}
-
-func (w *messageWriter) WriteString(p string) (int, error) {
- if w.err != nil {
- return 0, w.err
- }
-
- nn := len(p)
- for len(p) > 0 {
- n, err := w.ncopy(len(p))
- if err != nil {
- return 0, err
- }
- copy(w.c.writeBuf[w.pos:], p[:n])
- w.pos += n
- p = p[n:]
- }
- return nn, nil
-}
-
-func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) {
- if w.err != nil {
- return 0, w.err
- }
- for {
- if w.pos == len(w.c.writeBuf) {
- err = w.flushFrame(false, nil)
- if err != nil {
- break
- }
- }
- var n int
- n, err = r.Read(w.c.writeBuf[w.pos:])
- w.pos += n
- nn += int64(n)
- if err != nil {
- if err == io.EOF {
- err = nil
- }
- break
- }
- }
- return nn, err
-}
-
-func (w *messageWriter) Close() error {
- if w.err != nil {
- return w.err
- }
- return w.flushFrame(true, nil)
-}
-
-// WritePreparedMessage writes prepared message into connection.
-func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error {
- frameType, frameData, err := pm.frame(prepareKey{
- isServer: c.isServer,
- compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType),
- compressionLevel: c.compressionLevel,
- })
- if err != nil {
- return err
- }
- if c.isWriting {
- panic("concurrent write to websocket connection")
- }
- c.isWriting = true
- err = c.write(frameType, c.writeDeadline, frameData, nil)
- if !c.isWriting {
- panic("concurrent write to websocket connection")
- }
- c.isWriting = false
- return err
-}
-
-// WriteMessage is a helper method for getting a writer using NextWriter,
-// writing the message and closing the writer.
-func (c *Conn) WriteMessage(messageType int, data []byte) error {
-
- if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) {
- // Fast path with no allocations and single frame.
-
- var mw messageWriter
- if err := c.beginMessage(&mw, messageType); err != nil {
- return err
- }
- n := copy(c.writeBuf[mw.pos:], data)
- mw.pos += n
- data = data[n:]
- return mw.flushFrame(true, data)
- }
-
- w, err := c.NextWriter(messageType)
- if err != nil {
- return err
- }
- if _, err = w.Write(data); err != nil {
- return err
- }
- return w.Close()
-}
-
-// SetWriteDeadline sets the write deadline on the underlying network
-// connection. After a write has timed out, the websocket state is corrupt and
-// all future writes will return an error. A zero value for t means writes will
-// not time out.
-func (c *Conn) SetWriteDeadline(t time.Time) error {
- c.writeDeadline = t
- return nil
-}
-
-// Read methods
-
-func (c *Conn) advanceFrame() (int, error) {
- // 1. Skip remainder of previous frame.
-
- if c.readRemaining > 0 {
- if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil {
- return noFrame, err
- }
- }
-
- // 2. Read and parse first two bytes of frame header.
- // To aid debugging, collect and report all errors in the first two bytes
- // of the header.
-
- var errors []string
-
- p, err := c.read(2)
- if err != nil {
- return noFrame, err
- }
-
- frameType := int(p[0] & 0xf)
- final := p[0]&finalBit != 0
- rsv1 := p[0]&rsv1Bit != 0
- rsv2 := p[0]&rsv2Bit != 0
- rsv3 := p[0]&rsv3Bit != 0
- mask := p[1]&maskBit != 0
- c.setReadRemaining(int64(p[1] & 0x7f))
-
- c.readDecompress = false
- if rsv1 {
- if c.newDecompressionReader != nil {
- c.readDecompress = true
- } else {
- errors = append(errors, "RSV1 set")
- }
- }
-
- if rsv2 {
- errors = append(errors, "RSV2 set")
- }
-
- if rsv3 {
- errors = append(errors, "RSV3 set")
- }
-
- switch frameType {
- case CloseMessage, PingMessage, PongMessage:
- if c.readRemaining > maxControlFramePayloadSize {
- errors = append(errors, "len > 125 for control")
- }
- if !final {
- errors = append(errors, "FIN not set on control")
- }
- case TextMessage, BinaryMessage:
- if !c.readFinal {
- errors = append(errors, "data before FIN")
- }
- c.readFinal = final
- case continuationFrame:
- if c.readFinal {
- errors = append(errors, "continuation after FIN")
- }
- c.readFinal = final
- default:
- errors = append(errors, "bad opcode "+strconv.Itoa(frameType))
- }
-
- if mask != c.isServer {
- errors = append(errors, "bad MASK")
- }
-
- if len(errors) > 0 {
- return noFrame, c.handleProtocolError(strings.Join(errors, ", "))
- }
-
- // 3. Read and parse frame length as per
- // https://tools.ietf.org/html/rfc6455#section-5.2
- //
- // The length of the "Payload data", in bytes: if 0-125, that is the payload
- // length.
- // - If 126, the following 2 bytes interpreted as a 16-bit unsigned
- // integer are the payload length.
- // - If 127, the following 8 bytes interpreted as
- // a 64-bit unsigned integer (the most significant bit MUST be 0) are the
- // payload length. Multibyte length quantities are expressed in network byte
- // order.
-
- switch c.readRemaining {
- case 126:
- p, err := c.read(2)
- if err != nil {
- return noFrame, err
- }
-
- if err := c.setReadRemaining(int64(binary.BigEndian.Uint16(p))); err != nil {
- return noFrame, err
- }
- case 127:
- p, err := c.read(8)
- if err != nil {
- return noFrame, err
- }
-
- if err := c.setReadRemaining(int64(binary.BigEndian.Uint64(p))); err != nil {
- return noFrame, err
- }
- }
-
- // 4. Handle frame masking.
-
- if mask {
- c.readMaskPos = 0
- p, err := c.read(len(c.readMaskKey))
- if err != nil {
- return noFrame, err
- }
- copy(c.readMaskKey[:], p)
- }
-
- // 5. For text and binary messages, enforce read limit and return.
-
- if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage {
-
- c.readLength += c.readRemaining
- // Don't allow readLength to overflow in the presence of a large readRemaining
- // counter.
- if c.readLength < 0 {
- return noFrame, ErrReadLimit
- }
-
- if c.readLimit > 0 && c.readLength > c.readLimit {
- c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait))
- return noFrame, ErrReadLimit
- }
-
- return frameType, nil
- }
-
- // 6. Read control frame payload.
-
- var payload []byte
- if c.readRemaining > 0 {
- payload, err = c.read(int(c.readRemaining))
- c.setReadRemaining(0)
- if err != nil {
- return noFrame, err
- }
- if c.isServer {
- maskBytes(c.readMaskKey, 0, payload)
- }
- }
-
- // 7. Process control frame payload.
-
- switch frameType {
- case PongMessage:
- if err := c.handlePong(string(payload)); err != nil {
- return noFrame, err
- }
- case PingMessage:
- if err := c.handlePing(string(payload)); err != nil {
- return noFrame, err
- }
- case CloseMessage:
- closeCode := CloseNoStatusReceived
- closeText := ""
- if len(payload) >= 2 {
- closeCode = int(binary.BigEndian.Uint16(payload))
- if !isValidReceivedCloseCode(closeCode) {
- return noFrame, c.handleProtocolError("bad close code " + strconv.Itoa(closeCode))
- }
- closeText = string(payload[2:])
- if !utf8.ValidString(closeText) {
- return noFrame, c.handleProtocolError("invalid utf8 payload in close frame")
- }
- }
- if err := c.handleClose(closeCode, closeText); err != nil {
- return noFrame, err
- }
- return noFrame, &CloseError{Code: closeCode, Text: closeText}
- }
-
- return frameType, nil
-}
-
-func (c *Conn) handleProtocolError(message string) error {
- data := FormatCloseMessage(CloseProtocolError, message)
- if len(data) > maxControlFramePayloadSize {
- data = data[:maxControlFramePayloadSize]
- }
- c.WriteControl(CloseMessage, data, time.Now().Add(writeWait))
- return errors.New("websocket: " + message)
-}
-
-// NextReader returns the next data message received from the peer. The
-// returned messageType is either TextMessage or BinaryMessage.
-//
-// There can be at most one open reader on a connection. NextReader discards
-// the previous message if the application has not already consumed it.
-//
-// Applications must break out of the application's read loop when this method
-// returns a non-nil error value. Errors returned from this method are
-// permanent. Once this method returns a non-nil error, all subsequent calls to
-// this method return the same error.
-func (c *Conn) NextReader() (messageType int, r io.Reader, err error) {
- // Close previous reader, only relevant for decompression.
- if c.reader != nil {
- c.reader.Close()
- c.reader = nil
- }
-
- c.messageReader = nil
- c.readLength = 0
-
- for c.readErr == nil {
- frameType, err := c.advanceFrame()
- if err != nil {
- c.readErr = hideTempErr(err)
- break
- }
-
- if frameType == TextMessage || frameType == BinaryMessage {
- c.messageReader = &messageReader{c}
- c.reader = c.messageReader
- if c.readDecompress {
- c.reader = c.newDecompressionReader(c.reader)
- }
- return frameType, c.reader, nil
- }
- }
-
- // Applications that do handle the error returned from this method spin in
- // tight loop on connection failure. To help application developers detect
- // this error, panic on repeated reads to the failed connection.
- c.readErrCount++
- if c.readErrCount >= 1000 {
- panic("repeated read on failed websocket connection")
- }
-
- return noFrame, nil, c.readErr
-}
-
-type messageReader struct{ c *Conn }
-
-func (r *messageReader) Read(b []byte) (int, error) {
- c := r.c
- if c.messageReader != r {
- return 0, io.EOF
- }
-
- for c.readErr == nil {
-
- if c.readRemaining > 0 {
- if int64(len(b)) > c.readRemaining {
- b = b[:c.readRemaining]
- }
- n, err := c.br.Read(b)
- c.readErr = hideTempErr(err)
- if c.isServer {
- c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n])
- }
- rem := c.readRemaining
- rem -= int64(n)
- c.setReadRemaining(rem)
- if c.readRemaining > 0 && c.readErr == io.EOF {
- c.readErr = errUnexpectedEOF
- }
- return n, c.readErr
- }
-
- if c.readFinal {
- c.messageReader = nil
- return 0, io.EOF
- }
-
- frameType, err := c.advanceFrame()
- switch {
- case err != nil:
- c.readErr = hideTempErr(err)
- case frameType == TextMessage || frameType == BinaryMessage:
- c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader")
- }
- }
-
- err := c.readErr
- if err == io.EOF && c.messageReader == r {
- err = errUnexpectedEOF
- }
- return 0, err
-}
-
-func (r *messageReader) Close() error {
- return nil
-}
-
-// ReadMessage is a helper method for getting a reader using NextReader and
-// reading from that reader to a buffer.
-func (c *Conn) ReadMessage() (messageType int, p []byte, err error) {
- var r io.Reader
- messageType, r, err = c.NextReader()
- if err != nil {
- return messageType, nil, err
- }
- p, err = ioutil.ReadAll(r)
- return messageType, p, err
-}
-
-// SetReadDeadline sets the read deadline on the underlying network connection.
-// After a read has timed out, the websocket connection state is corrupt and
-// all future reads will return an error. A zero value for t means reads will
-// not time out.
-func (c *Conn) SetReadDeadline(t time.Time) error {
- return c.conn.SetReadDeadline(t)
-}
-
-// SetReadLimit sets the maximum size in bytes for a message read from the peer. If a
-// message exceeds the limit, the connection sends a close message to the peer
-// and returns ErrReadLimit to the application.
-func (c *Conn) SetReadLimit(limit int64) {
- c.readLimit = limit
-}
-
-// CloseHandler returns the current close handler
-func (c *Conn) CloseHandler() func(code int, text string) error {
- return c.handleClose
-}
-
-// SetCloseHandler sets the handler for close messages received from the peer.
-// The code argument to h is the received close code or CloseNoStatusReceived
-// if the close message is empty. The default close handler sends a close
-// message back to the peer.
-//
-// The handler function is called from the NextReader, ReadMessage and message
-// reader Read methods. The application must read the connection to process
-// close messages as described in the section on Control Messages above.
-//
-// The connection read methods return a CloseError when a close message is
-// received. Most applications should handle close messages as part of their
-// normal error handling. Applications should only set a close handler when the
-// application must perform some action before sending a close message back to
-// the peer.
-func (c *Conn) SetCloseHandler(h func(code int, text string) error) {
- if h == nil {
- h = func(code int, text string) error {
- message := FormatCloseMessage(code, "")
- c.WriteControl(CloseMessage, message, time.Now().Add(writeWait))
- return nil
- }
- }
- c.handleClose = h
-}
-
-// PingHandler returns the current ping handler
-func (c *Conn) PingHandler() func(appData string) error {
- return c.handlePing
-}
-
-// SetPingHandler sets the handler for ping messages received from the peer.
-// The appData argument to h is the PING message application data. The default
-// ping handler sends a pong to the peer.
-//
-// The handler function is called from the NextReader, ReadMessage and message
-// reader Read methods. The application must read the connection to process
-// ping messages as described in the section on Control Messages above.
-func (c *Conn) SetPingHandler(h func(appData string) error) {
- if h == nil {
- h = func(message string) error {
- err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait))
- if err == ErrCloseSent {
- return nil
- } else if e, ok := err.(net.Error); ok && e.Temporary() {
- return nil
- }
- return err
- }
- }
- c.handlePing = h
-}
-
-// PongHandler returns the current pong handler
-func (c *Conn) PongHandler() func(appData string) error {
- return c.handlePong
-}
-
-// SetPongHandler sets the handler for pong messages received from the peer.
-// The appData argument to h is the PONG message application data. The default
-// pong handler does nothing.
-//
-// The handler function is called from the NextReader, ReadMessage and message
-// reader Read methods. The application must read the connection to process
-// pong messages as described in the section on Control Messages above.
-func (c *Conn) SetPongHandler(h func(appData string) error) {
- if h == nil {
- h = func(string) error { return nil }
- }
- c.handlePong = h
-}
-
-// UnderlyingConn returns the internal net.Conn. This can be used to further
-// modifications to connection specific flags.
-func (c *Conn) UnderlyingConn() net.Conn {
- return c.conn
-}
-
-// EnableWriteCompression enables and disables write compression of
-// subsequent text and binary messages. This function is a noop if
-// compression was not negotiated with the peer.
-func (c *Conn) EnableWriteCompression(enable bool) {
- c.enableWriteCompression = enable
-}
-
-// SetCompressionLevel sets the flate compression level for subsequent text and
-// binary messages. This function is a noop if compression was not negotiated
-// with the peer. See the compress/flate package for a description of
-// compression levels.
-func (c *Conn) SetCompressionLevel(level int) error {
- if !isValidCompressionLevel(level) {
- return errors.New("websocket: invalid compression level")
- }
- c.compressionLevel = level
- return nil
-}
-
-// FormatCloseMessage formats closeCode and text as a WebSocket close message.
-// An empty message is returned for code CloseNoStatusReceived.
-func FormatCloseMessage(closeCode int, text string) []byte {
- if closeCode == CloseNoStatusReceived {
- // Return empty message because it's illegal to send
- // CloseNoStatusReceived. Return non-nil value in case application
- // checks for nil.
- return []byte{}
- }
- buf := make([]byte, 2+len(text))
- binary.BigEndian.PutUint16(buf, uint16(closeCode))
- copy(buf[2:], text)
- return buf
-}
diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go
deleted file mode 100644
index 8db0cef9..00000000
--- a/vendor/github.com/gorilla/websocket/doc.go
+++ /dev/null
@@ -1,227 +0,0 @@
-// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package websocket implements the WebSocket protocol defined in RFC 6455.
-//
-// Overview
-//
-// The Conn type represents a WebSocket connection. A server application calls
-// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn:
-//
-// var upgrader = websocket.Upgrader{
-// ReadBufferSize: 1024,
-// WriteBufferSize: 1024,
-// }
-//
-// func handler(w http.ResponseWriter, r *http.Request) {
-// conn, err := upgrader.Upgrade(w, r, nil)
-// if err != nil {
-// log.Println(err)
-// return
-// }
-// ... Use conn to send and receive messages.
-// }
-//
-// Call the connection's WriteMessage and ReadMessage methods to send and
-// receive messages as a slice of bytes. This snippet of code shows how to echo
-// messages using these methods:
-//
-// for {
-// messageType, p, err := conn.ReadMessage()
-// if err != nil {
-// log.Println(err)
-// return
-// }
-// if err := conn.WriteMessage(messageType, p); err != nil {
-// log.Println(err)
-// return
-// }
-// }
-//
-// In above snippet of code, p is a []byte and messageType is an int with value
-// websocket.BinaryMessage or websocket.TextMessage.
-//
-// An application can also send and receive messages using the io.WriteCloser
-// and io.Reader interfaces. To send a message, call the connection NextWriter
-// method to get an io.WriteCloser, write the message to the writer and close
-// the writer when done. To receive a message, call the connection NextReader
-// method to get an io.Reader and read until io.EOF is returned. This snippet
-// shows how to echo messages using the NextWriter and NextReader methods:
-//
-// for {
-// messageType, r, err := conn.NextReader()
-// if err != nil {
-// return
-// }
-// w, err := conn.NextWriter(messageType)
-// if err != nil {
-// return err
-// }
-// if _, err := io.Copy(w, r); err != nil {
-// return err
-// }
-// if err := w.Close(); err != nil {
-// return err
-// }
-// }
-//
-// Data Messages
-//
-// The WebSocket protocol distinguishes between text and binary data messages.
-// Text messages are interpreted as UTF-8 encoded text. The interpretation of
-// binary messages is left to the application.
-//
-// This package uses the TextMessage and BinaryMessage integer constants to
-// identify the two data message types. The ReadMessage and NextReader methods
-// return the type of the received message. The messageType argument to the
-// WriteMessage and NextWriter methods specifies the type of a sent message.
-//
-// It is the application's responsibility to ensure that text messages are
-// valid UTF-8 encoded text.
-//
-// Control Messages
-//
-// The WebSocket protocol defines three types of control messages: close, ping
-// and pong. Call the connection WriteControl, WriteMessage or NextWriter
-// methods to send a control message to the peer.
-//
-// Connections handle received close messages by calling the handler function
-// set with the SetCloseHandler method and by returning a *CloseError from the
-// NextReader, ReadMessage or the message Read method. The default close
-// handler sends a close message to the peer.
-//
-// Connections handle received ping messages by calling the handler function
-// set with the SetPingHandler method. The default ping handler sends a pong
-// message to the peer.
-//
-// Connections handle received pong messages by calling the handler function
-// set with the SetPongHandler method. The default pong handler does nothing.
-// If an application sends ping messages, then the application should set a
-// pong handler to receive the corresponding pong.
-//
-// The control message handler functions are called from the NextReader,
-// ReadMessage and message reader Read methods. The default close and ping
-// handlers can block these methods for a short time when the handler writes to
-// the connection.
-//
-// The application must read the connection to process close, ping and pong
-// messages sent from the peer. If the application is not otherwise interested
-// in messages from the peer, then the application should start a goroutine to
-// read and discard messages from the peer. A simple example is:
-//
-// func readLoop(c *websocket.Conn) {
-// for {
-// if _, _, err := c.NextReader(); err != nil {
-// c.Close()
-// break
-// }
-// }
-// }
-//
-// Concurrency
-//
-// Connections support one concurrent reader and one concurrent writer.
-//
-// Applications are responsible for ensuring that no more than one goroutine
-// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage,
-// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and
-// that no more than one goroutine calls the read methods (NextReader,
-// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler)
-// concurrently.
-//
-// The Close and WriteControl methods can be called concurrently with all other
-// methods.
-//
-// Origin Considerations
-//
-// Web browsers allow Javascript applications to open a WebSocket connection to
-// any host. It's up to the server to enforce an origin policy using the Origin
-// request header sent by the browser.
-//
-// The Upgrader calls the function specified in the CheckOrigin field to check
-// the origin. If the CheckOrigin function returns false, then the Upgrade
-// method fails the WebSocket handshake with HTTP status 403.
-//
-// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail
-// the handshake if the Origin request header is present and the Origin host is
-// not equal to the Host request header.
-//
-// The deprecated package-level Upgrade function does not perform origin
-// checking. The application is responsible for checking the Origin header
-// before calling the Upgrade function.
-//
-// Buffers
-//
-// Connections buffer network input and output to reduce the number
-// of system calls when reading or writing messages.
-//
-// Write buffers are also used for constructing WebSocket frames. See RFC 6455,
-// Section 5 for a discussion of message framing. A WebSocket frame header is
-// written to the network each time a write buffer is flushed to the network.
-// Decreasing the size of the write buffer can increase the amount of framing
-// overhead on the connection.
-//
-// The buffer sizes in bytes are specified by the ReadBufferSize and
-// WriteBufferSize fields in the Dialer and Upgrader. The Dialer uses a default
-// size of 4096 when a buffer size field is set to zero. The Upgrader reuses
-// buffers created by the HTTP server when a buffer size field is set to zero.
-// The HTTP server buffers have a size of 4096 at the time of this writing.
-//
-// The buffer sizes do not limit the size of a message that can be read or
-// written by a connection.
-//
-// Buffers are held for the lifetime of the connection by default. If the
-// Dialer or Upgrader WriteBufferPool field is set, then a connection holds the
-// write buffer only when writing a message.
-//
-// Applications should tune the buffer sizes to balance memory use and
-// performance. Increasing the buffer size uses more memory, but can reduce the
-// number of system calls to read or write the network. In the case of writing,
-// increasing the buffer size can reduce the number of frame headers written to
-// the network.
-//
-// Some guidelines for setting buffer parameters are:
-//
-// Limit the buffer sizes to the maximum expected message size. Buffers larger
-// than the largest message do not provide any benefit.
-//
-// Depending on the distribution of message sizes, setting the buffer size to
-// a value less than the maximum expected message size can greatly reduce memory
-// use with a small impact on performance. Here's an example: If 99% of the
-// messages are smaller than 256 bytes and the maximum message size is 512
-// bytes, then a buffer size of 256 bytes will result in 1.01 more system calls
-// than a buffer size of 512 bytes. The memory savings is 50%.
-//
-// A write buffer pool is useful when the application has a modest number
-// writes over a large number of connections. when buffers are pooled, a larger
-// buffer size has a reduced impact on total memory use and has the benefit of
-// reducing system calls and frame overhead.
-//
-// Compression EXPERIMENTAL
-//
-// Per message compression extensions (RFC 7692) are experimentally supported
-// by this package in a limited capacity. Setting the EnableCompression option
-// to true in Dialer or Upgrader will attempt to negotiate per message deflate
-// support.
-//
-// var upgrader = websocket.Upgrader{
-// EnableCompression: true,
-// }
-//
-// If compression was successfully negotiated with the connection's peer, any
-// message received in compressed form will be automatically decompressed.
-// All Read methods will return uncompressed bytes.
-//
-// Per message compression of messages written to a connection can be enabled
-// or disabled by calling the corresponding Conn method:
-//
-// conn.EnableWriteCompression(false)
-//
-// Currently this package does not support compression with "context takeover".
-// This means that messages must be compressed and decompressed in isolation,
-// without retaining sliding window or dictionary state across messages. For
-// more details refer to RFC 7692.
-//
-// Use of compression is experimental and may result in decreased performance.
-package websocket
diff --git a/vendor/github.com/gorilla/websocket/join.go b/vendor/github.com/gorilla/websocket/join.go
deleted file mode 100644
index c64f8c82..00000000
--- a/vendor/github.com/gorilla/websocket/join.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2019 The Gorilla WebSocket Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package websocket
-
-import (
- "io"
- "strings"
-)
-
-// JoinMessages concatenates received messages to create a single io.Reader.
-// The string term is appended to each message. The returned reader does not
-// support concurrent calls to the Read method.
-func JoinMessages(c *Conn, term string) io.Reader {
- return &joinReader{c: c, term: term}
-}
-
-type joinReader struct {
- c *Conn
- term string
- r io.Reader
-}
-
-func (r *joinReader) Read(p []byte) (int, error) {
- if r.r == nil {
- var err error
- _, r.r, err = r.c.NextReader()
- if err != nil {
- return 0, err
- }
- if r.term != "" {
- r.r = io.MultiReader(r.r, strings.NewReader(r.term))
- }
- }
- n, err := r.r.Read(p)
- if err == io.EOF {
- err = nil
- r.r = nil
- }
- return n, err
-}
diff --git a/vendor/github.com/gorilla/websocket/json.go b/vendor/github.com/gorilla/websocket/json.go
deleted file mode 100644
index dc2c1f64..00000000
--- a/vendor/github.com/gorilla/websocket/json.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package websocket
-
-import (
- "encoding/json"
- "io"
-)
-
-// WriteJSON writes the JSON encoding of v as a message.
-//
-// Deprecated: Use c.WriteJSON instead.
-func WriteJSON(c *Conn, v interface{}) error {
- return c.WriteJSON(v)
-}
-
-// WriteJSON writes the JSON encoding of v as a message.
-//
-// See the documentation for encoding/json Marshal for details about the
-// conversion of Go values to JSON.
-func (c *Conn) WriteJSON(v interface{}) error {
- w, err := c.NextWriter(TextMessage)
- if err != nil {
- return err
- }
- err1 := json.NewEncoder(w).Encode(v)
- err2 := w.Close()
- if err1 != nil {
- return err1
- }
- return err2
-}
-
-// ReadJSON reads the next JSON-encoded message from the connection and stores
-// it in the value pointed to by v.
-//
-// Deprecated: Use c.ReadJSON instead.
-func ReadJSON(c *Conn, v interface{}) error {
- return c.ReadJSON(v)
-}
-
-// ReadJSON reads the next JSON-encoded message from the connection and stores
-// it in the value pointed to by v.
-//
-// See the documentation for the encoding/json Unmarshal function for details
-// about the conversion of JSON to a Go value.
-func (c *Conn) ReadJSON(v interface{}) error {
- _, r, err := c.NextReader()
- if err != nil {
- return err
- }
- err = json.NewDecoder(r).Decode(v)
- if err == io.EOF {
- // One value is expected in the message.
- err = io.ErrUnexpectedEOF
- }
- return err
-}
diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go
deleted file mode 100644
index d0742bf2..00000000
--- a/vendor/github.com/gorilla/websocket/mask.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
-// this source code is governed by a BSD-style license that can be found in the
-// LICENSE file.
-
-//go:build !appengine
-// +build !appengine
-
-package websocket
-
-import "unsafe"
-
-const wordSize = int(unsafe.Sizeof(uintptr(0)))
-
-func maskBytes(key [4]byte, pos int, b []byte) int {
- // Mask one byte at a time for small buffers.
- if len(b) < 2*wordSize {
- for i := range b {
- b[i] ^= key[pos&3]
- pos++
- }
- return pos & 3
- }
-
- // Mask one byte at a time to word boundary.
- if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 {
- n = wordSize - n
- for i := range b[:n] {
- b[i] ^= key[pos&3]
- pos++
- }
- b = b[n:]
- }
-
- // Create aligned word size key.
- var k [wordSize]byte
- for i := range k {
- k[i] = key[(pos+i)&3]
- }
- kw := *(*uintptr)(unsafe.Pointer(&k))
-
- // Mask one word at a time.
- n := (len(b) / wordSize) * wordSize
- for i := 0; i < n; i += wordSize {
- *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw
- }
-
- // Mask one byte at a time for remaining bytes.
- b = b[n:]
- for i := range b {
- b[i] ^= key[pos&3]
- pos++
- }
-
- return pos & 3
-}
diff --git a/vendor/github.com/gorilla/websocket/mask_safe.go b/vendor/github.com/gorilla/websocket/mask_safe.go
deleted file mode 100644
index 36250ca7..00000000
--- a/vendor/github.com/gorilla/websocket/mask_safe.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
-// this source code is governed by a BSD-style license that can be found in the
-// LICENSE file.
-
-//go:build appengine
-// +build appengine
-
-package websocket
-
-func maskBytes(key [4]byte, pos int, b []byte) int {
- for i := range b {
- b[i] ^= key[pos&3]
- pos++
- }
- return pos & 3
-}
diff --git a/vendor/github.com/gorilla/websocket/prepared.go b/vendor/github.com/gorilla/websocket/prepared.go
deleted file mode 100644
index c854225e..00000000
--- a/vendor/github.com/gorilla/websocket/prepared.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package websocket
-
-import (
- "bytes"
- "net"
- "sync"
- "time"
-)
-
-// PreparedMessage caches on the wire representations of a message payload.
-// Use PreparedMessage to efficiently send a message payload to multiple
-// connections. PreparedMessage is especially useful when compression is used
-// because the CPU and memory expensive compression operation can be executed
-// once for a given set of compression options.
-type PreparedMessage struct {
- messageType int
- data []byte
- mu sync.Mutex
- frames map[prepareKey]*preparedFrame
-}
-
-// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage.
-type prepareKey struct {
- isServer bool
- compress bool
- compressionLevel int
-}
-
-// preparedFrame contains data in wire representation.
-type preparedFrame struct {
- once sync.Once
- data []byte
-}
-
-// NewPreparedMessage returns an initialized PreparedMessage. You can then send
-// it to connection using WritePreparedMessage method. Valid wire
-// representation will be calculated lazily only once for a set of current
-// connection options.
-func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) {
- pm := &PreparedMessage{
- messageType: messageType,
- frames: make(map[prepareKey]*preparedFrame),
- data: data,
- }
-
- // Prepare a plain server frame.
- _, frameData, err := pm.frame(prepareKey{isServer: true, compress: false})
- if err != nil {
- return nil, err
- }
-
- // To protect against caller modifying the data argument, remember the data
- // copied to the plain server frame.
- pm.data = frameData[len(frameData)-len(data):]
- return pm, nil
-}
-
-func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) {
- pm.mu.Lock()
- frame, ok := pm.frames[key]
- if !ok {
- frame = &preparedFrame{}
- pm.frames[key] = frame
- }
- pm.mu.Unlock()
-
- var err error
- frame.once.Do(func() {
- // Prepare a frame using a 'fake' connection.
- // TODO: Refactor code in conn.go to allow more direct construction of
- // the frame.
- mu := make(chan struct{}, 1)
- mu <- struct{}{}
- var nc prepareConn
- c := &Conn{
- conn: &nc,
- mu: mu,
- isServer: key.isServer,
- compressionLevel: key.compressionLevel,
- enableWriteCompression: true,
- writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize),
- }
- if key.compress {
- c.newCompressionWriter = compressNoContextTakeover
- }
- err = c.WriteMessage(pm.messageType, pm.data)
- frame.data = nc.buf.Bytes()
- })
- return pm.messageType, frame.data, err
-}
-
-type prepareConn struct {
- buf bytes.Buffer
- net.Conn
-}
-
-func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) }
-func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil }
diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go
deleted file mode 100644
index e0f466b7..00000000
--- a/vendor/github.com/gorilla/websocket/proxy.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package websocket
-
-import (
- "bufio"
- "encoding/base64"
- "errors"
- "net"
- "net/http"
- "net/url"
- "strings"
-)
-
-type netDialerFunc func(network, addr string) (net.Conn, error)
-
-func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) {
- return fn(network, addr)
-}
-
-func init() {
- proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) {
- return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil
- })
-}
-
-type httpProxyDialer struct {
- proxyURL *url.URL
- forwardDial func(network, addr string) (net.Conn, error)
-}
-
-func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) {
- hostPort, _ := hostPortNoPort(hpd.proxyURL)
- conn, err := hpd.forwardDial(network, hostPort)
- if err != nil {
- return nil, err
- }
-
- connectHeader := make(http.Header)
- if user := hpd.proxyURL.User; user != nil {
- proxyUser := user.Username()
- if proxyPassword, passwordSet := user.Password(); passwordSet {
- credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
- connectHeader.Set("Proxy-Authorization", "Basic "+credential)
- }
- }
-
- connectReq := &http.Request{
- Method: http.MethodConnect,
- URL: &url.URL{Opaque: addr},
- Host: addr,
- Header: connectHeader,
- }
-
- if err := connectReq.Write(conn); err != nil {
- conn.Close()
- return nil, err
- }
-
- // Read response. It's OK to use and discard buffered reader here becaue
- // the remote server does not speak until spoken to.
- br := bufio.NewReader(conn)
- resp, err := http.ReadResponse(br, connectReq)
- if err != nil {
- conn.Close()
- return nil, err
- }
-
- if resp.StatusCode != 200 {
- conn.Close()
- f := strings.SplitN(resp.Status, " ", 2)
- return nil, errors.New(f[1])
- }
- return conn, nil
-}
diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go
deleted file mode 100644
index 24d53b38..00000000
--- a/vendor/github.com/gorilla/websocket/server.go
+++ /dev/null
@@ -1,365 +0,0 @@
-// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package websocket
-
-import (
- "bufio"
- "errors"
- "io"
- "net/http"
- "net/url"
- "strings"
- "time"
-)
-
-// HandshakeError describes an error with the handshake from the peer.
-type HandshakeError struct {
- message string
-}
-
-func (e HandshakeError) Error() string { return e.message }
-
-// Upgrader specifies parameters for upgrading an HTTP connection to a
-// WebSocket connection.
-//
-// It is safe to call Upgrader's methods concurrently.
-type Upgrader struct {
- // HandshakeTimeout specifies the duration for the handshake to complete.
- HandshakeTimeout time.Duration
-
- // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer
- // size is zero, then buffers allocated by the HTTP server are used. The
- // I/O buffer sizes do not limit the size of the messages that can be sent
- // or received.
- ReadBufferSize, WriteBufferSize int
-
- // WriteBufferPool is a pool of buffers for write operations. If the value
- // is not set, then write buffers are allocated to the connection for the
- // lifetime of the connection.
- //
- // A pool is most useful when the application has a modest volume of writes
- // across a large number of connections.
- //
- // Applications should use a single pool for each unique value of
- // WriteBufferSize.
- WriteBufferPool BufferPool
-
- // Subprotocols specifies the server's supported protocols in order of
- // preference. If this field is not nil, then the Upgrade method negotiates a
- // subprotocol by selecting the first match in this list with a protocol
- // requested by the client. If there's no match, then no protocol is
- // negotiated (the Sec-Websocket-Protocol header is not included in the
- // handshake response).
- Subprotocols []string
-
- // Error specifies the function for generating HTTP error responses. If Error
- // is nil, then http.Error is used to generate the HTTP response.
- Error func(w http.ResponseWriter, r *http.Request, status int, reason error)
-
- // CheckOrigin returns true if the request Origin header is acceptable. If
- // CheckOrigin is nil, then a safe default is used: return false if the
- // Origin request header is present and the origin host is not equal to
- // request Host header.
- //
- // A CheckOrigin function should carefully validate the request origin to
- // prevent cross-site request forgery.
- CheckOrigin func(r *http.Request) bool
-
- // EnableCompression specify if the server should attempt to negotiate per
- // message compression (RFC 7692). Setting this value to true does not
- // guarantee that compression will be supported. Currently only "no context
- // takeover" modes are supported.
- EnableCompression bool
-}
-
-func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) {
- err := HandshakeError{reason}
- if u.Error != nil {
- u.Error(w, r, status, err)
- } else {
- w.Header().Set("Sec-Websocket-Version", "13")
- http.Error(w, http.StatusText(status), status)
- }
- return nil, err
-}
-
-// checkSameOrigin returns true if the origin is not set or is equal to the request host.
-func checkSameOrigin(r *http.Request) bool {
- origin := r.Header["Origin"]
- if len(origin) == 0 {
- return true
- }
- u, err := url.Parse(origin[0])
- if err != nil {
- return false
- }
- return equalASCIIFold(u.Host, r.Host)
-}
-
-func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string {
- if u.Subprotocols != nil {
- clientProtocols := Subprotocols(r)
- for _, serverProtocol := range u.Subprotocols {
- for _, clientProtocol := range clientProtocols {
- if clientProtocol == serverProtocol {
- return clientProtocol
- }
- }
- }
- } else if responseHeader != nil {
- return responseHeader.Get("Sec-Websocket-Protocol")
- }
- return ""
-}
-
-// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
-//
-// The responseHeader is included in the response to the client's upgrade
-// request. Use the responseHeader to specify cookies (Set-Cookie). To specify
-// subprotocols supported by the server, set Upgrader.Subprotocols directly.
-//
-// If the upgrade fails, then Upgrade replies to the client with an HTTP error
-// response.
-func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) {
- const badHandshake = "websocket: the client is not using the websocket protocol: "
-
- if !tokenListContainsValue(r.Header, "Connection", "upgrade") {
- return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header")
- }
-
- if !tokenListContainsValue(r.Header, "Upgrade", "websocket") {
- return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header")
- }
-
- if r.Method != http.MethodGet {
- return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET")
- }
-
- if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") {
- return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header")
- }
-
- if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok {
- return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported")
- }
-
- checkOrigin := u.CheckOrigin
- if checkOrigin == nil {
- checkOrigin = checkSameOrigin
- }
- if !checkOrigin(r) {
- return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin")
- }
-
- challengeKey := r.Header.Get("Sec-Websocket-Key")
- if challengeKey == "" {
- return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header is missing or blank")
- }
-
- subprotocol := u.selectSubprotocol(r, responseHeader)
-
- // Negotiate PMCE
- var compress bool
- if u.EnableCompression {
- for _, ext := range parseExtensions(r.Header) {
- if ext[""] != "permessage-deflate" {
- continue
- }
- compress = true
- break
- }
- }
-
- h, ok := w.(http.Hijacker)
- if !ok {
- return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker")
- }
- var brw *bufio.ReadWriter
- netConn, brw, err := h.Hijack()
- if err != nil {
- return u.returnError(w, r, http.StatusInternalServerError, err.Error())
- }
-
- if brw.Reader.Buffered() > 0 {
- netConn.Close()
- return nil, errors.New("websocket: client sent data before handshake is complete")
- }
-
- var br *bufio.Reader
- if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 {
- // Reuse hijacked buffered reader as connection reader.
- br = brw.Reader
- }
-
- buf := bufioWriterBuffer(netConn, brw.Writer)
-
- var writeBuf []byte
- if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 {
- // Reuse hijacked write buffer as connection buffer.
- writeBuf = buf
- }
-
- c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf)
- c.subprotocol = subprotocol
-
- if compress {
- c.newCompressionWriter = compressNoContextTakeover
- c.newDecompressionReader = decompressNoContextTakeover
- }
-
- // Use larger of hijacked buffer and connection write buffer for header.
- p := buf
- if len(c.writeBuf) > len(p) {
- p = c.writeBuf
- }
- p = p[:0]
-
- p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...)
- p = append(p, computeAcceptKey(challengeKey)...)
- p = append(p, "\r\n"...)
- if c.subprotocol != "" {
- p = append(p, "Sec-WebSocket-Protocol: "...)
- p = append(p, c.subprotocol...)
- p = append(p, "\r\n"...)
- }
- if compress {
- p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...)
- }
- for k, vs := range responseHeader {
- if k == "Sec-Websocket-Protocol" {
- continue
- }
- for _, v := range vs {
- p = append(p, k...)
- p = append(p, ": "...)
- for i := 0; i < len(v); i++ {
- b := v[i]
- if b <= 31 {
- // prevent response splitting.
- b = ' '
- }
- p = append(p, b)
- }
- p = append(p, "\r\n"...)
- }
- }
- p = append(p, "\r\n"...)
-
- // Clear deadlines set by HTTP server.
- netConn.SetDeadline(time.Time{})
-
- if u.HandshakeTimeout > 0 {
- netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout))
- }
- if _, err = netConn.Write(p); err != nil {
- netConn.Close()
- return nil, err
- }
- if u.HandshakeTimeout > 0 {
- netConn.SetWriteDeadline(time.Time{})
- }
-
- return c, nil
-}
-
-// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
-//
-// Deprecated: Use websocket.Upgrader instead.
-//
-// Upgrade does not perform origin checking. The application is responsible for
-// checking the Origin header before calling Upgrade. An example implementation
-// of the same origin policy check is:
-//
-// if req.Header.Get("Origin") != "http://"+req.Host {
-// http.Error(w, "Origin not allowed", http.StatusForbidden)
-// return
-// }
-//
-// If the endpoint supports subprotocols, then the application is responsible
-// for negotiating the protocol used on the connection. Use the Subprotocols()
-// function to get the subprotocols requested by the client. Use the
-// Sec-Websocket-Protocol response header to specify the subprotocol selected
-// by the application.
-//
-// The responseHeader is included in the response to the client's upgrade
-// request. Use the responseHeader to specify cookies (Set-Cookie) and the
-// negotiated subprotocol (Sec-Websocket-Protocol).
-//
-// The connection buffers IO to the underlying network connection. The
-// readBufSize and writeBufSize parameters specify the size of the buffers to
-// use. Messages can be larger than the buffers.
-//
-// If the request is not a valid WebSocket handshake, then Upgrade returns an
-// error of type HandshakeError. Applications should handle this error by
-// replying to the client with an HTTP error response.
-func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) {
- u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize}
- u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) {
- // don't return errors to maintain backwards compatibility
- }
- u.CheckOrigin = func(r *http.Request) bool {
- // allow all connections by default
- return true
- }
- return u.Upgrade(w, r, responseHeader)
-}
-
-// Subprotocols returns the subprotocols requested by the client in the
-// Sec-Websocket-Protocol header.
-func Subprotocols(r *http.Request) []string {
- h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol"))
- if h == "" {
- return nil
- }
- protocols := strings.Split(h, ",")
- for i := range protocols {
- protocols[i] = strings.TrimSpace(protocols[i])
- }
- return protocols
-}
-
-// IsWebSocketUpgrade returns true if the client requested upgrade to the
-// WebSocket protocol.
-func IsWebSocketUpgrade(r *http.Request) bool {
- return tokenListContainsValue(r.Header, "Connection", "upgrade") &&
- tokenListContainsValue(r.Header, "Upgrade", "websocket")
-}
-
-// bufioReaderSize size returns the size of a bufio.Reader.
-func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int {
- // This code assumes that peek on a reset reader returns
- // bufio.Reader.buf[:0].
- // TODO: Use bufio.Reader.Size() after Go 1.10
- br.Reset(originalReader)
- if p, err := br.Peek(0); err == nil {
- return cap(p)
- }
- return 0
-}
-
-// writeHook is an io.Writer that records the last slice passed to it vio
-// io.Writer.Write.
-type writeHook struct {
- p []byte
-}
-
-func (wh *writeHook) Write(p []byte) (int, error) {
- wh.p = p
- return len(p), nil
-}
-
-// bufioWriterBuffer grabs the buffer from a bufio.Writer.
-func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte {
- // This code assumes that bufio.Writer.buf[:1] is passed to the
- // bufio.Writer's underlying writer.
- var wh writeHook
- bw.Reset(&wh)
- bw.WriteByte(0)
- bw.Flush()
-
- bw.Reset(originalWriter)
-
- return wh.p[:cap(wh.p)]
-}
diff --git a/vendor/github.com/gorilla/websocket/tls_handshake.go b/vendor/github.com/gorilla/websocket/tls_handshake.go
deleted file mode 100644
index a62b68cc..00000000
--- a/vendor/github.com/gorilla/websocket/tls_handshake.go
+++ /dev/null
@@ -1,21 +0,0 @@
-//go:build go1.17
-// +build go1.17
-
-package websocket
-
-import (
- "context"
- "crypto/tls"
-)
-
-func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error {
- if err := tlsConn.HandshakeContext(ctx); err != nil {
- return err
- }
- if !cfg.InsecureSkipVerify {
- if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/vendor/github.com/gorilla/websocket/tls_handshake_116.go b/vendor/github.com/gorilla/websocket/tls_handshake_116.go
deleted file mode 100644
index e1b2b44f..00000000
--- a/vendor/github.com/gorilla/websocket/tls_handshake_116.go
+++ /dev/null
@@ -1,21 +0,0 @@
-//go:build !go1.17
-// +build !go1.17
-
-package websocket
-
-import (
- "context"
- "crypto/tls"
-)
-
-func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error {
- if err := tlsConn.Handshake(); err != nil {
- return err
- }
- if !cfg.InsecureSkipVerify {
- if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go
deleted file mode 100644
index 7bf2f66c..00000000
--- a/vendor/github.com/gorilla/websocket/util.go
+++ /dev/null
@@ -1,283 +0,0 @@
-// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package websocket
-
-import (
- "crypto/rand"
- "crypto/sha1"
- "encoding/base64"
- "io"
- "net/http"
- "strings"
- "unicode/utf8"
-)
-
-var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
-
-func computeAcceptKey(challengeKey string) string {
- h := sha1.New()
- h.Write([]byte(challengeKey))
- h.Write(keyGUID)
- return base64.StdEncoding.EncodeToString(h.Sum(nil))
-}
-
-func generateChallengeKey() (string, error) {
- p := make([]byte, 16)
- if _, err := io.ReadFull(rand.Reader, p); err != nil {
- return "", err
- }
- return base64.StdEncoding.EncodeToString(p), nil
-}
-
-// Token octets per RFC 2616.
-var isTokenOctet = [256]bool{
- '!': true,
- '#': true,
- '$': true,
- '%': true,
- '&': true,
- '\'': true,
- '*': true,
- '+': true,
- '-': true,
- '.': true,
- '0': true,
- '1': true,
- '2': true,
- '3': true,
- '4': true,
- '5': true,
- '6': true,
- '7': true,
- '8': true,
- '9': true,
- 'A': true,
- 'B': true,
- 'C': true,
- 'D': true,
- 'E': true,
- 'F': true,
- 'G': true,
- 'H': true,
- 'I': true,
- 'J': true,
- 'K': true,
- 'L': true,
- 'M': true,
- 'N': true,
- 'O': true,
- 'P': true,
- 'Q': true,
- 'R': true,
- 'S': true,
- 'T': true,
- 'U': true,
- 'W': true,
- 'V': true,
- 'X': true,
- 'Y': true,
- 'Z': true,
- '^': true,
- '_': true,
- '`': true,
- 'a': true,
- 'b': true,
- 'c': true,
- 'd': true,
- 'e': true,
- 'f': true,
- 'g': true,
- 'h': true,
- 'i': true,
- 'j': true,
- 'k': true,
- 'l': true,
- 'm': true,
- 'n': true,
- 'o': true,
- 'p': true,
- 'q': true,
- 'r': true,
- 's': true,
- 't': true,
- 'u': true,
- 'v': true,
- 'w': true,
- 'x': true,
- 'y': true,
- 'z': true,
- '|': true,
- '~': true,
-}
-
-// skipSpace returns a slice of the string s with all leading RFC 2616 linear
-// whitespace removed.
-func skipSpace(s string) (rest string) {
- i := 0
- for ; i < len(s); i++ {
- if b := s[i]; b != ' ' && b != '\t' {
- break
- }
- }
- return s[i:]
-}
-
-// nextToken returns the leading RFC 2616 token of s and the string following
-// the token.
-func nextToken(s string) (token, rest string) {
- i := 0
- for ; i < len(s); i++ {
- if !isTokenOctet[s[i]] {
- break
- }
- }
- return s[:i], s[i:]
-}
-
-// nextTokenOrQuoted returns the leading token or quoted string per RFC 2616
-// and the string following the token or quoted string.
-func nextTokenOrQuoted(s string) (value string, rest string) {
- if !strings.HasPrefix(s, "\"") {
- return nextToken(s)
- }
- s = s[1:]
- for i := 0; i < len(s); i++ {
- switch s[i] {
- case '"':
- return s[:i], s[i+1:]
- case '\\':
- p := make([]byte, len(s)-1)
- j := copy(p, s[:i])
- escape := true
- for i = i + 1; i < len(s); i++ {
- b := s[i]
- switch {
- case escape:
- escape = false
- p[j] = b
- j++
- case b == '\\':
- escape = true
- case b == '"':
- return string(p[:j]), s[i+1:]
- default:
- p[j] = b
- j++
- }
- }
- return "", ""
- }
- }
- return "", ""
-}
-
-// equalASCIIFold returns true if s is equal to t with ASCII case folding as
-// defined in RFC 4790.
-func equalASCIIFold(s, t string) bool {
- for s != "" && t != "" {
- sr, size := utf8.DecodeRuneInString(s)
- s = s[size:]
- tr, size := utf8.DecodeRuneInString(t)
- t = t[size:]
- if sr == tr {
- continue
- }
- if 'A' <= sr && sr <= 'Z' {
- sr = sr + 'a' - 'A'
- }
- if 'A' <= tr && tr <= 'Z' {
- tr = tr + 'a' - 'A'
- }
- if sr != tr {
- return false
- }
- }
- return s == t
-}
-
-// tokenListContainsValue returns true if the 1#token header with the given
-// name contains a token equal to value with ASCII case folding.
-func tokenListContainsValue(header http.Header, name string, value string) bool {
-headers:
- for _, s := range header[name] {
- for {
- var t string
- t, s = nextToken(skipSpace(s))
- if t == "" {
- continue headers
- }
- s = skipSpace(s)
- if s != "" && s[0] != ',' {
- continue headers
- }
- if equalASCIIFold(t, value) {
- return true
- }
- if s == "" {
- continue headers
- }
- s = s[1:]
- }
- }
- return false
-}
-
-// parseExtensions parses WebSocket extensions from a header.
-func parseExtensions(header http.Header) []map[string]string {
- // From RFC 6455:
- //
- // Sec-WebSocket-Extensions = extension-list
- // extension-list = 1#extension
- // extension = extension-token *( ";" extension-param )
- // extension-token = registered-token
- // registered-token = token
- // extension-param = token [ "=" (token | quoted-string) ]
- // ;When using the quoted-string syntax variant, the value
- // ;after quoted-string unescaping MUST conform to the
- // ;'token' ABNF.
-
- var result []map[string]string
-headers:
- for _, s := range header["Sec-Websocket-Extensions"] {
- for {
- var t string
- t, s = nextToken(skipSpace(s))
- if t == "" {
- continue headers
- }
- ext := map[string]string{"": t}
- for {
- s = skipSpace(s)
- if !strings.HasPrefix(s, ";") {
- break
- }
- var k string
- k, s = nextToken(skipSpace(s[1:]))
- if k == "" {
- continue headers
- }
- s = skipSpace(s)
- var v string
- if strings.HasPrefix(s, "=") {
- v, s = nextTokenOrQuoted(skipSpace(s[1:]))
- s = skipSpace(s)
- }
- if s != "" && s[0] != ',' && s[0] != ';' {
- continue headers
- }
- ext[k] = v
- }
- if s != "" && s[0] != ',' {
- continue headers
- }
- result = append(result, ext)
- if s == "" {
- continue headers
- }
- s = s[1:]
- }
- }
- return result
-}
diff --git a/vendor/github.com/gorilla/websocket/x_net_proxy.go b/vendor/github.com/gorilla/websocket/x_net_proxy.go
deleted file mode 100644
index 2e668f6b..00000000
--- a/vendor/github.com/gorilla/websocket/x_net_proxy.go
+++ /dev/null
@@ -1,473 +0,0 @@
-// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.
-//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy
-
-// Package proxy provides support for a variety of protocols to proxy network
-// data.
-//
-
-package websocket
-
-import (
- "errors"
- "io"
- "net"
- "net/url"
- "os"
- "strconv"
- "strings"
- "sync"
-)
-
-type proxy_direct struct{}
-
-// Direct is a direct proxy: one that makes network connections directly.
-var proxy_Direct = proxy_direct{}
-
-func (proxy_direct) Dial(network, addr string) (net.Conn, error) {
- return net.Dial(network, addr)
-}
-
-// A PerHost directs connections to a default Dialer unless the host name
-// requested matches one of a number of exceptions.
-type proxy_PerHost struct {
- def, bypass proxy_Dialer
-
- bypassNetworks []*net.IPNet
- bypassIPs []net.IP
- bypassZones []string
- bypassHosts []string
-}
-
-// NewPerHost returns a PerHost Dialer that directs connections to either
-// defaultDialer or bypass, depending on whether the connection matches one of
-// the configured rules.
-func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost {
- return &proxy_PerHost{
- def: defaultDialer,
- bypass: bypass,
- }
-}
-
-// Dial connects to the address addr on the given network through either
-// defaultDialer or bypass.
-func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) {
- host, _, err := net.SplitHostPort(addr)
- if err != nil {
- return nil, err
- }
-
- return p.dialerForRequest(host).Dial(network, addr)
-}
-
-func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer {
- if ip := net.ParseIP(host); ip != nil {
- for _, net := range p.bypassNetworks {
- if net.Contains(ip) {
- return p.bypass
- }
- }
- for _, bypassIP := range p.bypassIPs {
- if bypassIP.Equal(ip) {
- return p.bypass
- }
- }
- return p.def
- }
-
- for _, zone := range p.bypassZones {
- if strings.HasSuffix(host, zone) {
- return p.bypass
- }
- if host == zone[1:] {
- // For a zone ".example.com", we match "example.com"
- // too.
- return p.bypass
- }
- }
- for _, bypassHost := range p.bypassHosts {
- if bypassHost == host {
- return p.bypass
- }
- }
- return p.def
-}
-
-// AddFromString parses a string that contains comma-separated values
-// specifying hosts that should use the bypass proxy. Each value is either an
-// IP address, a CIDR range, a zone (*.example.com) or a host name
-// (localhost). A best effort is made to parse the string and errors are
-// ignored.
-func (p *proxy_PerHost) AddFromString(s string) {
- hosts := strings.Split(s, ",")
- for _, host := range hosts {
- host = strings.TrimSpace(host)
- if len(host) == 0 {
- continue
- }
- if strings.Contains(host, "/") {
- // We assume that it's a CIDR address like 127.0.0.0/8
- if _, net, err := net.ParseCIDR(host); err == nil {
- p.AddNetwork(net)
- }
- continue
- }
- if ip := net.ParseIP(host); ip != nil {
- p.AddIP(ip)
- continue
- }
- if strings.HasPrefix(host, "*.") {
- p.AddZone(host[1:])
- continue
- }
- p.AddHost(host)
- }
-}
-
-// AddIP specifies an IP address that will use the bypass proxy. Note that
-// this will only take effect if a literal IP address is dialed. A connection
-// to a named host will never match an IP.
-func (p *proxy_PerHost) AddIP(ip net.IP) {
- p.bypassIPs = append(p.bypassIPs, ip)
-}
-
-// AddNetwork specifies an IP range that will use the bypass proxy. Note that
-// this will only take effect if a literal IP address is dialed. A connection
-// to a named host will never match.
-func (p *proxy_PerHost) AddNetwork(net *net.IPNet) {
- p.bypassNetworks = append(p.bypassNetworks, net)
-}
-
-// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
-// "example.com" matches "example.com" and all of its subdomains.
-func (p *proxy_PerHost) AddZone(zone string) {
- if strings.HasSuffix(zone, ".") {
- zone = zone[:len(zone)-1]
- }
- if !strings.HasPrefix(zone, ".") {
- zone = "." + zone
- }
- p.bypassZones = append(p.bypassZones, zone)
-}
-
-// AddHost specifies a host name that will use the bypass proxy.
-func (p *proxy_PerHost) AddHost(host string) {
- if strings.HasSuffix(host, ".") {
- host = host[:len(host)-1]
- }
- p.bypassHosts = append(p.bypassHosts, host)
-}
-
-// A Dialer is a means to establish a connection.
-type proxy_Dialer interface {
- // Dial connects to the given address via the proxy.
- Dial(network, addr string) (c net.Conn, err error)
-}
-
-// Auth contains authentication parameters that specific Dialers may require.
-type proxy_Auth struct {
- User, Password string
-}
-
-// FromEnvironment returns the dialer specified by the proxy related variables in
-// the environment.
-func proxy_FromEnvironment() proxy_Dialer {
- allProxy := proxy_allProxyEnv.Get()
- if len(allProxy) == 0 {
- return proxy_Direct
- }
-
- proxyURL, err := url.Parse(allProxy)
- if err != nil {
- return proxy_Direct
- }
- proxy, err := proxy_FromURL(proxyURL, proxy_Direct)
- if err != nil {
- return proxy_Direct
- }
-
- noProxy := proxy_noProxyEnv.Get()
- if len(noProxy) == 0 {
- return proxy
- }
-
- perHost := proxy_NewPerHost(proxy, proxy_Direct)
- perHost.AddFromString(noProxy)
- return perHost
-}
-
-// proxySchemes is a map from URL schemes to a function that creates a Dialer
-// from a URL with such a scheme.
-var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)
-
-// RegisterDialerType takes a URL scheme and a function to generate Dialers from
-// a URL with that scheme and a forwarding Dialer. Registered schemes are used
-// by FromURL.
-func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) {
- if proxy_proxySchemes == nil {
- proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error))
- }
- proxy_proxySchemes[scheme] = f
-}
-
-// FromURL returns a Dialer given a URL specification and an underlying
-// Dialer for it to make network requests.
-func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) {
- var auth *proxy_Auth
- if u.User != nil {
- auth = new(proxy_Auth)
- auth.User = u.User.Username()
- if p, ok := u.User.Password(); ok {
- auth.Password = p
- }
- }
-
- switch u.Scheme {
- case "socks5":
- return proxy_SOCKS5("tcp", u.Host, auth, forward)
- }
-
- // If the scheme doesn't match any of the built-in schemes, see if it
- // was registered by another package.
- if proxy_proxySchemes != nil {
- if f, ok := proxy_proxySchemes[u.Scheme]; ok {
- return f(u, forward)
- }
- }
-
- return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
-}
-
-var (
- proxy_allProxyEnv = &proxy_envOnce{
- names: []string{"ALL_PROXY", "all_proxy"},
- }
- proxy_noProxyEnv = &proxy_envOnce{
- names: []string{"NO_PROXY", "no_proxy"},
- }
-)
-
-// envOnce looks up an environment variable (optionally by multiple
-// names) once. It mitigates expensive lookups on some platforms
-// (e.g. Windows).
-// (Borrowed from net/http/transport.go)
-type proxy_envOnce struct {
- names []string
- once sync.Once
- val string
-}
-
-func (e *proxy_envOnce) Get() string {
- e.once.Do(e.init)
- return e.val
-}
-
-func (e *proxy_envOnce) init() {
- for _, n := range e.names {
- e.val = os.Getenv(n)
- if e.val != "" {
- return
- }
- }
-}
-
-// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address
-// with an optional username and password. See RFC 1928 and RFC 1929.
-func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) {
- s := &proxy_socks5{
- network: network,
- addr: addr,
- forward: forward,
- }
- if auth != nil {
- s.user = auth.User
- s.password = auth.Password
- }
-
- return s, nil
-}
-
-type proxy_socks5 struct {
- user, password string
- network, addr string
- forward proxy_Dialer
-}
-
-const proxy_socks5Version = 5
-
-const (
- proxy_socks5AuthNone = 0
- proxy_socks5AuthPassword = 2
-)
-
-const proxy_socks5Connect = 1
-
-const (
- proxy_socks5IP4 = 1
- proxy_socks5Domain = 3
- proxy_socks5IP6 = 4
-)
-
-var proxy_socks5Errors = []string{
- "",
- "general failure",
- "connection forbidden",
- "network unreachable",
- "host unreachable",
- "connection refused",
- "TTL expired",
- "command not supported",
- "address type not supported",
-}
-
-// Dial connects to the address addr on the given network via the SOCKS5 proxy.
-func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) {
- switch network {
- case "tcp", "tcp6", "tcp4":
- default:
- return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network)
- }
-
- conn, err := s.forward.Dial(s.network, s.addr)
- if err != nil {
- return nil, err
- }
- if err := s.connect(conn, addr); err != nil {
- conn.Close()
- return nil, err
- }
- return conn, nil
-}
-
-// connect takes an existing connection to a socks5 proxy server,
-// and commands the server to extend that connection to target,
-// which must be a canonical address with a host and port.
-func (s *proxy_socks5) connect(conn net.Conn, target string) error {
- host, portStr, err := net.SplitHostPort(target)
- if err != nil {
- return err
- }
-
- port, err := strconv.Atoi(portStr)
- if err != nil {
- return errors.New("proxy: failed to parse port number: " + portStr)
- }
- if port < 1 || port > 0xffff {
- return errors.New("proxy: port number out of range: " + portStr)
- }
-
- // the size here is just an estimate
- buf := make([]byte, 0, 6+len(host))
-
- buf = append(buf, proxy_socks5Version)
- if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 {
- buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword)
- } else {
- buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone)
- }
-
- if _, err := conn.Write(buf); err != nil {
- return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
-
- if _, err := io.ReadFull(conn, buf[:2]); err != nil {
- return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
- if buf[0] != 5 {
- return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0])))
- }
- if buf[1] == 0xff {
- return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication")
- }
-
- // See RFC 1929
- if buf[1] == proxy_socks5AuthPassword {
- buf = buf[:0]
- buf = append(buf, 1 /* password protocol version */)
- buf = append(buf, uint8(len(s.user)))
- buf = append(buf, s.user...)
- buf = append(buf, uint8(len(s.password)))
- buf = append(buf, s.password...)
-
- if _, err := conn.Write(buf); err != nil {
- return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
-
- if _, err := io.ReadFull(conn, buf[:2]); err != nil {
- return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
-
- if buf[1] != 0 {
- return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password")
- }
- }
-
- buf = buf[:0]
- buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */)
-
- if ip := net.ParseIP(host); ip != nil {
- if ip4 := ip.To4(); ip4 != nil {
- buf = append(buf, proxy_socks5IP4)
- ip = ip4
- } else {
- buf = append(buf, proxy_socks5IP6)
- }
- buf = append(buf, ip...)
- } else {
- if len(host) > 255 {
- return errors.New("proxy: destination host name too long: " + host)
- }
- buf = append(buf, proxy_socks5Domain)
- buf = append(buf, byte(len(host)))
- buf = append(buf, host...)
- }
- buf = append(buf, byte(port>>8), byte(port))
-
- if _, err := conn.Write(buf); err != nil {
- return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
-
- if _, err := io.ReadFull(conn, buf[:4]); err != nil {
- return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
-
- failure := "unknown error"
- if int(buf[1]) < len(proxy_socks5Errors) {
- failure = proxy_socks5Errors[buf[1]]
- }
-
- if len(failure) > 0 {
- return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure)
- }
-
- bytesToDiscard := 0
- switch buf[3] {
- case proxy_socks5IP4:
- bytesToDiscard = net.IPv4len
- case proxy_socks5IP6:
- bytesToDiscard = net.IPv6len
- case proxy_socks5Domain:
- _, err := io.ReadFull(conn, buf[:1])
- if err != nil {
- return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
- bytesToDiscard = int(buf[0])
- default:
- return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr)
- }
-
- if cap(buf) < bytesToDiscard {
- buf = make([]byte, bytesToDiscard)
- } else {
- buf = buf[:bytesToDiscard]
- }
- if _, err := io.ReadFull(conn, buf); err != nil {
- return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
-
- // Also need to discard the port number
- if _, err := io.ReadFull(conn, buf[:2]); err != nil {
- return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
-
- return nil
-}
diff --git a/vendor/github.com/jinzhu/copier/License b/vendor/github.com/jinzhu/copier/License
deleted file mode 100644
index e2dc5381..00000000
--- a/vendor/github.com/jinzhu/copier/License
+++ /dev/null
@@ -1,20 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2015 Jinzhu
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/jinzhu/copier/README.md b/vendor/github.com/jinzhu/copier/README.md
deleted file mode 100644
index ec04b4be..00000000
--- a/vendor/github.com/jinzhu/copier/README.md
+++ /dev/null
@@ -1,132 +0,0 @@
-# Copier
-
- I am a copier, I copy everything from one to another
-
-[![test status](https://github.com/jinzhu/copier/workflows/tests/badge.svg?branch=master "test status")](https://github.com/jinzhu/copier/actions)
-
-## Features
-
-* Copy from field to field with same name
-* Copy from method to field with same name
-* Copy from field to method with same name
-* Copy from slice to slice
-* Copy from struct to slice
-* Copy from map to map
-* Enforce copying a field with a tag
-* Ignore a field with a tag
-* Deep Copy
-
-## Usage
-
-```go
-package main
-
-import (
- "fmt"
- "github.com/jinzhu/copier"
-)
-
-type User struct {
- Name string
- Role string
- Age int32
- EmployeCode int64 `copier:"EmployeNum"` // specify field name
-
- // Explicitly ignored in the destination struct.
- Salary int
-}
-
-func (user *User) DoubleAge() int32 {
- return 2 * user.Age
-}
-
-// Tags in the destination Struct provide instructions to copier.Copy to ignore
-// or enforce copying and to panic or return an error if a field was not copied.
-type Employee struct {
- // Tell copier.Copy to panic if this field is not copied.
- Name string `copier:"must"`
-
- // Tell copier.Copy to return an error if this field is not copied.
- Age int32 `copier:"must,nopanic"`
-
- // Tell copier.Copy to explicitly ignore copying this field.
- Salary int `copier:"-"`
-
- DoubleAge int32
- EmployeId int64 `copier:"EmployeNum"` // specify field name
- SuperRole string
-}
-
-func (employee *Employee) Role(role string) {
- employee.SuperRole = "Super " + role
-}
-
-func main() {
- var (
- user = User{Name: "Jinzhu", Age: 18, Role: "Admin", Salary: 200000}
- users = []User{{Name: "Jinzhu", Age: 18, Role: "Admin", Salary: 100000}, {Name: "jinzhu 2", Age: 30, Role: "Dev", Salary: 60000}}
- employee = Employee{Salary: 150000}
- employees = []Employee{}
- )
-
- copier.Copy(&employee, &user)
-
- fmt.Printf("%#v \n", employee)
- // Employee{
- // Name: "Jinzhu", // Copy from field
- // Age: 18, // Copy from field
- // Salary:150000, // Copying explicitly ignored
- // DoubleAge: 36, // Copy from method
- // EmployeeId: 0, // Ignored
- // SuperRole: "Super Admin", // Copy to method
- // }
-
- // Copy struct to slice
- copier.Copy(&employees, &user)
-
- fmt.Printf("%#v \n", employees)
- // []Employee{
- // {Name: "Jinzhu", Age: 18, Salary:0, DoubleAge: 36, EmployeId: 0, SuperRole: "Super Admin"}
- // }
-
- // Copy slice to slice
- employees = []Employee{}
- copier.Copy(&employees, &users)
-
- fmt.Printf("%#v \n", employees)
- // []Employee{
- // {Name: "Jinzhu", Age: 18, Salary:0, DoubleAge: 36, EmployeId: 0, SuperRole: "Super Admin"},
- // {Name: "jinzhu 2", Age: 30, Salary:0, DoubleAge: 60, EmployeId: 0, SuperRole: "Super Dev"},
- // }
-
- // Copy map to map
- map1 := map[int]int{3: 6, 4: 8}
- map2 := map[int32]int8{}
- copier.Copy(&map2, map1)
-
- fmt.Printf("%#v \n", map2)
- // map[int32]int8{3:6, 4:8}
-}
-```
-
-### Copy with Option
-
-```go
-copier.CopyWithOption(&to, &from, copier.Option{IgnoreEmpty: true, DeepCopy: true})
-```
-
-## Contributing
-
-You can help to make the project better, check out [http://gorm.io/contribute.html](http://gorm.io/contribute.html) for things you can do.
-
-# Author
-
-**jinzhu**
-
-*
-*
-*
-
-## License
-
-Released under the [MIT License](https://github.com/jinzhu/copier/blob/master/License).
diff --git a/vendor/github.com/jinzhu/copier/copier.go b/vendor/github.com/jinzhu/copier/copier.go
deleted file mode 100644
index 6dc9600c..00000000
--- a/vendor/github.com/jinzhu/copier/copier.go
+++ /dev/null
@@ -1,697 +0,0 @@
-package copier
-
-import (
- "database/sql"
- "database/sql/driver"
- "errors"
- "fmt"
- "reflect"
- "strings"
- "unicode"
-)
-
-// These flags define options for tag handling
-const (
- // Denotes that a destination field must be copied to. If copying fails then a panic will ensue.
- tagMust uint8 = 1 << iota
-
- // Denotes that the program should not panic when the must flag is on and
- // value is not copied. The program will return an error instead.
- tagNoPanic
-
- // Ignore a destination field from being copied to.
- tagIgnore
-
- // Denotes that the value as been copied
- hasCopied
-
- // Some default converter types for a nicer syntax
- String string = ""
- Bool bool = false
- Int int = 0
- Float32 float32 = 0
- Float64 float64 = 0
-)
-
-// Option sets copy options
-type Option struct {
- // setting this value to true will ignore copying zero values of all the fields, including bools, as well as a
- // struct having all it's fields set to their zero values respectively (see IsZero() in reflect/value.go)
- IgnoreEmpty bool
- DeepCopy bool
- Converters []TypeConverter
-}
-
-type TypeConverter struct {
- SrcType interface{}
- DstType interface{}
- Fn func(src interface{}) (interface{}, error)
-}
-
-type converterPair struct {
- SrcType reflect.Type
- DstType reflect.Type
-}
-
-// Tag Flags
-type flags struct {
- BitFlags map[string]uint8
- SrcNames tagNameMapping
- DestNames tagNameMapping
-}
-
-// Field Tag name mapping
-type tagNameMapping struct {
- FieldNameToTag map[string]string
- TagToFieldName map[string]string
-}
-
-// Copy copy things
-func Copy(toValue interface{}, fromValue interface{}) (err error) {
- return copier(toValue, fromValue, Option{})
-}
-
-// CopyWithOption copy with option
-func CopyWithOption(toValue interface{}, fromValue interface{}, opt Option) (err error) {
- return copier(toValue, fromValue, opt)
-}
-
-func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) {
- var (
- isSlice bool
- amount = 1
- from = indirect(reflect.ValueOf(fromValue))
- to = indirect(reflect.ValueOf(toValue))
- converters map[converterPair]TypeConverter
- )
-
- // save convertes into map for faster lookup
- for i := range opt.Converters {
- if converters == nil {
- converters = make(map[converterPair]TypeConverter)
- }
-
- pair := converterPair{
- SrcType: reflect.TypeOf(opt.Converters[i].SrcType),
- DstType: reflect.TypeOf(opt.Converters[i].DstType),
- }
-
- converters[pair] = opt.Converters[i]
- }
-
- if !to.CanAddr() {
- return ErrInvalidCopyDestination
- }
-
- // Return is from value is invalid
- if !from.IsValid() {
- return ErrInvalidCopyFrom
- }
-
- fromType, isPtrFrom := indirectType(from.Type())
- toType, _ := indirectType(to.Type())
-
- if fromType.Kind() == reflect.Interface {
- fromType = reflect.TypeOf(from.Interface())
- }
-
- if toType.Kind() == reflect.Interface {
- toType, _ = indirectType(reflect.TypeOf(to.Interface()))
- oldTo := to
- to = reflect.New(reflect.TypeOf(to.Interface())).Elem()
- defer func() {
- oldTo.Set(to)
- }()
- }
-
- // Just set it if possible to assign for normal types
- if from.Kind() != reflect.Slice && from.Kind() != reflect.Struct && from.Kind() != reflect.Map && (from.Type().AssignableTo(to.Type()) || from.Type().ConvertibleTo(to.Type())) {
- if !isPtrFrom || !opt.DeepCopy {
- to.Set(from.Convert(to.Type()))
- } else {
- fromCopy := reflect.New(from.Type())
- fromCopy.Set(from.Elem())
- to.Set(fromCopy.Convert(to.Type()))
- }
- return
- }
-
- if from.Kind() != reflect.Slice && fromType.Kind() == reflect.Map && toType.Kind() == reflect.Map {
- if !fromType.Key().ConvertibleTo(toType.Key()) {
- return ErrMapKeyNotMatch
- }
-
- if to.IsNil() {
- to.Set(reflect.MakeMapWithSize(toType, from.Len()))
- }
-
- for _, k := range from.MapKeys() {
- toKey := indirect(reflect.New(toType.Key()))
- if !set(toKey, k, opt.DeepCopy, converters) {
- return fmt.Errorf("%w map, old key: %v, new key: %v", ErrNotSupported, k.Type(), toType.Key())
- }
-
- elemType := toType.Elem()
- if elemType.Kind() != reflect.Slice {
- elemType, _ = indirectType(elemType)
- }
- toValue := indirect(reflect.New(elemType))
- if !set(toValue, from.MapIndex(k), opt.DeepCopy, converters) {
- if err = copier(toValue.Addr().Interface(), from.MapIndex(k).Interface(), opt); err != nil {
- return err
- }
- }
-
- for {
- if elemType == toType.Elem() {
- to.SetMapIndex(toKey, toValue)
- break
- }
- elemType = reflect.PtrTo(elemType)
- toValue = toValue.Addr()
- }
- }
- return
- }
-
- if from.Kind() == reflect.Slice && to.Kind() == reflect.Slice && fromType.ConvertibleTo(toType) {
- if to.IsNil() {
- slice := reflect.MakeSlice(reflect.SliceOf(to.Type().Elem()), from.Len(), from.Cap())
- to.Set(slice)
- }
-
- for i := 0; i < from.Len(); i++ {
- if to.Len() < i+1 {
- to.Set(reflect.Append(to, reflect.New(to.Type().Elem()).Elem()))
- }
-
- if !set(to.Index(i), from.Index(i), opt.DeepCopy, converters) {
- // ignore error while copy slice element
- err = copier(to.Index(i).Addr().Interface(), from.Index(i).Interface(), opt)
- if err != nil {
- continue
- }
- }
- }
- return
- }
-
- if fromType.Kind() != reflect.Struct || toType.Kind() != reflect.Struct {
- // skip not supported type
- return
- }
-
- if from.Kind() == reflect.Slice || to.Kind() == reflect.Slice {
- isSlice = true
- if from.Kind() == reflect.Slice {
- amount = from.Len()
- }
- }
-
- for i := 0; i < amount; i++ {
- var dest, source reflect.Value
-
- if isSlice {
- // source
- if from.Kind() == reflect.Slice {
- source = indirect(from.Index(i))
- } else {
- source = indirect(from)
- }
- // dest
- dest = indirect(reflect.New(toType).Elem())
- } else {
- source = indirect(from)
- dest = indirect(to)
- }
-
- destKind := dest.Kind()
- initDest := false
- if destKind == reflect.Interface {
- initDest = true
- dest = indirect(reflect.New(toType))
- }
-
- // Get tag options
- flgs, err := getFlags(dest, source, toType, fromType)
- if err != nil {
- return err
- }
-
- // check source
- if source.IsValid() {
- copyUnexportedStructFields(dest, source)
-
- // Copy from source field to dest field or method
- fromTypeFields := deepFields(fromType)
- for _, field := range fromTypeFields {
- name := field.Name
-
- // Get bit flags for field
- fieldFlags, _ := flgs.BitFlags[name]
-
- // Check if we should ignore copying
- if (fieldFlags & tagIgnore) != 0 {
- continue
- }
-
- srcFieldName, destFieldName := getFieldName(name, flgs)
- if fromField := source.FieldByName(srcFieldName); fromField.IsValid() && !shouldIgnore(fromField, opt.IgnoreEmpty) {
- // process for nested anonymous field
- destFieldNotSet := false
- if f, ok := dest.Type().FieldByName(destFieldName); ok {
- for idx := range f.Index {
- destField := dest.FieldByIndex(f.Index[:idx+1])
-
- if destField.Kind() != reflect.Ptr {
- continue
- }
-
- if !destField.IsNil() {
- continue
- }
- if !destField.CanSet() {
- destFieldNotSet = true
- break
- }
-
- // destField is a nil pointer that can be set
- newValue := reflect.New(destField.Type().Elem())
- destField.Set(newValue)
- }
- }
-
- if destFieldNotSet {
- break
- }
-
- toField := dest.FieldByName(destFieldName)
- if toField.IsValid() {
- if toField.CanSet() {
- if !set(toField, fromField, opt.DeepCopy, converters) {
- if err := copier(toField.Addr().Interface(), fromField.Interface(), opt); err != nil {
- return err
- }
- }
- if fieldFlags != 0 {
- // Note that a copy was made
- flgs.BitFlags[name] = fieldFlags | hasCopied
- }
- }
- } else {
- // try to set to method
- var toMethod reflect.Value
- if dest.CanAddr() {
- toMethod = dest.Addr().MethodByName(destFieldName)
- } else {
- toMethod = dest.MethodByName(destFieldName)
- }
-
- if toMethod.IsValid() && toMethod.Type().NumIn() == 1 && fromField.Type().AssignableTo(toMethod.Type().In(0)) {
- toMethod.Call([]reflect.Value{fromField})
- }
- }
- }
- }
-
- // Copy from from method to dest field
- for _, field := range deepFields(toType) {
- name := field.Name
- srcFieldName, destFieldName := getFieldName(name, flgs)
-
- var fromMethod reflect.Value
- if source.CanAddr() {
- fromMethod = source.Addr().MethodByName(srcFieldName)
- } else {
- fromMethod = source.MethodByName(srcFieldName)
- }
-
- if fromMethod.IsValid() && fromMethod.Type().NumIn() == 0 && fromMethod.Type().NumOut() == 1 && !shouldIgnore(fromMethod, opt.IgnoreEmpty) {
- if toField := dest.FieldByName(destFieldName); toField.IsValid() && toField.CanSet() {
- values := fromMethod.Call([]reflect.Value{})
- if len(values) >= 1 {
- set(toField, values[0], opt.DeepCopy, converters)
- }
- }
- }
- }
- }
-
- if isSlice && to.Kind() == reflect.Slice {
- if dest.Addr().Type().AssignableTo(to.Type().Elem()) {
- if to.Len() < i+1 {
- to.Set(reflect.Append(to, dest.Addr()))
- } else {
- if !set(to.Index(i), dest.Addr(), opt.DeepCopy, converters) {
- // ignore error while copy slice element
- err = copier(to.Index(i).Addr().Interface(), dest.Addr().Interface(), opt)
- if err != nil {
- continue
- }
- }
- }
- } else if dest.Type().AssignableTo(to.Type().Elem()) {
- if to.Len() < i+1 {
- to.Set(reflect.Append(to, dest))
- } else {
- if !set(to.Index(i), dest, opt.DeepCopy, converters) {
- // ignore error while copy slice element
- err = copier(to.Index(i).Addr().Interface(), dest.Interface(), opt)
- if err != nil {
- continue
- }
- }
- }
- }
- } else if initDest {
- to.Set(dest)
- }
-
- err = checkBitFlags(flgs.BitFlags)
- }
-
- return
-}
-
-func copyUnexportedStructFields(to, from reflect.Value) {
- if from.Kind() != reflect.Struct || to.Kind() != reflect.Struct || !from.Type().AssignableTo(to.Type()) {
- return
- }
-
- // create a shallow copy of 'to' to get all fields
- tmp := indirect(reflect.New(to.Type()))
- tmp.Set(from)
-
- // revert exported fields
- for i := 0; i < to.NumField(); i++ {
- if tmp.Field(i).CanSet() {
- tmp.Field(i).Set(to.Field(i))
- }
- }
- to.Set(tmp)
-}
-
-func shouldIgnore(v reflect.Value, ignoreEmpty bool) bool {
- if !ignoreEmpty {
- return false
- }
-
- return v.IsZero()
-}
-
-func deepFields(reflectType reflect.Type) []reflect.StructField {
- if reflectType, _ = indirectType(reflectType); reflectType.Kind() == reflect.Struct {
- fields := make([]reflect.StructField, 0, reflectType.NumField())
-
- for i := 0; i < reflectType.NumField(); i++ {
- v := reflectType.Field(i)
- // PkgPath is the package path that qualifies a lower case (unexported)
- // field name. It is empty for upper case (exported) field names.
- // See https://golang.org/ref/spec#Uniqueness_of_identifiers
- if v.PkgPath == "" {
- fields = append(fields, v)
- if v.Anonymous {
- // also consider fields of anonymous fields as fields of the root
- fields = append(fields, deepFields(v.Type)...)
- }
- }
- }
-
- return fields
- }
-
- return nil
-}
-
-func indirect(reflectValue reflect.Value) reflect.Value {
- for reflectValue.Kind() == reflect.Ptr {
- reflectValue = reflectValue.Elem()
- }
- return reflectValue
-}
-
-func indirectType(reflectType reflect.Type) (_ reflect.Type, isPtr bool) {
- for reflectType.Kind() == reflect.Ptr || reflectType.Kind() == reflect.Slice {
- reflectType = reflectType.Elem()
- isPtr = true
- }
- return reflectType, isPtr
-}
-
-func set(to, from reflect.Value, deepCopy bool, converters map[converterPair]TypeConverter) bool {
- if from.IsValid() {
- if ok, err := lookupAndCopyWithConverter(to, from, converters); err != nil {
- return false
- } else if ok {
- return true
- }
-
- if to.Kind() == reflect.Ptr {
- // set `to` to nil if from is nil
- if from.Kind() == reflect.Ptr && from.IsNil() {
- to.Set(reflect.Zero(to.Type()))
- return true
- } else if to.IsNil() {
- // `from` -> `to`
- // sql.NullString -> *string
- if fromValuer, ok := driverValuer(from); ok {
- v, err := fromValuer.Value()
- if err != nil {
- return false
- }
- // if `from` is not valid do nothing with `to`
- if v == nil {
- return true
- }
- }
- // allocate new `to` variable with default value (eg. *string -> new(string))
- to.Set(reflect.New(to.Type().Elem()))
- }
- // depointer `to`
- to = to.Elem()
- }
-
- if deepCopy {
- toKind := to.Kind()
- if toKind == reflect.Interface && to.IsNil() {
- if reflect.TypeOf(from.Interface()) != nil {
- to.Set(reflect.New(reflect.TypeOf(from.Interface())).Elem())
- toKind = reflect.TypeOf(to.Interface()).Kind()
- }
- }
- if from.Kind() == reflect.Ptr && from.IsNil() {
- return true
- }
- if toKind == reflect.Struct || toKind == reflect.Map || toKind == reflect.Slice {
- return false
- }
- }
-
- if from.Type().ConvertibleTo(to.Type()) {
- to.Set(from.Convert(to.Type()))
- } else if toScanner, ok := to.Addr().Interface().(sql.Scanner); ok {
- // `from` -> `to`
- // *string -> sql.NullString
- if from.Kind() == reflect.Ptr {
- // if `from` is nil do nothing with `to`
- if from.IsNil() {
- return true
- }
- // depointer `from`
- from = indirect(from)
- }
- // `from` -> `to`
- // string -> sql.NullString
- // set `to` by invoking method Scan(`from`)
- err := toScanner.Scan(from.Interface())
- if err != nil {
- return false
- }
- } else if fromValuer, ok := driverValuer(from); ok {
- // `from` -> `to`
- // sql.NullString -> string
- v, err := fromValuer.Value()
- if err != nil {
- return false
- }
- // if `from` is not valid do nothing with `to`
- if v == nil {
- return true
- }
- rv := reflect.ValueOf(v)
- if rv.Type().AssignableTo(to.Type()) {
- to.Set(rv)
- }
- } else if from.Kind() == reflect.Ptr {
- return set(to, from.Elem(), deepCopy, converters)
- } else {
- return false
- }
- }
-
- return true
-}
-
-// lookupAndCopyWithConverter looks up the type pair, on success the TypeConverter Fn func is called to copy src to dst field.
-func lookupAndCopyWithConverter(to, from reflect.Value, converters map[converterPair]TypeConverter) (copied bool, err error) {
- pair := converterPair{
- SrcType: from.Type(),
- DstType: to.Type(),
- }
-
- if cnv, ok := converters[pair]; ok {
- result, err := cnv.Fn(from.Interface())
-
- if err != nil {
- return false, err
- }
-
- if result != nil {
- to.Set(reflect.ValueOf(result))
- } else {
- // in case we've got a nil value to copy
- to.Set(reflect.Zero(to.Type()))
- }
-
- return true, nil
- }
-
- return false, nil
-}
-
-// parseTags Parses struct tags and returns uint8 bit flags.
-func parseTags(tag string) (flg uint8, name string, err error) {
- for _, t := range strings.Split(tag, ",") {
- switch t {
- case "-":
- flg = tagIgnore
- return
- case "must":
- flg = flg | tagMust
- case "nopanic":
- flg = flg | tagNoPanic
- default:
- if unicode.IsUpper([]rune(t)[0]) {
- name = strings.TrimSpace(t)
- } else {
- err = errors.New("copier field name tag must be start upper case")
- }
- }
- }
- return
-}
-
-// getTagFlags Parses struct tags for bit flags, field name.
-func getFlags(dest, src reflect.Value, toType, fromType reflect.Type) (flags, error) {
- flgs := flags{
- BitFlags: map[string]uint8{},
- SrcNames: tagNameMapping{
- FieldNameToTag: map[string]string{},
- TagToFieldName: map[string]string{},
- },
- DestNames: tagNameMapping{
- FieldNameToTag: map[string]string{},
- TagToFieldName: map[string]string{},
- },
- }
- var toTypeFields, fromTypeFields []reflect.StructField
- if dest.IsValid() {
- toTypeFields = deepFields(toType)
- }
- if src.IsValid() {
- fromTypeFields = deepFields(fromType)
- }
-
- // Get a list dest of tags
- for _, field := range toTypeFields {
- tags := field.Tag.Get("copier")
- if tags != "" {
- var name string
- var err error
- if flgs.BitFlags[field.Name], name, err = parseTags(tags); err != nil {
- return flags{}, err
- } else if name != "" {
- flgs.DestNames.FieldNameToTag[field.Name] = name
- flgs.DestNames.TagToFieldName[name] = field.Name
- }
- }
- }
-
- // Get a list source of tags
- for _, field := range fromTypeFields {
- tags := field.Tag.Get("copier")
- if tags != "" {
- var name string
- var err error
- if _, name, err = parseTags(tags); err != nil {
- return flags{}, err
- } else if name != "" {
- flgs.SrcNames.FieldNameToTag[field.Name] = name
- flgs.SrcNames.TagToFieldName[name] = field.Name
- }
- }
- }
- return flgs, nil
-}
-
-// checkBitFlags Checks flags for error or panic conditions.
-func checkBitFlags(flagsList map[string]uint8) (err error) {
- // Check flag conditions were met
- for name, flgs := range flagsList {
- if flgs&hasCopied == 0 {
- switch {
- case flgs&tagMust != 0 && flgs&tagNoPanic != 0:
- err = fmt.Errorf("field %s has must tag but was not copied", name)
- return
- case flgs&(tagMust) != 0:
- panic(fmt.Sprintf("Field %s has must tag but was not copied", name))
- }
- }
- }
- return
-}
-
-func getFieldName(fieldName string, flgs flags) (srcFieldName string, destFieldName string) {
- // get dest field name
- if srcTagName, ok := flgs.SrcNames.FieldNameToTag[fieldName]; ok {
- destFieldName = srcTagName
- if destTagName, ok := flgs.DestNames.TagToFieldName[srcTagName]; ok {
- destFieldName = destTagName
- }
- } else {
- if destTagName, ok := flgs.DestNames.TagToFieldName[fieldName]; ok {
- destFieldName = destTagName
- }
- }
- if destFieldName == "" {
- destFieldName = fieldName
- }
-
- // get source field name
- if destTagName, ok := flgs.DestNames.FieldNameToTag[fieldName]; ok {
- srcFieldName = destTagName
- if srcField, ok := flgs.SrcNames.TagToFieldName[destTagName]; ok {
- srcFieldName = srcField
- }
- } else {
- if srcField, ok := flgs.SrcNames.TagToFieldName[fieldName]; ok {
- srcFieldName = srcField
- }
- }
-
- if srcFieldName == "" {
- srcFieldName = fieldName
- }
- return
-}
-
-func driverValuer(v reflect.Value) (i driver.Valuer, ok bool) {
-
- if !v.CanAddr() {
- i, ok = v.Interface().(driver.Valuer)
- return
- }
-
- i, ok = v.Addr().Interface().(driver.Valuer)
- return
-}
diff --git a/vendor/github.com/jinzhu/copier/errors.go b/vendor/github.com/jinzhu/copier/errors.go
deleted file mode 100644
index cf7c5e74..00000000
--- a/vendor/github.com/jinzhu/copier/errors.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package copier
-
-import "errors"
-
-var (
- ErrInvalidCopyDestination = errors.New("copy destination is invalid")
- ErrInvalidCopyFrom = errors.New("copy from is invalid")
- ErrMapKeyNotMatch = errors.New("map's key type doesn't match")
- ErrNotSupported = errors.New("not supported")
-)
diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore
deleted file mode 100644
index daf913b1..00000000
--- a/vendor/github.com/pkg/errors/.gitignore
+++ /dev/null
@@ -1,24 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
-*.prof
diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml
deleted file mode 100644
index 9159de03..00000000
--- a/vendor/github.com/pkg/errors/.travis.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-language: go
-go_import_path: github.com/pkg/errors
-go:
- - 1.11.x
- - 1.12.x
- - 1.13.x
- - tip
-
-script:
- - make check
diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE
deleted file mode 100644
index 835ba3e7..00000000
--- a/vendor/github.com/pkg/errors/LICENSE
+++ /dev/null
@@ -1,23 +0,0 @@
-Copyright (c) 2015, Dave Cheney
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pkg/errors/Makefile b/vendor/github.com/pkg/errors/Makefile
deleted file mode 100644
index ce9d7cde..00000000
--- a/vendor/github.com/pkg/errors/Makefile
+++ /dev/null
@@ -1,44 +0,0 @@
-PKGS := github.com/pkg/errors
-SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS))
-GO := go
-
-check: test vet gofmt misspell unconvert staticcheck ineffassign unparam
-
-test:
- $(GO) test $(PKGS)
-
-vet: | test
- $(GO) vet $(PKGS)
-
-staticcheck:
- $(GO) get honnef.co/go/tools/cmd/staticcheck
- staticcheck -checks all $(PKGS)
-
-misspell:
- $(GO) get github.com/client9/misspell/cmd/misspell
- misspell \
- -locale GB \
- -error \
- *.md *.go
-
-unconvert:
- $(GO) get github.com/mdempsky/unconvert
- unconvert -v $(PKGS)
-
-ineffassign:
- $(GO) get github.com/gordonklaus/ineffassign
- find $(SRCDIRS) -name '*.go' | xargs ineffassign
-
-pedantic: check errcheck
-
-unparam:
- $(GO) get mvdan.cc/unparam
- unparam ./...
-
-errcheck:
- $(GO) get github.com/kisielk/errcheck
- errcheck $(PKGS)
-
-gofmt:
- @echo Checking code is gofmted
- @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)"
diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md
deleted file mode 100644
index 54dfdcb1..00000000
--- a/vendor/github.com/pkg/errors/README.md
+++ /dev/null
@@ -1,59 +0,0 @@
-# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge)
-
-Package errors provides simple error handling primitives.
-
-`go get github.com/pkg/errors`
-
-The traditional error handling idiom in Go is roughly akin to
-```go
-if err != nil {
- return err
-}
-```
-which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.
-
-## Adding context to an error
-
-The errors.Wrap function returns a new error that adds context to the original error. For example
-```go
-_, err := ioutil.ReadAll(r)
-if err != nil {
- return errors.Wrap(err, "read failed")
-}
-```
-## Retrieving the cause of an error
-
-Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.
-```go
-type causer interface {
- Cause() error
-}
-```
-`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:
-```go
-switch err := errors.Cause(err).(type) {
-case *MyError:
- // handle specifically
-default:
- // unknown error
-}
-```
-
-[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).
-
-## Roadmap
-
-With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows:
-
-- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible)
-- 1.0. Final release.
-
-## Contributing
-
-Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports.
-
-Before sending a PR, please discuss your change by raising an issue.
-
-## License
-
-BSD-2-Clause
diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml
deleted file mode 100644
index a932eade..00000000
--- a/vendor/github.com/pkg/errors/appveyor.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-version: build-{build}.{branch}
-
-clone_folder: C:\gopath\src\github.com\pkg\errors
-shallow_clone: true # for startup speed
-
-environment:
- GOPATH: C:\gopath
-
-platform:
- - x64
-
-# http://www.appveyor.com/docs/installed-software
-install:
- # some helpful output for debugging builds
- - go version
- - go env
- # pre-installed MinGW at C:\MinGW is 32bit only
- # but MSYS2 at C:\msys64 has mingw64
- - set PATH=C:\msys64\mingw64\bin;%PATH%
- - gcc --version
- - g++ --version
-
-build_script:
- - go install -v ./...
-
-test_script:
- - set PATH=C:\gopath\bin;%PATH%
- - go test -v ./...
-
-#artifacts:
-# - path: '%GOPATH%\bin\*.exe'
-deploy: off
diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go
deleted file mode 100644
index 161aea25..00000000
--- a/vendor/github.com/pkg/errors/errors.go
+++ /dev/null
@@ -1,288 +0,0 @@
-// Package errors provides simple error handling primitives.
-//
-// The traditional error handling idiom in Go is roughly akin to
-//
-// if err != nil {
-// return err
-// }
-//
-// which when applied recursively up the call stack results in error reports
-// without context or debugging information. The errors package allows
-// programmers to add context to the failure path in their code in a way
-// that does not destroy the original value of the error.
-//
-// Adding context to an error
-//
-// The errors.Wrap function returns a new error that adds context to the
-// original error by recording a stack trace at the point Wrap is called,
-// together with the supplied message. For example
-//
-// _, err := ioutil.ReadAll(r)
-// if err != nil {
-// return errors.Wrap(err, "read failed")
-// }
-//
-// If additional control is required, the errors.WithStack and
-// errors.WithMessage functions destructure errors.Wrap into its component
-// operations: annotating an error with a stack trace and with a message,
-// respectively.
-//
-// Retrieving the cause of an error
-//
-// Using errors.Wrap constructs a stack of errors, adding context to the
-// preceding error. Depending on the nature of the error it may be necessary
-// to reverse the operation of errors.Wrap to retrieve the original error
-// for inspection. Any error value which implements this interface
-//
-// type causer interface {
-// Cause() error
-// }
-//
-// can be inspected by errors.Cause. errors.Cause will recursively retrieve
-// the topmost error that does not implement causer, which is assumed to be
-// the original cause. For example:
-//
-// switch err := errors.Cause(err).(type) {
-// case *MyError:
-// // handle specifically
-// default:
-// // unknown error
-// }
-//
-// Although the causer interface is not exported by this package, it is
-// considered a part of its stable public interface.
-//
-// Formatted printing of errors
-//
-// All error values returned from this package implement fmt.Formatter and can
-// be formatted by the fmt package. The following verbs are supported:
-//
-// %s print the error. If the error has a Cause it will be
-// printed recursively.
-// %v see %s
-// %+v extended format. Each Frame of the error's StackTrace will
-// be printed in detail.
-//
-// Retrieving the stack trace of an error or wrapper
-//
-// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
-// invoked. This information can be retrieved with the following interface:
-//
-// type stackTracer interface {
-// StackTrace() errors.StackTrace
-// }
-//
-// The returned errors.StackTrace type is defined as
-//
-// type StackTrace []Frame
-//
-// The Frame type represents a call site in the stack trace. Frame supports
-// the fmt.Formatter interface that can be used for printing information about
-// the stack trace of this error. For example:
-//
-// if err, ok := err.(stackTracer); ok {
-// for _, f := range err.StackTrace() {
-// fmt.Printf("%+s:%d\n", f, f)
-// }
-// }
-//
-// Although the stackTracer interface is not exported by this package, it is
-// considered a part of its stable public interface.
-//
-// See the documentation for Frame.Format for more details.
-package errors
-
-import (
- "fmt"
- "io"
-)
-
-// New returns an error with the supplied message.
-// New also records the stack trace at the point it was called.
-func New(message string) error {
- return &fundamental{
- msg: message,
- stack: callers(),
- }
-}
-
-// Errorf formats according to a format specifier and returns the string
-// as a value that satisfies error.
-// Errorf also records the stack trace at the point it was called.
-func Errorf(format string, args ...interface{}) error {
- return &fundamental{
- msg: fmt.Sprintf(format, args...),
- stack: callers(),
- }
-}
-
-// fundamental is an error that has a message and a stack, but no caller.
-type fundamental struct {
- msg string
- *stack
-}
-
-func (f *fundamental) Error() string { return f.msg }
-
-func (f *fundamental) Format(s fmt.State, verb rune) {
- switch verb {
- case 'v':
- if s.Flag('+') {
- io.WriteString(s, f.msg)
- f.stack.Format(s, verb)
- return
- }
- fallthrough
- case 's':
- io.WriteString(s, f.msg)
- case 'q':
- fmt.Fprintf(s, "%q", f.msg)
- }
-}
-
-// WithStack annotates err with a stack trace at the point WithStack was called.
-// If err is nil, WithStack returns nil.
-func WithStack(err error) error {
- if err == nil {
- return nil
- }
- return &withStack{
- err,
- callers(),
- }
-}
-
-type withStack struct {
- error
- *stack
-}
-
-func (w *withStack) Cause() error { return w.error }
-
-// Unwrap provides compatibility for Go 1.13 error chains.
-func (w *withStack) Unwrap() error { return w.error }
-
-func (w *withStack) Format(s fmt.State, verb rune) {
- switch verb {
- case 'v':
- if s.Flag('+') {
- fmt.Fprintf(s, "%+v", w.Cause())
- w.stack.Format(s, verb)
- return
- }
- fallthrough
- case 's':
- io.WriteString(s, w.Error())
- case 'q':
- fmt.Fprintf(s, "%q", w.Error())
- }
-}
-
-// Wrap returns an error annotating err with a stack trace
-// at the point Wrap is called, and the supplied message.
-// If err is nil, Wrap returns nil.
-func Wrap(err error, message string) error {
- if err == nil {
- return nil
- }
- err = &withMessage{
- cause: err,
- msg: message,
- }
- return &withStack{
- err,
- callers(),
- }
-}
-
-// Wrapf returns an error annotating err with a stack trace
-// at the point Wrapf is called, and the format specifier.
-// If err is nil, Wrapf returns nil.
-func Wrapf(err error, format string, args ...interface{}) error {
- if err == nil {
- return nil
- }
- err = &withMessage{
- cause: err,
- msg: fmt.Sprintf(format, args...),
- }
- return &withStack{
- err,
- callers(),
- }
-}
-
-// WithMessage annotates err with a new message.
-// If err is nil, WithMessage returns nil.
-func WithMessage(err error, message string) error {
- if err == nil {
- return nil
- }
- return &withMessage{
- cause: err,
- msg: message,
- }
-}
-
-// WithMessagef annotates err with the format specifier.
-// If err is nil, WithMessagef returns nil.
-func WithMessagef(err error, format string, args ...interface{}) error {
- if err == nil {
- return nil
- }
- return &withMessage{
- cause: err,
- msg: fmt.Sprintf(format, args...),
- }
-}
-
-type withMessage struct {
- cause error
- msg string
-}
-
-func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
-func (w *withMessage) Cause() error { return w.cause }
-
-// Unwrap provides compatibility for Go 1.13 error chains.
-func (w *withMessage) Unwrap() error { return w.cause }
-
-func (w *withMessage) Format(s fmt.State, verb rune) {
- switch verb {
- case 'v':
- if s.Flag('+') {
- fmt.Fprintf(s, "%+v\n", w.Cause())
- io.WriteString(s, w.msg)
- return
- }
- fallthrough
- case 's', 'q':
- io.WriteString(s, w.Error())
- }
-}
-
-// Cause returns the underlying cause of the error, if possible.
-// An error value has a cause if it implements the following
-// interface:
-//
-// type causer interface {
-// Cause() error
-// }
-//
-// If the error does not implement Cause, the original error will
-// be returned. If the error is nil, nil will be returned without further
-// investigation.
-func Cause(err error) error {
- type causer interface {
- Cause() error
- }
-
- for err != nil {
- cause, ok := err.(causer)
- if !ok {
- break
- }
- err = cause.Cause()
- }
- return err
-}
diff --git a/vendor/github.com/pkg/errors/go113.go b/vendor/github.com/pkg/errors/go113.go
deleted file mode 100644
index be0d10d0..00000000
--- a/vendor/github.com/pkg/errors/go113.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// +build go1.13
-
-package errors
-
-import (
- stderrors "errors"
-)
-
-// Is reports whether any error in err's chain matches target.
-//
-// The chain consists of err itself followed by the sequence of errors obtained by
-// repeatedly calling Unwrap.
-//
-// An error is considered to match a target if it is equal to that target or if
-// it implements a method Is(error) bool such that Is(target) returns true.
-func Is(err, target error) bool { return stderrors.Is(err, target) }
-
-// As finds the first error in err's chain that matches target, and if so, sets
-// target to that error value and returns true.
-//
-// The chain consists of err itself followed by the sequence of errors obtained by
-// repeatedly calling Unwrap.
-//
-// An error matches target if the error's concrete value is assignable to the value
-// pointed to by target, or if the error has a method As(interface{}) bool such that
-// As(target) returns true. In the latter case, the As method is responsible for
-// setting target.
-//
-// As will panic if target is not a non-nil pointer to either a type that implements
-// error, or to any interface type. As returns false if err is nil.
-func As(err error, target interface{}) bool { return stderrors.As(err, target) }
-
-// Unwrap returns the result of calling the Unwrap method on err, if err's
-// type contains an Unwrap method returning error.
-// Otherwise, Unwrap returns nil.
-func Unwrap(err error) error {
- return stderrors.Unwrap(err)
-}
diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go
deleted file mode 100644
index 779a8348..00000000
--- a/vendor/github.com/pkg/errors/stack.go
+++ /dev/null
@@ -1,177 +0,0 @@
-package errors
-
-import (
- "fmt"
- "io"
- "path"
- "runtime"
- "strconv"
- "strings"
-)
-
-// Frame represents a program counter inside a stack frame.
-// For historical reasons if Frame is interpreted as a uintptr
-// its value represents the program counter + 1.
-type Frame uintptr
-
-// pc returns the program counter for this frame;
-// multiple frames may have the same PC value.
-func (f Frame) pc() uintptr { return uintptr(f) - 1 }
-
-// file returns the full path to the file that contains the
-// function for this Frame's pc.
-func (f Frame) file() string {
- fn := runtime.FuncForPC(f.pc())
- if fn == nil {
- return "unknown"
- }
- file, _ := fn.FileLine(f.pc())
- return file
-}
-
-// line returns the line number of source code of the
-// function for this Frame's pc.
-func (f Frame) line() int {
- fn := runtime.FuncForPC(f.pc())
- if fn == nil {
- return 0
- }
- _, line := fn.FileLine(f.pc())
- return line
-}
-
-// name returns the name of this function, if known.
-func (f Frame) name() string {
- fn := runtime.FuncForPC(f.pc())
- if fn == nil {
- return "unknown"
- }
- return fn.Name()
-}
-
-// Format formats the frame according to the fmt.Formatter interface.
-//
-// %s source file
-// %d source line
-// %n function name
-// %v equivalent to %s:%d
-//
-// Format accepts flags that alter the printing of some verbs, as follows:
-//
-// %+s function name and path of source file relative to the compile time
-// GOPATH separated by \n\t (\n\t)
-// %+v equivalent to %+s:%d
-func (f Frame) Format(s fmt.State, verb rune) {
- switch verb {
- case 's':
- switch {
- case s.Flag('+'):
- io.WriteString(s, f.name())
- io.WriteString(s, "\n\t")
- io.WriteString(s, f.file())
- default:
- io.WriteString(s, path.Base(f.file()))
- }
- case 'd':
- io.WriteString(s, strconv.Itoa(f.line()))
- case 'n':
- io.WriteString(s, funcname(f.name()))
- case 'v':
- f.Format(s, 's')
- io.WriteString(s, ":")
- f.Format(s, 'd')
- }
-}
-
-// MarshalText formats a stacktrace Frame as a text string. The output is the
-// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs.
-func (f Frame) MarshalText() ([]byte, error) {
- name := f.name()
- if name == "unknown" {
- return []byte(name), nil
- }
- return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil
-}
-
-// StackTrace is stack of Frames from innermost (newest) to outermost (oldest).
-type StackTrace []Frame
-
-// Format formats the stack of Frames according to the fmt.Formatter interface.
-//
-// %s lists source files for each Frame in the stack
-// %v lists the source file and line number for each Frame in the stack
-//
-// Format accepts flags that alter the printing of some verbs, as follows:
-//
-// %+v Prints filename, function, and line number for each Frame in the stack.
-func (st StackTrace) Format(s fmt.State, verb rune) {
- switch verb {
- case 'v':
- switch {
- case s.Flag('+'):
- for _, f := range st {
- io.WriteString(s, "\n")
- f.Format(s, verb)
- }
- case s.Flag('#'):
- fmt.Fprintf(s, "%#v", []Frame(st))
- default:
- st.formatSlice(s, verb)
- }
- case 's':
- st.formatSlice(s, verb)
- }
-}
-
-// formatSlice will format this StackTrace into the given buffer as a slice of
-// Frame, only valid when called with '%s' or '%v'.
-func (st StackTrace) formatSlice(s fmt.State, verb rune) {
- io.WriteString(s, "[")
- for i, f := range st {
- if i > 0 {
- io.WriteString(s, " ")
- }
- f.Format(s, verb)
- }
- io.WriteString(s, "]")
-}
-
-// stack represents a stack of program counters.
-type stack []uintptr
-
-func (s *stack) Format(st fmt.State, verb rune) {
- switch verb {
- case 'v':
- switch {
- case st.Flag('+'):
- for _, pc := range *s {
- f := Frame(pc)
- fmt.Fprintf(st, "\n%+v", f)
- }
- }
- }
-}
-
-func (s *stack) StackTrace() StackTrace {
- f := make([]Frame, len(*s))
- for i := 0; i < len(f); i++ {
- f[i] = Frame((*s)[i])
- }
- return f
-}
-
-func callers() *stack {
- const depth = 32
- var pcs [depth]uintptr
- n := runtime.Callers(3, pcs[:])
- var st stack = pcs[0:n]
- return &st
-}
-
-// funcname removes the path prefix component of a function's name reported by func.Name().
-func funcname(name string) string {
- i := strings.LastIndex(name, "/")
- name = name[i+1:]
- i = strings.Index(name, ".")
- return name[i+1:]
-}
diff --git a/vendor/github.com/pmezard/go-difflib/LICENSE b/vendor/github.com/pmezard/go-difflib/LICENSE
deleted file mode 100644
index c67dad61..00000000
--- a/vendor/github.com/pmezard/go-difflib/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2013, Patrick Mezard
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
-notice, this list of conditions and the following disclaimer in the
-documentation and/or other materials provided with the distribution.
- The names of its contributors may not be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
-TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
-PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
-TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go
deleted file mode 100644
index 003e99fa..00000000
--- a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go
+++ /dev/null
@@ -1,772 +0,0 @@
-// Package difflib is a partial port of Python difflib module.
-//
-// It provides tools to compare sequences of strings and generate textual diffs.
-//
-// The following class and functions have been ported:
-//
-// - SequenceMatcher
-//
-// - unified_diff
-//
-// - context_diff
-//
-// Getting unified diffs was the main goal of the port. Keep in mind this code
-// is mostly suitable to output text differences in a human friendly way, there
-// are no guarantees generated diffs are consumable by patch(1).
-package difflib
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strings"
-)
-
-func min(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
-
-func max(a, b int) int {
- if a > b {
- return a
- }
- return b
-}
-
-func calculateRatio(matches, length int) float64 {
- if length > 0 {
- return 2.0 * float64(matches) / float64(length)
- }
- return 1.0
-}
-
-type Match struct {
- A int
- B int
- Size int
-}
-
-type OpCode struct {
- Tag byte
- I1 int
- I2 int
- J1 int
- J2 int
-}
-
-// SequenceMatcher compares sequence of strings. The basic
-// algorithm predates, and is a little fancier than, an algorithm
-// published in the late 1980's by Ratcliff and Obershelp under the
-// hyperbolic name "gestalt pattern matching". The basic idea is to find
-// the longest contiguous matching subsequence that contains no "junk"
-// elements (R-O doesn't address junk). The same idea is then applied
-// recursively to the pieces of the sequences to the left and to the right
-// of the matching subsequence. This does not yield minimal edit
-// sequences, but does tend to yield matches that "look right" to people.
-//
-// SequenceMatcher tries to compute a "human-friendly diff" between two
-// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
-// longest *contiguous* & junk-free matching subsequence. That's what
-// catches peoples' eyes. The Windows(tm) windiff has another interesting
-// notion, pairing up elements that appear uniquely in each sequence.
-// That, and the method here, appear to yield more intuitive difference
-// reports than does diff. This method appears to be the least vulnerable
-// to synching up on blocks of "junk lines", though (like blank lines in
-// ordinary text files, or maybe "" lines in HTML files). That may be
-// because this is the only method of the 3 that has a *concept* of
-// "junk" .
-//
-// Timing: Basic R-O is cubic time worst case and quadratic time expected
-// case. SequenceMatcher is quadratic time for the worst case and has
-// expected-case behavior dependent in a complicated way on how many
-// elements the sequences have in common; best case time is linear.
-type SequenceMatcher struct {
- a []string
- b []string
- b2j map[string][]int
- IsJunk func(string) bool
- autoJunk bool
- bJunk map[string]struct{}
- matchingBlocks []Match
- fullBCount map[string]int
- bPopular map[string]struct{}
- opCodes []OpCode
-}
-
-func NewMatcher(a, b []string) *SequenceMatcher {
- m := SequenceMatcher{autoJunk: true}
- m.SetSeqs(a, b)
- return &m
-}
-
-func NewMatcherWithJunk(a, b []string, autoJunk bool,
- isJunk func(string) bool) *SequenceMatcher {
-
- m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
- m.SetSeqs(a, b)
- return &m
-}
-
-// Set two sequences to be compared.
-func (m *SequenceMatcher) SetSeqs(a, b []string) {
- m.SetSeq1(a)
- m.SetSeq2(b)
-}
-
-// Set the first sequence to be compared. The second sequence to be compared is
-// not changed.
-//
-// SequenceMatcher computes and caches detailed information about the second
-// sequence, so if you want to compare one sequence S against many sequences,
-// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
-// sequences.
-//
-// See also SetSeqs() and SetSeq2().
-func (m *SequenceMatcher) SetSeq1(a []string) {
- if &a == &m.a {
- return
- }
- m.a = a
- m.matchingBlocks = nil
- m.opCodes = nil
-}
-
-// Set the second sequence to be compared. The first sequence to be compared is
-// not changed.
-func (m *SequenceMatcher) SetSeq2(b []string) {
- if &b == &m.b {
- return
- }
- m.b = b
- m.matchingBlocks = nil
- m.opCodes = nil
- m.fullBCount = nil
- m.chainB()
-}
-
-func (m *SequenceMatcher) chainB() {
- // Populate line -> index mapping
- b2j := map[string][]int{}
- for i, s := range m.b {
- indices := b2j[s]
- indices = append(indices, i)
- b2j[s] = indices
- }
-
- // Purge junk elements
- m.bJunk = map[string]struct{}{}
- if m.IsJunk != nil {
- junk := m.bJunk
- for s, _ := range b2j {
- if m.IsJunk(s) {
- junk[s] = struct{}{}
- }
- }
- for s, _ := range junk {
- delete(b2j, s)
- }
- }
-
- // Purge remaining popular elements
- popular := map[string]struct{}{}
- n := len(m.b)
- if m.autoJunk && n >= 200 {
- ntest := n/100 + 1
- for s, indices := range b2j {
- if len(indices) > ntest {
- popular[s] = struct{}{}
- }
- }
- for s, _ := range popular {
- delete(b2j, s)
- }
- }
- m.bPopular = popular
- m.b2j = b2j
-}
-
-func (m *SequenceMatcher) isBJunk(s string) bool {
- _, ok := m.bJunk[s]
- return ok
-}
-
-// Find longest matching block in a[alo:ahi] and b[blo:bhi].
-//
-// If IsJunk is not defined:
-//
-// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
-// alo <= i <= i+k <= ahi
-// blo <= j <= j+k <= bhi
-// and for all (i',j',k') meeting those conditions,
-// k >= k'
-// i <= i'
-// and if i == i', j <= j'
-//
-// In other words, of all maximal matching blocks, return one that
-// starts earliest in a, and of all those maximal matching blocks that
-// start earliest in a, return the one that starts earliest in b.
-//
-// If IsJunk is defined, first the longest matching block is
-// determined as above, but with the additional restriction that no
-// junk element appears in the block. Then that block is extended as
-// far as possible by matching (only) junk elements on both sides. So
-// the resulting block never matches on junk except as identical junk
-// happens to be adjacent to an "interesting" match.
-//
-// If no blocks match, return (alo, blo, 0).
-func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
- // CAUTION: stripping common prefix or suffix would be incorrect.
- // E.g.,
- // ab
- // acab
- // Longest matching block is "ab", but if common prefix is
- // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
- // strip, so ends up claiming that ab is changed to acab by
- // inserting "ca" in the middle. That's minimal but unintuitive:
- // "it's obvious" that someone inserted "ac" at the front.
- // Windiff ends up at the same place as diff, but by pairing up
- // the unique 'b's and then matching the first two 'a's.
- besti, bestj, bestsize := alo, blo, 0
-
- // find longest junk-free match
- // during an iteration of the loop, j2len[j] = length of longest
- // junk-free match ending with a[i-1] and b[j]
- j2len := map[int]int{}
- for i := alo; i != ahi; i++ {
- // look at all instances of a[i] in b; note that because
- // b2j has no junk keys, the loop is skipped if a[i] is junk
- newj2len := map[int]int{}
- for _, j := range m.b2j[m.a[i]] {
- // a[i] matches b[j]
- if j < blo {
- continue
- }
- if j >= bhi {
- break
- }
- k := j2len[j-1] + 1
- newj2len[j] = k
- if k > bestsize {
- besti, bestj, bestsize = i-k+1, j-k+1, k
- }
- }
- j2len = newj2len
- }
-
- // Extend the best by non-junk elements on each end. In particular,
- // "popular" non-junk elements aren't in b2j, which greatly speeds
- // the inner loop above, but also means "the best" match so far
- // doesn't contain any junk *or* popular non-junk elements.
- for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
- m.a[besti-1] == m.b[bestj-1] {
- besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
- }
- for besti+bestsize < ahi && bestj+bestsize < bhi &&
- !m.isBJunk(m.b[bestj+bestsize]) &&
- m.a[besti+bestsize] == m.b[bestj+bestsize] {
- bestsize += 1
- }
-
- // Now that we have a wholly interesting match (albeit possibly
- // empty!), we may as well suck up the matching junk on each
- // side of it too. Can't think of a good reason not to, and it
- // saves post-processing the (possibly considerable) expense of
- // figuring out what to do with it. In the case of an empty
- // interesting match, this is clearly the right thing to do,
- // because no other kind of match is possible in the regions.
- for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
- m.a[besti-1] == m.b[bestj-1] {
- besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
- }
- for besti+bestsize < ahi && bestj+bestsize < bhi &&
- m.isBJunk(m.b[bestj+bestsize]) &&
- m.a[besti+bestsize] == m.b[bestj+bestsize] {
- bestsize += 1
- }
-
- return Match{A: besti, B: bestj, Size: bestsize}
-}
-
-// Return list of triples describing matching subsequences.
-//
-// Each triple is of the form (i, j, n), and means that
-// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
-// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
-// adjacent triples in the list, and the second is not the last triple in the
-// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
-// adjacent equal blocks.
-//
-// The last triple is a dummy, (len(a), len(b), 0), and is the only
-// triple with n==0.
-func (m *SequenceMatcher) GetMatchingBlocks() []Match {
- if m.matchingBlocks != nil {
- return m.matchingBlocks
- }
-
- var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
- matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
- match := m.findLongestMatch(alo, ahi, blo, bhi)
- i, j, k := match.A, match.B, match.Size
- if match.Size > 0 {
- if alo < i && blo < j {
- matched = matchBlocks(alo, i, blo, j, matched)
- }
- matched = append(matched, match)
- if i+k < ahi && j+k < bhi {
- matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
- }
- }
- return matched
- }
- matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
-
- // It's possible that we have adjacent equal blocks in the
- // matching_blocks list now.
- nonAdjacent := []Match{}
- i1, j1, k1 := 0, 0, 0
- for _, b := range matched {
- // Is this block adjacent to i1, j1, k1?
- i2, j2, k2 := b.A, b.B, b.Size
- if i1+k1 == i2 && j1+k1 == j2 {
- // Yes, so collapse them -- this just increases the length of
- // the first block by the length of the second, and the first
- // block so lengthened remains the block to compare against.
- k1 += k2
- } else {
- // Not adjacent. Remember the first block (k1==0 means it's
- // the dummy we started with), and make the second block the
- // new block to compare against.
- if k1 > 0 {
- nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
- }
- i1, j1, k1 = i2, j2, k2
- }
- }
- if k1 > 0 {
- nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
- }
-
- nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
- m.matchingBlocks = nonAdjacent
- return m.matchingBlocks
-}
-
-// Return list of 5-tuples describing how to turn a into b.
-//
-// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
-// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
-// tuple preceding it, and likewise for j1 == the previous j2.
-//
-// The tags are characters, with these meanings:
-//
-// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2]
-//
-// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case.
-//
-// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
-//
-// 'e' (equal): a[i1:i2] == b[j1:j2]
-func (m *SequenceMatcher) GetOpCodes() []OpCode {
- if m.opCodes != nil {
- return m.opCodes
- }
- i, j := 0, 0
- matching := m.GetMatchingBlocks()
- opCodes := make([]OpCode, 0, len(matching))
- for _, m := range matching {
- // invariant: we've pumped out correct diffs to change
- // a[:i] into b[:j], and the next matching block is
- // a[ai:ai+size] == b[bj:bj+size]. So we need to pump
- // out a diff to change a[i:ai] into b[j:bj], pump out
- // the matching block, and move (i,j) beyond the match
- ai, bj, size := m.A, m.B, m.Size
- tag := byte(0)
- if i < ai && j < bj {
- tag = 'r'
- } else if i < ai {
- tag = 'd'
- } else if j < bj {
- tag = 'i'
- }
- if tag > 0 {
- opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
- }
- i, j = ai+size, bj+size
- // the list of matching blocks is terminated by a
- // sentinel with size 0
- if size > 0 {
- opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
- }
- }
- m.opCodes = opCodes
- return m.opCodes
-}
-
-// Isolate change clusters by eliminating ranges with no changes.
-//
-// Return a generator of groups with up to n lines of context.
-// Each group is in the same format as returned by GetOpCodes().
-func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
- if n < 0 {
- n = 3
- }
- codes := m.GetOpCodes()
- if len(codes) == 0 {
- codes = []OpCode{OpCode{'e', 0, 1, 0, 1}}
- }
- // Fixup leading and trailing groups if they show no changes.
- if codes[0].Tag == 'e' {
- c := codes[0]
- i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
- codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
- }
- if codes[len(codes)-1].Tag == 'e' {
- c := codes[len(codes)-1]
- i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
- codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
- }
- nn := n + n
- groups := [][]OpCode{}
- group := []OpCode{}
- for _, c := range codes {
- i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
- // End the current group and start a new one whenever
- // there is a large range with no changes.
- if c.Tag == 'e' && i2-i1 > nn {
- group = append(group, OpCode{c.Tag, i1, min(i2, i1+n),
- j1, min(j2, j1+n)})
- groups = append(groups, group)
- group = []OpCode{}
- i1, j1 = max(i1, i2-n), max(j1, j2-n)
- }
- group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
- }
- if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
- groups = append(groups, group)
- }
- return groups
-}
-
-// Return a measure of the sequences' similarity (float in [0,1]).
-//
-// Where T is the total number of elements in both sequences, and
-// M is the number of matches, this is 2.0*M / T.
-// Note that this is 1 if the sequences are identical, and 0 if
-// they have nothing in common.
-//
-// .Ratio() is expensive to compute if you haven't already computed
-// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
-// want to try .QuickRatio() or .RealQuickRation() first to get an
-// upper bound.
-func (m *SequenceMatcher) Ratio() float64 {
- matches := 0
- for _, m := range m.GetMatchingBlocks() {
- matches += m.Size
- }
- return calculateRatio(matches, len(m.a)+len(m.b))
-}
-
-// Return an upper bound on ratio() relatively quickly.
-//
-// This isn't defined beyond that it is an upper bound on .Ratio(), and
-// is faster to compute.
-func (m *SequenceMatcher) QuickRatio() float64 {
- // viewing a and b as multisets, set matches to the cardinality
- // of their intersection; this counts the number of matches
- // without regard to order, so is clearly an upper bound
- if m.fullBCount == nil {
- m.fullBCount = map[string]int{}
- for _, s := range m.b {
- m.fullBCount[s] = m.fullBCount[s] + 1
- }
- }
-
- // avail[x] is the number of times x appears in 'b' less the
- // number of times we've seen it in 'a' so far ... kinda
- avail := map[string]int{}
- matches := 0
- for _, s := range m.a {
- n, ok := avail[s]
- if !ok {
- n = m.fullBCount[s]
- }
- avail[s] = n - 1
- if n > 0 {
- matches += 1
- }
- }
- return calculateRatio(matches, len(m.a)+len(m.b))
-}
-
-// Return an upper bound on ratio() very quickly.
-//
-// This isn't defined beyond that it is an upper bound on .Ratio(), and
-// is faster to compute than either .Ratio() or .QuickRatio().
-func (m *SequenceMatcher) RealQuickRatio() float64 {
- la, lb := len(m.a), len(m.b)
- return calculateRatio(min(la, lb), la+lb)
-}
-
-// Convert range to the "ed" format
-func formatRangeUnified(start, stop int) string {
- // Per the diff spec at http://www.unix.org/single_unix_specification/
- beginning := start + 1 // lines start numbering with one
- length := stop - start
- if length == 1 {
- return fmt.Sprintf("%d", beginning)
- }
- if length == 0 {
- beginning -= 1 // empty ranges begin at line just before the range
- }
- return fmt.Sprintf("%d,%d", beginning, length)
-}
-
-// Unified diff parameters
-type UnifiedDiff struct {
- A []string // First sequence lines
- FromFile string // First file name
- FromDate string // First file time
- B []string // Second sequence lines
- ToFile string // Second file name
- ToDate string // Second file time
- Eol string // Headers end of line, defaults to LF
- Context int // Number of context lines
-}
-
-// Compare two sequences of lines; generate the delta as a unified diff.
-//
-// Unified diffs are a compact way of showing line changes and a few
-// lines of context. The number of context lines is set by 'n' which
-// defaults to three.
-//
-// By default, the diff control lines (those with ---, +++, or @@) are
-// created with a trailing newline. This is helpful so that inputs
-// created from file.readlines() result in diffs that are suitable for
-// file.writelines() since both the inputs and outputs have trailing
-// newlines.
-//
-// For inputs that do not have trailing newlines, set the lineterm
-// argument to "" so that the output will be uniformly newline free.
-//
-// The unidiff format normally has a header for filenames and modification
-// times. Any or all of these may be specified using strings for
-// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
-// The modification times are normally expressed in the ISO 8601 format.
-func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
- buf := bufio.NewWriter(writer)
- defer buf.Flush()
- wf := func(format string, args ...interface{}) error {
- _, err := buf.WriteString(fmt.Sprintf(format, args...))
- return err
- }
- ws := func(s string) error {
- _, err := buf.WriteString(s)
- return err
- }
-
- if len(diff.Eol) == 0 {
- diff.Eol = "\n"
- }
-
- started := false
- m := NewMatcher(diff.A, diff.B)
- for _, g := range m.GetGroupedOpCodes(diff.Context) {
- if !started {
- started = true
- fromDate := ""
- if len(diff.FromDate) > 0 {
- fromDate = "\t" + diff.FromDate
- }
- toDate := ""
- if len(diff.ToDate) > 0 {
- toDate = "\t" + diff.ToDate
- }
- if diff.FromFile != "" || diff.ToFile != "" {
- err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
- if err != nil {
- return err
- }
- err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
- if err != nil {
- return err
- }
- }
- }
- first, last := g[0], g[len(g)-1]
- range1 := formatRangeUnified(first.I1, last.I2)
- range2 := formatRangeUnified(first.J1, last.J2)
- if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
- return err
- }
- for _, c := range g {
- i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
- if c.Tag == 'e' {
- for _, line := range diff.A[i1:i2] {
- if err := ws(" " + line); err != nil {
- return err
- }
- }
- continue
- }
- if c.Tag == 'r' || c.Tag == 'd' {
- for _, line := range diff.A[i1:i2] {
- if err := ws("-" + line); err != nil {
- return err
- }
- }
- }
- if c.Tag == 'r' || c.Tag == 'i' {
- for _, line := range diff.B[j1:j2] {
- if err := ws("+" + line); err != nil {
- return err
- }
- }
- }
- }
- }
- return nil
-}
-
-// Like WriteUnifiedDiff but returns the diff a string.
-func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
- w := &bytes.Buffer{}
- err := WriteUnifiedDiff(w, diff)
- return string(w.Bytes()), err
-}
-
-// Convert range to the "ed" format.
-func formatRangeContext(start, stop int) string {
- // Per the diff spec at http://www.unix.org/single_unix_specification/
- beginning := start + 1 // lines start numbering with one
- length := stop - start
- if length == 0 {
- beginning -= 1 // empty ranges begin at line just before the range
- }
- if length <= 1 {
- return fmt.Sprintf("%d", beginning)
- }
- return fmt.Sprintf("%d,%d", beginning, beginning+length-1)
-}
-
-type ContextDiff UnifiedDiff
-
-// Compare two sequences of lines; generate the delta as a context diff.
-//
-// Context diffs are a compact way of showing line changes and a few
-// lines of context. The number of context lines is set by diff.Context
-// which defaults to three.
-//
-// By default, the diff control lines (those with *** or ---) are
-// created with a trailing newline.
-//
-// For inputs that do not have trailing newlines, set the diff.Eol
-// argument to "" so that the output will be uniformly newline free.
-//
-// The context diff format normally has a header for filenames and
-// modification times. Any or all of these may be specified using
-// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate.
-// The modification times are normally expressed in the ISO 8601 format.
-// If not specified, the strings default to blanks.
-func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
- buf := bufio.NewWriter(writer)
- defer buf.Flush()
- var diffErr error
- wf := func(format string, args ...interface{}) {
- _, err := buf.WriteString(fmt.Sprintf(format, args...))
- if diffErr == nil && err != nil {
- diffErr = err
- }
- }
- ws := func(s string) {
- _, err := buf.WriteString(s)
- if diffErr == nil && err != nil {
- diffErr = err
- }
- }
-
- if len(diff.Eol) == 0 {
- diff.Eol = "\n"
- }
-
- prefix := map[byte]string{
- 'i': "+ ",
- 'd': "- ",
- 'r': "! ",
- 'e': " ",
- }
-
- started := false
- m := NewMatcher(diff.A, diff.B)
- for _, g := range m.GetGroupedOpCodes(diff.Context) {
- if !started {
- started = true
- fromDate := ""
- if len(diff.FromDate) > 0 {
- fromDate = "\t" + diff.FromDate
- }
- toDate := ""
- if len(diff.ToDate) > 0 {
- toDate = "\t" + diff.ToDate
- }
- if diff.FromFile != "" || diff.ToFile != "" {
- wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol)
- wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol)
- }
- }
-
- first, last := g[0], g[len(g)-1]
- ws("***************" + diff.Eol)
-
- range1 := formatRangeContext(first.I1, last.I2)
- wf("*** %s ****%s", range1, diff.Eol)
- for _, c := range g {
- if c.Tag == 'r' || c.Tag == 'd' {
- for _, cc := range g {
- if cc.Tag == 'i' {
- continue
- }
- for _, line := range diff.A[cc.I1:cc.I2] {
- ws(prefix[cc.Tag] + line)
- }
- }
- break
- }
- }
-
- range2 := formatRangeContext(first.J1, last.J2)
- wf("--- %s ----%s", range2, diff.Eol)
- for _, c := range g {
- if c.Tag == 'r' || c.Tag == 'i' {
- for _, cc := range g {
- if cc.Tag == 'd' {
- continue
- }
- for _, line := range diff.B[cc.J1:cc.J2] {
- ws(prefix[cc.Tag] + line)
- }
- }
- break
- }
- }
- }
- return diffErr
-}
-
-// Like WriteContextDiff but returns the diff a string.
-func GetContextDiffString(diff ContextDiff) (string, error) {
- w := &bytes.Buffer{}
- err := WriteContextDiff(w, diff)
- return string(w.Bytes()), err
-}
-
-// Split a string on "\n" while preserving them. The output can be used
-// as input for UnifiedDiff and ContextDiff structures.
-func SplitLines(s string) []string {
- lines := strings.SplitAfter(s, "\n")
- lines[len(lines)-1] += "\n"
- return lines
-}
diff --git a/vendor/github.com/rs/xid/.appveyor.yml b/vendor/github.com/rs/xid/.appveyor.yml
deleted file mode 100644
index c73bb33b..00000000
--- a/vendor/github.com/rs/xid/.appveyor.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-version: 1.0.0.{build}
-
-platform: x64
-
-branches:
- only:
- - master
-
-clone_folder: c:\gopath\src\github.com\rs\xid
-
-environment:
- GOPATH: c:\gopath
-
-install:
- - echo %PATH%
- - echo %GOPATH%
- - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
- - go version
- - go env
- - go get -t .
-
-build_script:
- - go build
-
-test_script:
- - go test
-
diff --git a/vendor/github.com/rs/xid/.travis.yml b/vendor/github.com/rs/xid/.travis.yml
deleted file mode 100644
index b37da159..00000000
--- a/vendor/github.com/rs/xid/.travis.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-language: go
-go:
-- "1.9"
-- "1.10"
-- "master"
-matrix:
- allow_failures:
- - go: "master"
diff --git a/vendor/github.com/rs/xid/LICENSE b/vendor/github.com/rs/xid/LICENSE
deleted file mode 100644
index 47c5e9d2..00000000
--- a/vendor/github.com/rs/xid/LICENSE
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (c) 2015 Olivier Poitrey
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is furnished
-to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/github.com/rs/xid/README.md b/vendor/github.com/rs/xid/README.md
deleted file mode 100644
index 5bf462e8..00000000
--- a/vendor/github.com/rs/xid/README.md
+++ /dev/null
@@ -1,116 +0,0 @@
-# Globally Unique ID Generator
-
-[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/xid) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/xid/master/LICENSE) [![Build Status](https://travis-ci.org/rs/xid.svg?branch=master)](https://travis-ci.org/rs/xid) [![Coverage](http://gocover.io/_badge/github.com/rs/xid)](http://gocover.io/github.com/rs/xid)
-
-Package xid is a globally unique id generator library, ready to safely be used directly in your server code.
-
-Xid uses the Mongo Object ID algorithm to generate globally unique ids with a different serialization (base64) to make it shorter when transported as a string:
-https://docs.mongodb.org/manual/reference/object-id/
-
-- 4-byte value representing the seconds since the Unix epoch,
-- 3-byte machine identifier,
-- 2-byte process id, and
-- 3-byte counter, starting with a random value.
-
-The binary representation of the id is compatible with Mongo 12 bytes Object IDs.
-The string representation is using base32 hex (w/o padding) for better space efficiency
-when stored in that form (20 bytes). The hex variant of base32 is used to retain the
-sortable property of the id.
-
-Xid doesn't use base64 because case sensitivity and the 2 non alphanum chars may be an
-issue when transported as a string between various systems. Base36 wasn't retained either
-because 1/ it's not standard 2/ the resulting size is not predictable (not bit aligned)
-and 3/ it would not remain sortable. To validate a base32 `xid`, expect a 20 chars long,
-all lowercase sequence of `a` to `v` letters and `0` to `9` numbers (`[0-9a-v]{20}`).
-
-UUIDs are 16 bytes (128 bits) and 36 chars as string representation. Twitter Snowflake
-ids are 8 bytes (64 bits) but require machine/data-center configuration and/or central
-generator servers. xid stands in between with 12 bytes (96 bits) and a more compact
-URL-safe string representation (20 chars). No configuration or central generator server
-is required so it can be used directly in server's code.
-
-| Name | Binary Size | String Size | Features
-|-------------|-------------|----------------|----------------
-| [UUID] | 16 bytes | 36 chars | configuration free, not sortable
-| [shortuuid] | 16 bytes | 22 chars | configuration free, not sortable
-| [Snowflake] | 8 bytes | up to 20 chars | needs machine/DC configuration, needs central server, sortable
-| [MongoID] | 12 bytes | 24 chars | configuration free, sortable
-| xid | 12 bytes | 20 chars | configuration free, sortable
-
-[UUID]: https://en.wikipedia.org/wiki/Universally_unique_identifier
-[shortuuid]: https://github.com/stochastic-technologies/shortuuid
-[Snowflake]: https://blog.twitter.com/2010/announcing-snowflake
-[MongoID]: https://docs.mongodb.org/manual/reference/object-id/
-
-Features:
-
-- Size: 12 bytes (96 bits), smaller than UUID, larger than snowflake
-- Base32 hex encoded by default (20 chars when transported as printable string, still sortable)
-- Non configured, you don't need set a unique machine and/or data center id
-- K-ordered
-- Embedded time with 1 second precision
-- Unicity guaranteed for 16,777,216 (24 bits) unique ids per second and per host/process
-- Lock-free (i.e.: unlike UUIDv1 and v2)
-
-Best used with [zerolog](https://github.com/rs/zerolog)'s
-[RequestIDHandler](https://godoc.org/github.com/rs/zerolog/hlog#RequestIDHandler).
-
-Notes:
-
-- Xid is dependent on the system time, a monotonic counter and so is not cryptographically secure. If unpredictability of IDs is important, you should not use Xids. It is worth noting that most other UUID-like implementations are also not cryptographically secure. You should use libraries that rely on cryptographically secure sources (like /dev/urandom on unix, crypto/rand in golang), if you want a truly random ID generator.
-
-References:
-
-- http://www.slideshare.net/davegardnerisme/unique-id-generation-in-distributed-systems
-- https://en.wikipedia.org/wiki/Universally_unique_identifier
-- https://blog.twitter.com/2010/announcing-snowflake
-- Python port by [Graham Abbott](https://github.com/graham): https://github.com/graham/python_xid
-- Scala port by [Egor Kolotaev](https://github.com/kolotaev): https://github.com/kolotaev/ride
-- Rust port by [Jérôme Renard](https://github.com/jeromer/): https://github.com/jeromer/libxid
-- Ruby port by [Valar](https://github.com/valarpirai/): https://github.com/valarpirai/ruby_xid
-- Java port by [0xShamil](https://github.com/0xShamil/): https://github.com/0xShamil/java-xid
-- Dart port by [Peter Bwire](https://github.com/pitabwire): https://pub.dev/packages/xid
-
-## Install
-
- go get github.com/rs/xid
-
-## Usage
-
-```go
-guid := xid.New()
-
-println(guid.String())
-// Output: 9m4e2mr0ui3e8a215n4g
-```
-
-Get `xid` embedded info:
-
-```go
-guid.Machine()
-guid.Pid()
-guid.Time()
-guid.Counter()
-```
-
-## Benchmark
-
-Benchmark against Go [Maxim Bublis](https://github.com/satori)'s [UUID](https://github.com/satori/go.uuid).
-
-```
-BenchmarkXID 20000000 91.1 ns/op 32 B/op 1 allocs/op
-BenchmarkXID-2 20000000 55.9 ns/op 32 B/op 1 allocs/op
-BenchmarkXID-4 50000000 32.3 ns/op 32 B/op 1 allocs/op
-BenchmarkUUIDv1 10000000 204 ns/op 48 B/op 1 allocs/op
-BenchmarkUUIDv1-2 10000000 160 ns/op 48 B/op 1 allocs/op
-BenchmarkUUIDv1-4 10000000 195 ns/op 48 B/op 1 allocs/op
-BenchmarkUUIDv4 1000000 1503 ns/op 64 B/op 2 allocs/op
-BenchmarkUUIDv4-2 1000000 1427 ns/op 64 B/op 2 allocs/op
-BenchmarkUUIDv4-4 1000000 1452 ns/op 64 B/op 2 allocs/op
-```
-
-Note: UUIDv1 requires a global lock, hence the performance degradation as we add more CPUs.
-
-## Licenses
-
-All source code is licensed under the [MIT License](https://raw.github.com/rs/xid/master/LICENSE).
diff --git a/vendor/github.com/rs/xid/error.go b/vendor/github.com/rs/xid/error.go
deleted file mode 100644
index ea253749..00000000
--- a/vendor/github.com/rs/xid/error.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package xid
-
-const (
- // ErrInvalidID is returned when trying to unmarshal an invalid ID.
- ErrInvalidID strErr = "xid: invalid ID"
-)
-
-// strErr allows declaring errors as constants.
-type strErr string
-
-func (err strErr) Error() string { return string(err) }
diff --git a/vendor/github.com/rs/xid/hostid_darwin.go b/vendor/github.com/rs/xid/hostid_darwin.go
deleted file mode 100644
index 08351ff7..00000000
--- a/vendor/github.com/rs/xid/hostid_darwin.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build darwin
-
-package xid
-
-import "syscall"
-
-func readPlatformMachineID() (string, error) {
- return syscall.Sysctl("kern.uuid")
-}
diff --git a/vendor/github.com/rs/xid/hostid_fallback.go b/vendor/github.com/rs/xid/hostid_fallback.go
deleted file mode 100644
index 7fbd3c00..00000000
--- a/vendor/github.com/rs/xid/hostid_fallback.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build !darwin,!linux,!freebsd,!windows
-
-package xid
-
-import "errors"
-
-func readPlatformMachineID() (string, error) {
- return "", errors.New("not implemented")
-}
diff --git a/vendor/github.com/rs/xid/hostid_freebsd.go b/vendor/github.com/rs/xid/hostid_freebsd.go
deleted file mode 100644
index be25a039..00000000
--- a/vendor/github.com/rs/xid/hostid_freebsd.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build freebsd
-
-package xid
-
-import "syscall"
-
-func readPlatformMachineID() (string, error) {
- return syscall.Sysctl("kern.hostuuid")
-}
diff --git a/vendor/github.com/rs/xid/hostid_linux.go b/vendor/github.com/rs/xid/hostid_linux.go
deleted file mode 100644
index 837b2043..00000000
--- a/vendor/github.com/rs/xid/hostid_linux.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// +build linux
-
-package xid
-
-import "io/ioutil"
-
-func readPlatformMachineID() (string, error) {
- b, err := ioutil.ReadFile("/etc/machine-id")
- if err != nil || len(b) == 0 {
- b, err = ioutil.ReadFile("/sys/class/dmi/id/product_uuid")
- }
- return string(b), err
-}
diff --git a/vendor/github.com/rs/xid/hostid_windows.go b/vendor/github.com/rs/xid/hostid_windows.go
deleted file mode 100644
index ec2593ee..00000000
--- a/vendor/github.com/rs/xid/hostid_windows.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// +build windows
-
-package xid
-
-import (
- "fmt"
- "syscall"
- "unsafe"
-)
-
-func readPlatformMachineID() (string, error) {
- // source: https://github.com/shirou/gopsutil/blob/master/host/host_syscall.go
- var h syscall.Handle
- err := syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, syscall.StringToUTF16Ptr(`SOFTWARE\Microsoft\Cryptography`), 0, syscall.KEY_READ|syscall.KEY_WOW64_64KEY, &h)
- if err != nil {
- return "", err
- }
- defer syscall.RegCloseKey(h)
-
- const syscallRegBufLen = 74 // len(`{`) + len(`abcdefgh-1234-456789012-123345456671` * 2) + len(`}`) // 2 == bytes/UTF16
- const uuidLen = 36
-
- var regBuf [syscallRegBufLen]uint16
- bufLen := uint32(syscallRegBufLen)
- var valType uint32
- err = syscall.RegQueryValueEx(h, syscall.StringToUTF16Ptr(`MachineGuid`), nil, &valType, (*byte)(unsafe.Pointer(®Buf[0])), &bufLen)
- if err != nil {
- return "", err
- }
-
- hostID := syscall.UTF16ToString(regBuf[:])
- hostIDLen := len(hostID)
- if hostIDLen != uuidLen {
- return "", fmt.Errorf("HostID incorrect: %q\n", hostID)
- }
-
- return hostID, nil
-}
diff --git a/vendor/github.com/rs/xid/id.go b/vendor/github.com/rs/xid/id.go
deleted file mode 100644
index 1f536b41..00000000
--- a/vendor/github.com/rs/xid/id.go
+++ /dev/null
@@ -1,392 +0,0 @@
-// Package xid is a globally unique id generator suited for web scale
-//
-// Xid is using Mongo Object ID algorithm to generate globally unique ids:
-// https://docs.mongodb.org/manual/reference/object-id/
-//
-// - 4-byte value representing the seconds since the Unix epoch,
-// - 3-byte machine identifier,
-// - 2-byte process id, and
-// - 3-byte counter, starting with a random value.
-//
-// The binary representation of the id is compatible with Mongo 12 bytes Object IDs.
-// The string representation is using base32 hex (w/o padding) for better space efficiency
-// when stored in that form (20 bytes). The hex variant of base32 is used to retain the
-// sortable property of the id.
-//
-// Xid doesn't use base64 because case sensitivity and the 2 non alphanum chars may be an
-// issue when transported as a string between various systems. Base36 wasn't retained either
-// because 1/ it's not standard 2/ the resulting size is not predictable (not bit aligned)
-// and 3/ it would not remain sortable. To validate a base32 `xid`, expect a 20 chars long,
-// all lowercase sequence of `a` to `v` letters and `0` to `9` numbers (`[0-9a-v]{20}`).
-//
-// UUID is 16 bytes (128 bits), snowflake is 8 bytes (64 bits), xid stands in between
-// with 12 bytes with a more compact string representation ready for the web and no
-// required configuration or central generation server.
-//
-// Features:
-//
-// - Size: 12 bytes (96 bits), smaller than UUID, larger than snowflake
-// - Base32 hex encoded by default (16 bytes storage when transported as printable string)
-// - Non configured, you don't need set a unique machine and/or data center id
-// - K-ordered
-// - Embedded time with 1 second precision
-// - Unicity guaranteed for 16,777,216 (24 bits) unique ids per second and per host/process
-//
-// Best used with xlog's RequestIDHandler (https://godoc.org/github.com/rs/xlog#RequestIDHandler).
-//
-// References:
-//
-// - http://www.slideshare.net/davegardnerisme/unique-id-generation-in-distributed-systems
-// - https://en.wikipedia.org/wiki/Universally_unique_identifier
-// - https://blog.twitter.com/2010/announcing-snowflake
-package xid
-
-import (
- "bytes"
- "crypto/md5"
- "crypto/rand"
- "database/sql/driver"
- "encoding/binary"
- "fmt"
- "hash/crc32"
- "io/ioutil"
- "os"
- "sort"
- "sync/atomic"
- "time"
- "unsafe"
-)
-
-// Code inspired from mgo/bson ObjectId
-
-// ID represents a unique request id
-type ID [rawLen]byte
-
-const (
- encodedLen = 20 // string encoded len
- rawLen = 12 // binary raw len
-
- // encoding stores a custom version of the base32 encoding with lower case
- // letters.
- encoding = "0123456789abcdefghijklmnopqrstuv"
-)
-
-var (
- // objectIDCounter is atomically incremented when generating a new ObjectId
- // using NewObjectId() function. It's used as a counter part of an id.
- // This id is initialized with a random value.
- objectIDCounter = randInt()
-
- // machineId stores machine id generated once and used in subsequent calls
- // to NewObjectId function.
- machineID = readMachineID()
-
- // pid stores the current process id
- pid = os.Getpid()
-
- nilID ID
-
- // dec is the decoding map for base32 encoding
- dec [256]byte
-)
-
-func init() {
- for i := 0; i < len(dec); i++ {
- dec[i] = 0xFF
- }
- for i := 0; i < len(encoding); i++ {
- dec[encoding[i]] = byte(i)
- }
-
- // If /proc/self/cpuset exists and is not /, we can assume that we are in a
- // form of container and use the content of cpuset xor-ed with the PID in
- // order get a reasonable machine global unique PID.
- b, err := ioutil.ReadFile("/proc/self/cpuset")
- if err == nil && len(b) > 1 {
- pid ^= int(crc32.ChecksumIEEE(b))
- }
-}
-
-// readMachineId generates machine id and puts it into the machineId global
-// variable. If this function fails to get the hostname, it will cause
-// a runtime error.
-func readMachineID() []byte {
- id := make([]byte, 3)
- hid, err := readPlatformMachineID()
- if err != nil || len(hid) == 0 {
- hid, err = os.Hostname()
- }
- if err == nil && len(hid) != 0 {
- hw := md5.New()
- hw.Write([]byte(hid))
- copy(id, hw.Sum(nil))
- } else {
- // Fallback to rand number if machine id can't be gathered
- if _, randErr := rand.Reader.Read(id); randErr != nil {
- panic(fmt.Errorf("xid: cannot get hostname nor generate a random number: %v; %v", err, randErr))
- }
- }
- return id
-}
-
-// randInt generates a random uint32
-func randInt() uint32 {
- b := make([]byte, 3)
- if _, err := rand.Reader.Read(b); err != nil {
- panic(fmt.Errorf("xid: cannot generate random number: %v;", err))
- }
- return uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2])
-}
-
-// New generates a globally unique ID
-func New() ID {
- return NewWithTime(time.Now())
-}
-
-// NewWithTime generates a globally unique ID with the passed in time
-func NewWithTime(t time.Time) ID {
- var id ID
- // Timestamp, 4 bytes, big endian
- binary.BigEndian.PutUint32(id[:], uint32(t.Unix()))
- // Machine, first 3 bytes of md5(hostname)
- id[4] = machineID[0]
- id[5] = machineID[1]
- id[6] = machineID[2]
- // Pid, 2 bytes, specs don't specify endianness, but we use big endian.
- id[7] = byte(pid >> 8)
- id[8] = byte(pid)
- // Increment, 3 bytes, big endian
- i := atomic.AddUint32(&objectIDCounter, 1)
- id[9] = byte(i >> 16)
- id[10] = byte(i >> 8)
- id[11] = byte(i)
- return id
-}
-
-// FromString reads an ID from its string representation
-func FromString(id string) (ID, error) {
- i := &ID{}
- err := i.UnmarshalText([]byte(id))
- return *i, err
-}
-
-// String returns a base32 hex lowercased with no padding representation of the id (char set is 0-9, a-v).
-func (id ID) String() string {
- text := make([]byte, encodedLen)
- encode(text, id[:])
- return *(*string)(unsafe.Pointer(&text))
-}
-
-// Encode encodes the id using base32 encoding, writing 20 bytes to dst and return it.
-func (id ID) Encode(dst []byte) []byte {
- encode(dst, id[:])
- return dst
-}
-
-// MarshalText implements encoding/text TextMarshaler interface
-func (id ID) MarshalText() ([]byte, error) {
- text := make([]byte, encodedLen)
- encode(text, id[:])
- return text, nil
-}
-
-// MarshalJSON implements encoding/json Marshaler interface
-func (id ID) MarshalJSON() ([]byte, error) {
- if id.IsNil() {
- return []byte("null"), nil
- }
- text := make([]byte, encodedLen+2)
- encode(text[1:encodedLen+1], id[:])
- text[0], text[encodedLen+1] = '"', '"'
- return text, nil
-}
-
-// encode by unrolling the stdlib base32 algorithm + removing all safe checks
-func encode(dst, id []byte) {
- _ = dst[19]
- _ = id[11]
-
- dst[19] = encoding[(id[11]<<4)&0x1F]
- dst[18] = encoding[(id[11]>>1)&0x1F]
- dst[17] = encoding[(id[11]>>6)&0x1F|(id[10]<<2)&0x1F]
- dst[16] = encoding[id[10]>>3]
- dst[15] = encoding[id[9]&0x1F]
- dst[14] = encoding[(id[9]>>5)|(id[8]<<3)&0x1F]
- dst[13] = encoding[(id[8]>>2)&0x1F]
- dst[12] = encoding[id[8]>>7|(id[7]<<1)&0x1F]
- dst[11] = encoding[(id[7]>>4)&0x1F|(id[6]<<4)&0x1F]
- dst[10] = encoding[(id[6]>>1)&0x1F]
- dst[9] = encoding[(id[6]>>6)&0x1F|(id[5]<<2)&0x1F]
- dst[8] = encoding[id[5]>>3]
- dst[7] = encoding[id[4]&0x1F]
- dst[6] = encoding[id[4]>>5|(id[3]<<3)&0x1F]
- dst[5] = encoding[(id[3]>>2)&0x1F]
- dst[4] = encoding[id[3]>>7|(id[2]<<1)&0x1F]
- dst[3] = encoding[(id[2]>>4)&0x1F|(id[1]<<4)&0x1F]
- dst[2] = encoding[(id[1]>>1)&0x1F]
- dst[1] = encoding[(id[1]>>6)&0x1F|(id[0]<<2)&0x1F]
- dst[0] = encoding[id[0]>>3]
-}
-
-// UnmarshalText implements encoding/text TextUnmarshaler interface
-func (id *ID) UnmarshalText(text []byte) error {
- if len(text) != encodedLen {
- return ErrInvalidID
- }
- for _, c := range text {
- if dec[c] == 0xFF {
- return ErrInvalidID
- }
- }
- if !decode(id, text) {
- return ErrInvalidID
- }
- return nil
-}
-
-// UnmarshalJSON implements encoding/json Unmarshaler interface
-func (id *ID) UnmarshalJSON(b []byte) error {
- s := string(b)
- if s == "null" {
- *id = nilID
- return nil
- }
- // Check the slice length to prevent panic on passing it to UnmarshalText()
- if len(b) < 2 {
- return ErrInvalidID
- }
- return id.UnmarshalText(b[1 : len(b)-1])
-}
-
-// decode by unrolling the stdlib base32 algorithm + customized safe check.
-func decode(id *ID, src []byte) bool {
- _ = src[19]
- _ = id[11]
-
- id[11] = dec[src[17]]<<6 | dec[src[18]]<<1 | dec[src[19]]>>4
- id[10] = dec[src[16]]<<3 | dec[src[17]]>>2
- id[9] = dec[src[14]]<<5 | dec[src[15]]
- id[8] = dec[src[12]]<<7 | dec[src[13]]<<2 | dec[src[14]]>>3
- id[7] = dec[src[11]]<<4 | dec[src[12]]>>1
- id[6] = dec[src[9]]<<6 | dec[src[10]]<<1 | dec[src[11]]>>4
- id[5] = dec[src[8]]<<3 | dec[src[9]]>>2
- id[4] = dec[src[6]]<<5 | dec[src[7]]
- id[3] = dec[src[4]]<<7 | dec[src[5]]<<2 | dec[src[6]]>>3
- id[2] = dec[src[3]]<<4 | dec[src[4]]>>1
- id[1] = dec[src[1]]<<6 | dec[src[2]]<<1 | dec[src[3]]>>4
- id[0] = dec[src[0]]<<3 | dec[src[1]]>>2
-
- // Validate that there are no discarer bits (padding) in src that would
- // cause the string-encoded id not to equal src.
- var check [4]byte
-
- check[3] = encoding[(id[11]<<4)&0x1F]
- check[2] = encoding[(id[11]>>1)&0x1F]
- check[1] = encoding[(id[11]>>6)&0x1F|(id[10]<<2)&0x1F]
- check[0] = encoding[id[10]>>3]
- return bytes.Equal([]byte(src[16:20]), check[:])
-}
-
-// Time returns the timestamp part of the id.
-// It's a runtime error to call this method with an invalid id.
-func (id ID) Time() time.Time {
- // First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch.
- secs := int64(binary.BigEndian.Uint32(id[0:4]))
- return time.Unix(secs, 0)
-}
-
-// Machine returns the 3-byte machine id part of the id.
-// It's a runtime error to call this method with an invalid id.
-func (id ID) Machine() []byte {
- return id[4:7]
-}
-
-// Pid returns the process id part of the id.
-// It's a runtime error to call this method with an invalid id.
-func (id ID) Pid() uint16 {
- return binary.BigEndian.Uint16(id[7:9])
-}
-
-// Counter returns the incrementing value part of the id.
-// It's a runtime error to call this method with an invalid id.
-func (id ID) Counter() int32 {
- b := id[9:12]
- // Counter is stored as big-endian 3-byte value
- return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]))
-}
-
-// Value implements the driver.Valuer interface.
-func (id ID) Value() (driver.Value, error) {
- if id.IsNil() {
- return nil, nil
- }
- b, err := id.MarshalText()
- return string(b), err
-}
-
-// Scan implements the sql.Scanner interface.
-func (id *ID) Scan(value interface{}) (err error) {
- switch val := value.(type) {
- case string:
- return id.UnmarshalText([]byte(val))
- case []byte:
- return id.UnmarshalText(val)
- case nil:
- *id = nilID
- return nil
- default:
- return fmt.Errorf("xid: scanning unsupported type: %T", value)
- }
-}
-
-// IsNil Returns true if this is a "nil" ID
-func (id ID) IsNil() bool {
- return id == nilID
-}
-
-// NilID returns a zero value for `xid.ID`.
-func NilID() ID {
- return nilID
-}
-
-// Bytes returns the byte array representation of `ID`
-func (id ID) Bytes() []byte {
- return id[:]
-}
-
-// FromBytes convert the byte array representation of `ID` back to `ID`
-func FromBytes(b []byte) (ID, error) {
- var id ID
- if len(b) != rawLen {
- return id, ErrInvalidID
- }
- copy(id[:], b)
- return id, nil
-}
-
-// Compare returns an integer comparing two IDs. It behaves just like `bytes.Compare`.
-// The result will be 0 if two IDs are identical, -1 if current id is less than the other one,
-// and 1 if current id is greater than the other.
-func (id ID) Compare(other ID) int {
- return bytes.Compare(id[:], other[:])
-}
-
-type sorter []ID
-
-func (s sorter) Len() int {
- return len(s)
-}
-
-func (s sorter) Less(i, j int) bool {
- return s[i].Compare(s[j]) < 0
-}
-
-func (s sorter) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-// Sort sorts an array of IDs inplace.
-// It works by wrapping `[]ID` and use `sort.Sort`.
-func Sort(ids []ID) {
- sort.Sort(sorter(ids))
-}
diff --git a/vendor/github.com/stretchr/testify/LICENSE b/vendor/github.com/stretchr/testify/LICENSE
deleted file mode 100644
index 4b0421cf..00000000
--- a/vendor/github.com/stretchr/testify/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
deleted file mode 100644
index 3bb22a97..00000000
--- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go
+++ /dev/null
@@ -1,436 +0,0 @@
-package assert
-
-import (
- "fmt"
- "reflect"
- "time"
-)
-
-type CompareType int
-
-const (
- compareLess CompareType = iota - 1
- compareEqual
- compareGreater
-)
-
-var (
- intType = reflect.TypeOf(int(1))
- int8Type = reflect.TypeOf(int8(1))
- int16Type = reflect.TypeOf(int16(1))
- int32Type = reflect.TypeOf(int32(1))
- int64Type = reflect.TypeOf(int64(1))
-
- uintType = reflect.TypeOf(uint(1))
- uint8Type = reflect.TypeOf(uint8(1))
- uint16Type = reflect.TypeOf(uint16(1))
- uint32Type = reflect.TypeOf(uint32(1))
- uint64Type = reflect.TypeOf(uint64(1))
-
- float32Type = reflect.TypeOf(float32(1))
- float64Type = reflect.TypeOf(float64(1))
-
- stringType = reflect.TypeOf("")
-
- timeType = reflect.TypeOf(time.Time{})
-)
-
-func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
- obj1Value := reflect.ValueOf(obj1)
- obj2Value := reflect.ValueOf(obj2)
-
- // throughout this switch we try and avoid calling .Convert() if possible,
- // as this has a pretty big performance impact
- switch kind {
- case reflect.Int:
- {
- intobj1, ok := obj1.(int)
- if !ok {
- intobj1 = obj1Value.Convert(intType).Interface().(int)
- }
- intobj2, ok := obj2.(int)
- if !ok {
- intobj2 = obj2Value.Convert(intType).Interface().(int)
- }
- if intobj1 > intobj2 {
- return compareGreater, true
- }
- if intobj1 == intobj2 {
- return compareEqual, true
- }
- if intobj1 < intobj2 {
- return compareLess, true
- }
- }
- case reflect.Int8:
- {
- int8obj1, ok := obj1.(int8)
- if !ok {
- int8obj1 = obj1Value.Convert(int8Type).Interface().(int8)
- }
- int8obj2, ok := obj2.(int8)
- if !ok {
- int8obj2 = obj2Value.Convert(int8Type).Interface().(int8)
- }
- if int8obj1 > int8obj2 {
- return compareGreater, true
- }
- if int8obj1 == int8obj2 {
- return compareEqual, true
- }
- if int8obj1 < int8obj2 {
- return compareLess, true
- }
- }
- case reflect.Int16:
- {
- int16obj1, ok := obj1.(int16)
- if !ok {
- int16obj1 = obj1Value.Convert(int16Type).Interface().(int16)
- }
- int16obj2, ok := obj2.(int16)
- if !ok {
- int16obj2 = obj2Value.Convert(int16Type).Interface().(int16)
- }
- if int16obj1 > int16obj2 {
- return compareGreater, true
- }
- if int16obj1 == int16obj2 {
- return compareEqual, true
- }
- if int16obj1 < int16obj2 {
- return compareLess, true
- }
- }
- case reflect.Int32:
- {
- int32obj1, ok := obj1.(int32)
- if !ok {
- int32obj1 = obj1Value.Convert(int32Type).Interface().(int32)
- }
- int32obj2, ok := obj2.(int32)
- if !ok {
- int32obj2 = obj2Value.Convert(int32Type).Interface().(int32)
- }
- if int32obj1 > int32obj2 {
- return compareGreater, true
- }
- if int32obj1 == int32obj2 {
- return compareEqual, true
- }
- if int32obj1 < int32obj2 {
- return compareLess, true
- }
- }
- case reflect.Int64:
- {
- int64obj1, ok := obj1.(int64)
- if !ok {
- int64obj1 = obj1Value.Convert(int64Type).Interface().(int64)
- }
- int64obj2, ok := obj2.(int64)
- if !ok {
- int64obj2 = obj2Value.Convert(int64Type).Interface().(int64)
- }
- if int64obj1 > int64obj2 {
- return compareGreater, true
- }
- if int64obj1 == int64obj2 {
- return compareEqual, true
- }
- if int64obj1 < int64obj2 {
- return compareLess, true
- }
- }
- case reflect.Uint:
- {
- uintobj1, ok := obj1.(uint)
- if !ok {
- uintobj1 = obj1Value.Convert(uintType).Interface().(uint)
- }
- uintobj2, ok := obj2.(uint)
- if !ok {
- uintobj2 = obj2Value.Convert(uintType).Interface().(uint)
- }
- if uintobj1 > uintobj2 {
- return compareGreater, true
- }
- if uintobj1 == uintobj2 {
- return compareEqual, true
- }
- if uintobj1 < uintobj2 {
- return compareLess, true
- }
- }
- case reflect.Uint8:
- {
- uint8obj1, ok := obj1.(uint8)
- if !ok {
- uint8obj1 = obj1Value.Convert(uint8Type).Interface().(uint8)
- }
- uint8obj2, ok := obj2.(uint8)
- if !ok {
- uint8obj2 = obj2Value.Convert(uint8Type).Interface().(uint8)
- }
- if uint8obj1 > uint8obj2 {
- return compareGreater, true
- }
- if uint8obj1 == uint8obj2 {
- return compareEqual, true
- }
- if uint8obj1 < uint8obj2 {
- return compareLess, true
- }
- }
- case reflect.Uint16:
- {
- uint16obj1, ok := obj1.(uint16)
- if !ok {
- uint16obj1 = obj1Value.Convert(uint16Type).Interface().(uint16)
- }
- uint16obj2, ok := obj2.(uint16)
- if !ok {
- uint16obj2 = obj2Value.Convert(uint16Type).Interface().(uint16)
- }
- if uint16obj1 > uint16obj2 {
- return compareGreater, true
- }
- if uint16obj1 == uint16obj2 {
- return compareEqual, true
- }
- if uint16obj1 < uint16obj2 {
- return compareLess, true
- }
- }
- case reflect.Uint32:
- {
- uint32obj1, ok := obj1.(uint32)
- if !ok {
- uint32obj1 = obj1Value.Convert(uint32Type).Interface().(uint32)
- }
- uint32obj2, ok := obj2.(uint32)
- if !ok {
- uint32obj2 = obj2Value.Convert(uint32Type).Interface().(uint32)
- }
- if uint32obj1 > uint32obj2 {
- return compareGreater, true
- }
- if uint32obj1 == uint32obj2 {
- return compareEqual, true
- }
- if uint32obj1 < uint32obj2 {
- return compareLess, true
- }
- }
- case reflect.Uint64:
- {
- uint64obj1, ok := obj1.(uint64)
- if !ok {
- uint64obj1 = obj1Value.Convert(uint64Type).Interface().(uint64)
- }
- uint64obj2, ok := obj2.(uint64)
- if !ok {
- uint64obj2 = obj2Value.Convert(uint64Type).Interface().(uint64)
- }
- if uint64obj1 > uint64obj2 {
- return compareGreater, true
- }
- if uint64obj1 == uint64obj2 {
- return compareEqual, true
- }
- if uint64obj1 < uint64obj2 {
- return compareLess, true
- }
- }
- case reflect.Float32:
- {
- float32obj1, ok := obj1.(float32)
- if !ok {
- float32obj1 = obj1Value.Convert(float32Type).Interface().(float32)
- }
- float32obj2, ok := obj2.(float32)
- if !ok {
- float32obj2 = obj2Value.Convert(float32Type).Interface().(float32)
- }
- if float32obj1 > float32obj2 {
- return compareGreater, true
- }
- if float32obj1 == float32obj2 {
- return compareEqual, true
- }
- if float32obj1 < float32obj2 {
- return compareLess, true
- }
- }
- case reflect.Float64:
- {
- float64obj1, ok := obj1.(float64)
- if !ok {
- float64obj1 = obj1Value.Convert(float64Type).Interface().(float64)
- }
- float64obj2, ok := obj2.(float64)
- if !ok {
- float64obj2 = obj2Value.Convert(float64Type).Interface().(float64)
- }
- if float64obj1 > float64obj2 {
- return compareGreater, true
- }
- if float64obj1 == float64obj2 {
- return compareEqual, true
- }
- if float64obj1 < float64obj2 {
- return compareLess, true
- }
- }
- case reflect.String:
- {
- stringobj1, ok := obj1.(string)
- if !ok {
- stringobj1 = obj1Value.Convert(stringType).Interface().(string)
- }
- stringobj2, ok := obj2.(string)
- if !ok {
- stringobj2 = obj2Value.Convert(stringType).Interface().(string)
- }
- if stringobj1 > stringobj2 {
- return compareGreater, true
- }
- if stringobj1 == stringobj2 {
- return compareEqual, true
- }
- if stringobj1 < stringobj2 {
- return compareLess, true
- }
- }
- // Check for known struct types we can check for compare results.
- case reflect.Struct:
- {
- // All structs enter here. We're not interested in most types.
- if !canConvert(obj1Value, timeType) {
- break
- }
-
- // time.Time can compared!
- timeObj1, ok := obj1.(time.Time)
- if !ok {
- timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time)
- }
-
- timeObj2, ok := obj2.(time.Time)
- if !ok {
- timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time)
- }
-
- return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64)
- }
- }
-
- return compareEqual, false
-}
-
-// Greater asserts that the first element is greater than the second
-//
-// assert.Greater(t, 2, 1)
-// assert.Greater(t, float64(2), float64(1))
-// assert.Greater(t, "b", "a")
-func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
-}
-
-// GreaterOrEqual asserts that the first element is greater than or equal to the second
-//
-// assert.GreaterOrEqual(t, 2, 1)
-// assert.GreaterOrEqual(t, 2, 2)
-// assert.GreaterOrEqual(t, "b", "a")
-// assert.GreaterOrEqual(t, "b", "b")
-func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
-}
-
-// Less asserts that the first element is less than the second
-//
-// assert.Less(t, 1, 2)
-// assert.Less(t, float64(1), float64(2))
-// assert.Less(t, "a", "b")
-func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
-}
-
-// LessOrEqual asserts that the first element is less than or equal to the second
-//
-// assert.LessOrEqual(t, 1, 2)
-// assert.LessOrEqual(t, 2, 2)
-// assert.LessOrEqual(t, "a", "b")
-// assert.LessOrEqual(t, "b", "b")
-func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
-}
-
-// Positive asserts that the specified element is positive
-//
-// assert.Positive(t, 1)
-// assert.Positive(t, 1.23)
-func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- zero := reflect.Zero(reflect.TypeOf(e))
- return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...)
-}
-
-// Negative asserts that the specified element is negative
-//
-// assert.Negative(t, -1)
-// assert.Negative(t, -1.23)
-func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- zero := reflect.Zero(reflect.TypeOf(e))
- return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...)
-}
-
-func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
- e1Kind := reflect.ValueOf(e1).Kind()
- e2Kind := reflect.ValueOf(e2).Kind()
- if e1Kind != e2Kind {
- return Fail(t, "Elements should be the same type", msgAndArgs...)
- }
-
- compareResult, isComparable := compare(e1, e2, e1Kind)
- if !isComparable {
- return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
- }
-
- if !containsValue(allowedComparesResults, compareResult) {
- return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...)
- }
-
- return true
-}
-
-func containsValue(values []CompareType, value CompareType) bool {
- for _, v := range values {
- if v == value {
- return true
- }
- }
-
- return false
-}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
deleted file mode 100644
index df22c47f..00000000
--- a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
+++ /dev/null
@@ -1,16 +0,0 @@
-//go:build go1.17
-// +build go1.17
-
-// TODO: once support for Go 1.16 is dropped, this file can be
-// merged/removed with assertion_compare_go1.17_test.go and
-// assertion_compare_legacy.go
-
-package assert
-
-import "reflect"
-
-// Wrapper around reflect.Value.CanConvert, for compatability
-// reasons.
-func canConvert(value reflect.Value, to reflect.Type) bool {
- return value.CanConvert(to)
-}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go
deleted file mode 100644
index 1701af2a..00000000
--- a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go
+++ /dev/null
@@ -1,16 +0,0 @@
-//go:build !go1.17
-// +build !go1.17
-
-// TODO: once support for Go 1.16 is dropped, this file can be
-// merged/removed with assertion_compare_go1.17_test.go and
-// assertion_compare_can_convert.go
-
-package assert
-
-import "reflect"
-
-// Older versions of Go does not have the reflect.Value.CanConvert
-// method.
-func canConvert(value reflect.Value, to reflect.Type) bool {
- return false
-}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go
deleted file mode 100644
index 27e2420e..00000000
--- a/vendor/github.com/stretchr/testify/assert/assertion_format.go
+++ /dev/null
@@ -1,753 +0,0 @@
-/*
-* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
-* THIS FILE MUST NOT BE EDITED BY HAND
- */
-
-package assert
-
-import (
- http "net/http"
- url "net/url"
- time "time"
-)
-
-// Conditionf uses a Comparison to assert a complex condition.
-func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return Condition(t, comp, append([]interface{}{msg}, args...)...)
-}
-
-// Containsf asserts that the specified string, list(array, slice...) or map contains the
-// specified substring or element.
-//
-// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted")
-// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted")
-// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted")
-func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return Contains(t, s, contains, append([]interface{}{msg}, args...)...)
-}
-
-// DirExistsf checks whether a directory exists in the given path. It also fails
-// if the path is a file rather a directory or there is an error checking whether it exists.
-func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return DirExists(t, path, append([]interface{}{msg}, args...)...)
-}
-
-// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified
-// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
-// the number of appearances of each of them in both lists should match.
-//
-// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
-func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...)
-}
-
-// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
-//
-// assert.Emptyf(t, obj, "error message %s", "formatted")
-func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return Empty(t, object, append([]interface{}{msg}, args...)...)
-}
-
-// Equalf asserts that two objects are equal.
-//
-// assert.Equalf(t, 123, 123, "error message %s", "formatted")
-//
-// Pointer variable equality is determined based on the equality of the
-// referenced values (as opposed to the memory addresses). Function equality
-// cannot be determined and will always fail.
-func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return Equal(t, expected, actual, append([]interface{}{msg}, args...)...)
-}
-
-// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
-// and that it is equal to the provided error.
-//
-// actualObj, err := SomeFunction()
-// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted")
-func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...)
-}
-
-// EqualValuesf asserts that two objects are equal or convertable to the same types
-// and equal.
-//
-// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
-func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return EqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
-}
-
-// Errorf asserts that a function returned an error (i.e. not `nil`).
-//
-// actualObj, err := SomeFunction()
-// if assert.Errorf(t, err, "error message %s", "formatted") {
-// assert.Equal(t, expectedErrorf, err)
-// }
-func Errorf(t TestingT, err error, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return Error(t, err, append([]interface{}{msg}, args...)...)
-}
-
-// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
-// This is a wrapper for errors.As.
-func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...)
-}
-
-// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
-// and that the error contains the specified substring.
-//
-// actualObj, err := SomeFunction()
-// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted")
-func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return ErrorContains(t, theError, contains, append([]interface{}{msg}, args...)...)
-}
-
-// ErrorIsf asserts that at least one of the errors in err's chain matches target.
-// This is a wrapper for errors.Is.
-func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return ErrorIs(t, err, target, append([]interface{}{msg}, args...)...)
-}
-
-// Eventuallyf asserts that given condition will be met in waitFor time,
-// periodically checking target function each tick.
-//
-// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
-func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return Eventually(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...)
-}
-
-// Exactlyf asserts that two objects are equal in value and type.
-//
-// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
-func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return Exactly(t, expected, actual, append([]interface{}{msg}, args...)...)
-}
-
-// Failf reports a failure through
-func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return Fail(t, failureMessage, append([]interface{}{msg}, args...)...)
-}
-
-// FailNowf fails test
-func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return FailNow(t, failureMessage, append([]interface{}{msg}, args...)...)
-}
-
-// Falsef asserts that the specified value is false.
-//
-// assert.Falsef(t, myBool, "error message %s", "formatted")
-func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return False(t, value, append([]interface{}{msg}, args...)...)
-}
-
-// FileExistsf checks whether a file exists in the given path. It also fails if
-// the path points to a directory or there is an error when trying to check the file.
-func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return FileExists(t, path, append([]interface{}{msg}, args...)...)
-}
-
-// Greaterf asserts that the first element is greater than the second
-//
-// assert.Greaterf(t, 2, 1, "error message %s", "formatted")
-// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted")
-// assert.Greaterf(t, "b", "a", "error message %s", "formatted")
-func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return Greater(t, e1, e2, append([]interface{}{msg}, args...)...)
-}
-
-// GreaterOrEqualf asserts that the first element is greater than or equal to the second
-//
-// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted")
-// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted")
-// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted")
-// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted")
-func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return GreaterOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...)
-}
-
-// HTTPBodyContainsf asserts that a specified handler returns a
-// body that contains a string.
-//
-// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return HTTPBodyContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
-}
-
-// HTTPBodyNotContainsf asserts that a specified handler returns a
-// body that does not contain a string.
-//
-// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return HTTPBodyNotContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
-}
-
-// HTTPErrorf asserts that a specified handler returns an error status code.
-//
-// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return HTTPError(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
-}
-
-// HTTPRedirectf asserts that a specified handler returns a redirect status code.
-//
-// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
-}
-
-// HTTPStatusCodef asserts that a specified handler returns a specified status code.
-//
-// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return HTTPStatusCode(t, handler, method, url, values, statuscode, append([]interface{}{msg}, args...)...)
-}
-
-// HTTPSuccessf asserts that a specified handler returns a success status code.
-//
-// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return HTTPSuccess(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
-}
-
-// Implementsf asserts that an object is implemented by the specified interface.
-//
-// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
-func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return Implements(t, interfaceObject, object, append([]interface{}{msg}, args...)...)
-}
-
-// InDeltaf asserts that the two numerals are within delta of each other.
-//
-// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
-func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return InDelta(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
-}
-
-// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
-func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return InDeltaMapValues(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
-}
-
-// InDeltaSlicef is the same as InDelta, except it compares two slices.
-func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return InDeltaSlice(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
-}
-
-// InEpsilonf asserts that expected and actual have a relative error less than epsilon
-func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return InEpsilon(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
-}
-
-// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
-func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
-}
-
-// IsDecreasingf asserts that the collection is decreasing
-//
-// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted")
-// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted")
-// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
-func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return IsDecreasing(t, object, append([]interface{}{msg}, args...)...)
-}
-
-// IsIncreasingf asserts that the collection is increasing
-//
-// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted")
-// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted")
-// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
-func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return IsIncreasing(t, object, append([]interface{}{msg}, args...)...)
-}
-
-// IsNonDecreasingf asserts that the collection is not decreasing
-//
-// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted")
-// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted")
-// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
-func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return IsNonDecreasing(t, object, append([]interface{}{msg}, args...)...)
-}
-
-// IsNonIncreasingf asserts that the collection is not increasing
-//
-// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted")
-// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted")
-// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
-func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...)
-}
-
-// IsTypef asserts that the specified objects are of the same type.
-func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return IsType(t, expectedType, object, append([]interface{}{msg}, args...)...)
-}
-
-// JSONEqf asserts that two JSON strings are equivalent.
-//
-// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
-func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...)
-}
-
-// Lenf asserts that the specified object has specific length.
-// Lenf also fails if the object has a type that len() not accept.
-//
-// assert.Lenf(t, mySlice, 3, "error message %s", "formatted")
-func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return Len(t, object, length, append([]interface{}{msg}, args...)...)
-}
-
-// Lessf asserts that the first element is less than the second
-//
-// assert.Lessf(t, 1, 2, "error message %s", "formatted")
-// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted")
-// assert.Lessf(t, "a", "b", "error message %s", "formatted")
-func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return Less(t, e1, e2, append([]interface{}{msg}, args...)...)
-}
-
-// LessOrEqualf asserts that the first element is less than or equal to the second
-//
-// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted")
-// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted")
-// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted")
-// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted")
-func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return LessOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...)
-}
-
-// Negativef asserts that the specified element is negative
-//
-// assert.Negativef(t, -1, "error message %s", "formatted")
-// assert.Negativef(t, -1.23, "error message %s", "formatted")
-func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return Negative(t, e, append([]interface{}{msg}, args...)...)
-}
-
-// Neverf asserts that the given condition doesn't satisfy in waitFor time,
-// periodically checking the target function each tick.
-//
-// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
-func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return Never(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...)
-}
-
-// Nilf asserts that the specified object is nil.
-//
-// assert.Nilf(t, err, "error message %s", "formatted")
-func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return Nil(t, object, append([]interface{}{msg}, args...)...)
-}
-
-// NoDirExistsf checks whether a directory does not exist in the given path.
-// It fails if the path points to an existing _directory_ only.
-func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return NoDirExists(t, path, append([]interface{}{msg}, args...)...)
-}
-
-// NoErrorf asserts that a function returned no error (i.e. `nil`).
-//
-// actualObj, err := SomeFunction()
-// if assert.NoErrorf(t, err, "error message %s", "formatted") {
-// assert.Equal(t, expectedObj, actualObj)
-// }
-func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return NoError(t, err, append([]interface{}{msg}, args...)...)
-}
-
-// NoFileExistsf checks whether a file does not exist in a given path. It fails
-// if the path points to an existing _file_ only.
-func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return NoFileExists(t, path, append([]interface{}{msg}, args...)...)
-}
-
-// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
-// specified substring or element.
-//
-// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted")
-// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted")
-// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted")
-func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return NotContains(t, s, contains, append([]interface{}{msg}, args...)...)
-}
-
-// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
-//
-// if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
-// assert.Equal(t, "two", obj[1])
-// }
-func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return NotEmpty(t, object, append([]interface{}{msg}, args...)...)
-}
-
-// NotEqualf asserts that the specified values are NOT equal.
-//
-// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
-//
-// Pointer variable equality is determined based on the equality of the
-// referenced values (as opposed to the memory addresses).
-func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...)
-}
-
-// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
-//
-// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted")
-func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
-}
-
-// NotErrorIsf asserts that at none of the errors in err's chain matches target.
-// This is a wrapper for errors.Is.
-func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...)
-}
-
-// NotNilf asserts that the specified object is not nil.
-//
-// assert.NotNilf(t, err, "error message %s", "formatted")
-func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return NotNil(t, object, append([]interface{}{msg}, args...)...)
-}
-
-// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
-//
-// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted")
-func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return NotPanics(t, f, append([]interface{}{msg}, args...)...)
-}
-
-// NotRegexpf asserts that a specified regexp does not match a string.
-//
-// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
-// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
-func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...)
-}
-
-// NotSamef asserts that two pointers do not reference the same object.
-//
-// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted")
-//
-// Both arguments must be pointer variables. Pointer variable sameness is
-// determined based on the equality of both type and value.
-func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...)
-}
-
-// NotSubsetf asserts that the specified list(array, slice...) contains not all
-// elements given in the specified subset(array, slice...).
-//
-// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
-func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return NotSubset(t, list, subset, append([]interface{}{msg}, args...)...)
-}
-
-// NotZerof asserts that i is not the zero value for its type.
-func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return NotZero(t, i, append([]interface{}{msg}, args...)...)
-}
-
-// Panicsf asserts that the code inside the specified PanicTestFunc panics.
-//
-// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted")
-func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return Panics(t, f, append([]interface{}{msg}, args...)...)
-}
-
-// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc
-// panics, and that the recovered panic value is an error that satisfies the
-// EqualError comparison.
-//
-// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
-func PanicsWithErrorf(t TestingT, errString string, f PanicTestFunc, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return PanicsWithError(t, errString, f, append([]interface{}{msg}, args...)...)
-}
-
-// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
-// the recovered panic value equals the expected panic value.
-//
-// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
-func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...)
-}
-
-// Positivef asserts that the specified element is positive
-//
-// assert.Positivef(t, 1, "error message %s", "formatted")
-// assert.Positivef(t, 1.23, "error message %s", "formatted")
-func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return Positive(t, e, append([]interface{}{msg}, args...)...)
-}
-
-// Regexpf asserts that a specified regexp matches a string.
-//
-// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
-// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
-func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return Regexp(t, rx, str, append([]interface{}{msg}, args...)...)
-}
-
-// Samef asserts that two pointers reference the same object.
-//
-// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted")
-//
-// Both arguments must be pointer variables. Pointer variable sameness is
-// determined based on the equality of both type and value.
-func Samef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return Same(t, expected, actual, append([]interface{}{msg}, args...)...)
-}
-
-// Subsetf asserts that the specified list(array, slice...) contains all
-// elements given in the specified subset(array, slice...).
-//
-// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
-func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return Subset(t, list, subset, append([]interface{}{msg}, args...)...)
-}
-
-// Truef asserts that the specified value is true.
-//
-// assert.Truef(t, myBool, "error message %s", "formatted")
-func Truef(t TestingT, value bool, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return True(t, value, append([]interface{}{msg}, args...)...)
-}
-
-// WithinDurationf asserts that the two times are within duration delta of each other.
-//
-// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
-func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
-}
-
-// YAMLEqf asserts that two YAML strings are equivalent.
-func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return YAMLEq(t, expected, actual, append([]interface{}{msg}, args...)...)
-}
-
-// Zerof asserts that i is the zero value for its type.
-func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return Zero(t, i, append([]interface{}{msg}, args...)...)
-}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl
deleted file mode 100644
index d2bb0b81..00000000
--- a/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl
+++ /dev/null
@@ -1,5 +0,0 @@
-{{.CommentFormat}}
-func {{.DocInfo.Name}}f(t TestingT, {{.ParamsFormat}}) bool {
- if h, ok := t.(tHelper); ok { h.Helper() }
- return {{.DocInfo.Name}}(t, {{.ForwardedParamsFormat}})
-}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
deleted file mode 100644
index d9ea368d..00000000
--- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go
+++ /dev/null
@@ -1,1494 +0,0 @@
-/*
-* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
-* THIS FILE MUST NOT BE EDITED BY HAND
- */
-
-package assert
-
-import (
- http "net/http"
- url "net/url"
- time "time"
-)
-
-// Condition uses a Comparison to assert a complex condition.
-func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Condition(a.t, comp, msgAndArgs...)
-}
-
-// Conditionf uses a Comparison to assert a complex condition.
-func (a *Assertions) Conditionf(comp Comparison, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Conditionf(a.t, comp, msg, args...)
-}
-
-// Contains asserts that the specified string, list(array, slice...) or map contains the
-// specified substring or element.
-//
-// a.Contains("Hello World", "World")
-// a.Contains(["Hello", "World"], "World")
-// a.Contains({"Hello": "World"}, "Hello")
-func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Contains(a.t, s, contains, msgAndArgs...)
-}
-
-// Containsf asserts that the specified string, list(array, slice...) or map contains the
-// specified substring or element.
-//
-// a.Containsf("Hello World", "World", "error message %s", "formatted")
-// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted")
-// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted")
-func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Containsf(a.t, s, contains, msg, args...)
-}
-
-// DirExists checks whether a directory exists in the given path. It also fails
-// if the path is a file rather a directory or there is an error checking whether it exists.
-func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return DirExists(a.t, path, msgAndArgs...)
-}
-
-// DirExistsf checks whether a directory exists in the given path. It also fails
-// if the path is a file rather a directory or there is an error checking whether it exists.
-func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return DirExistsf(a.t, path, msg, args...)
-}
-
-// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified
-// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
-// the number of appearances of each of them in both lists should match.
-//
-// a.ElementsMatch([1, 3, 2, 3], [1, 3, 3, 2])
-func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return ElementsMatch(a.t, listA, listB, msgAndArgs...)
-}
-
-// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified
-// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
-// the number of appearances of each of them in both lists should match.
-//
-// a.ElementsMatchf([1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
-func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return ElementsMatchf(a.t, listA, listB, msg, args...)
-}
-
-// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
-//
-// a.Empty(obj)
-func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Empty(a.t, object, msgAndArgs...)
-}
-
-// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
-//
-// a.Emptyf(obj, "error message %s", "formatted")
-func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Emptyf(a.t, object, msg, args...)
-}
-
-// Equal asserts that two objects are equal.
-//
-// a.Equal(123, 123)
-//
-// Pointer variable equality is determined based on the equality of the
-// referenced values (as opposed to the memory addresses). Function equality
-// cannot be determined and will always fail.
-func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Equal(a.t, expected, actual, msgAndArgs...)
-}
-
-// EqualError asserts that a function returned an error (i.e. not `nil`)
-// and that it is equal to the provided error.
-//
-// actualObj, err := SomeFunction()
-// a.EqualError(err, expectedErrorString)
-func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return EqualError(a.t, theError, errString, msgAndArgs...)
-}
-
-// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
-// and that it is equal to the provided error.
-//
-// actualObj, err := SomeFunction()
-// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted")
-func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return EqualErrorf(a.t, theError, errString, msg, args...)
-}
-
-// EqualValues asserts that two objects are equal or convertable to the same types
-// and equal.
-//
-// a.EqualValues(uint32(123), int32(123))
-func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return EqualValues(a.t, expected, actual, msgAndArgs...)
-}
-
-// EqualValuesf asserts that two objects are equal or convertable to the same types
-// and equal.
-//
-// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
-func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return EqualValuesf(a.t, expected, actual, msg, args...)
-}
-
-// Equalf asserts that two objects are equal.
-//
-// a.Equalf(123, 123, "error message %s", "formatted")
-//
-// Pointer variable equality is determined based on the equality of the
-// referenced values (as opposed to the memory addresses). Function equality
-// cannot be determined and will always fail.
-func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Equalf(a.t, expected, actual, msg, args...)
-}
-
-// Error asserts that a function returned an error (i.e. not `nil`).
-//
-// actualObj, err := SomeFunction()
-// if a.Error(err) {
-// assert.Equal(t, expectedError, err)
-// }
-func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Error(a.t, err, msgAndArgs...)
-}
-
-// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
-// This is a wrapper for errors.As.
-func (a *Assertions) ErrorAs(err error, target interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return ErrorAs(a.t, err, target, msgAndArgs...)
-}
-
-// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
-// This is a wrapper for errors.As.
-func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return ErrorAsf(a.t, err, target, msg, args...)
-}
-
-// ErrorContains asserts that a function returned an error (i.e. not `nil`)
-// and that the error contains the specified substring.
-//
-// actualObj, err := SomeFunction()
-// a.ErrorContains(err, expectedErrorSubString)
-func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return ErrorContains(a.t, theError, contains, msgAndArgs...)
-}
-
-// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
-// and that the error contains the specified substring.
-//
-// actualObj, err := SomeFunction()
-// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted")
-func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return ErrorContainsf(a.t, theError, contains, msg, args...)
-}
-
-// ErrorIs asserts that at least one of the errors in err's chain matches target.
-// This is a wrapper for errors.Is.
-func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return ErrorIs(a.t, err, target, msgAndArgs...)
-}
-
-// ErrorIsf asserts that at least one of the errors in err's chain matches target.
-// This is a wrapper for errors.Is.
-func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return ErrorIsf(a.t, err, target, msg, args...)
-}
-
-// Errorf asserts that a function returned an error (i.e. not `nil`).
-//
-// actualObj, err := SomeFunction()
-// if a.Errorf(err, "error message %s", "formatted") {
-// assert.Equal(t, expectedErrorf, err)
-// }
-func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Errorf(a.t, err, msg, args...)
-}
-
-// Eventually asserts that given condition will be met in waitFor time,
-// periodically checking target function each tick.
-//
-// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond)
-func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Eventually(a.t, condition, waitFor, tick, msgAndArgs...)
-}
-
-// Eventuallyf asserts that given condition will be met in waitFor time,
-// periodically checking target function each tick.
-//
-// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
-func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Eventuallyf(a.t, condition, waitFor, tick, msg, args...)
-}
-
-// Exactly asserts that two objects are equal in value and type.
-//
-// a.Exactly(int32(123), int64(123))
-func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Exactly(a.t, expected, actual, msgAndArgs...)
-}
-
-// Exactlyf asserts that two objects are equal in value and type.
-//
-// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted")
-func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Exactlyf(a.t, expected, actual, msg, args...)
-}
-
-// Fail reports a failure through
-func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Fail(a.t, failureMessage, msgAndArgs...)
-}
-
-// FailNow fails test
-func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return FailNow(a.t, failureMessage, msgAndArgs...)
-}
-
-// FailNowf fails test
-func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return FailNowf(a.t, failureMessage, msg, args...)
-}
-
-// Failf reports a failure through
-func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Failf(a.t, failureMessage, msg, args...)
-}
-
-// False asserts that the specified value is false.
-//
-// a.False(myBool)
-func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return False(a.t, value, msgAndArgs...)
-}
-
-// Falsef asserts that the specified value is false.
-//
-// a.Falsef(myBool, "error message %s", "formatted")
-func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Falsef(a.t, value, msg, args...)
-}
-
-// FileExists checks whether a file exists in the given path. It also fails if
-// the path points to a directory or there is an error when trying to check the file.
-func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return FileExists(a.t, path, msgAndArgs...)
-}
-
-// FileExistsf checks whether a file exists in the given path. It also fails if
-// the path points to a directory or there is an error when trying to check the file.
-func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return FileExistsf(a.t, path, msg, args...)
-}
-
-// Greater asserts that the first element is greater than the second
-//
-// a.Greater(2, 1)
-// a.Greater(float64(2), float64(1))
-// a.Greater("b", "a")
-func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Greater(a.t, e1, e2, msgAndArgs...)
-}
-
-// GreaterOrEqual asserts that the first element is greater than or equal to the second
-//
-// a.GreaterOrEqual(2, 1)
-// a.GreaterOrEqual(2, 2)
-// a.GreaterOrEqual("b", "a")
-// a.GreaterOrEqual("b", "b")
-func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return GreaterOrEqual(a.t, e1, e2, msgAndArgs...)
-}
-
-// GreaterOrEqualf asserts that the first element is greater than or equal to the second
-//
-// a.GreaterOrEqualf(2, 1, "error message %s", "formatted")
-// a.GreaterOrEqualf(2, 2, "error message %s", "formatted")
-// a.GreaterOrEqualf("b", "a", "error message %s", "formatted")
-// a.GreaterOrEqualf("b", "b", "error message %s", "formatted")
-func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return GreaterOrEqualf(a.t, e1, e2, msg, args...)
-}
-
-// Greaterf asserts that the first element is greater than the second
-//
-// a.Greaterf(2, 1, "error message %s", "formatted")
-// a.Greaterf(float64(2), float64(1), "error message %s", "formatted")
-// a.Greaterf("b", "a", "error message %s", "formatted")
-func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Greaterf(a.t, e1, e2, msg, args...)
-}
-
-// HTTPBodyContains asserts that a specified handler returns a
-// body that contains a string.
-//
-// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...)
-}
-
-// HTTPBodyContainsf asserts that a specified handler returns a
-// body that contains a string.
-//
-// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...)
-}
-
-// HTTPBodyNotContains asserts that a specified handler returns a
-// body that does not contain a string.
-//
-// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...)
-}
-
-// HTTPBodyNotContainsf asserts that a specified handler returns a
-// body that does not contain a string.
-//
-// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...)
-}
-
-// HTTPError asserts that a specified handler returns an error status code.
-//
-// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return HTTPError(a.t, handler, method, url, values, msgAndArgs...)
-}
-
-// HTTPErrorf asserts that a specified handler returns an error status code.
-//
-// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return HTTPErrorf(a.t, handler, method, url, values, msg, args...)
-}
-
-// HTTPRedirect asserts that a specified handler returns a redirect status code.
-//
-// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return HTTPRedirect(a.t, handler, method, url, values, msgAndArgs...)
-}
-
-// HTTPRedirectf asserts that a specified handler returns a redirect status code.
-//
-// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return HTTPRedirectf(a.t, handler, method, url, values, msg, args...)
-}
-
-// HTTPStatusCode asserts that a specified handler returns a specified status code.
-//
-// a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501)
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return HTTPStatusCode(a.t, handler, method, url, values, statuscode, msgAndArgs...)
-}
-
-// HTTPStatusCodef asserts that a specified handler returns a specified status code.
-//
-// a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return HTTPStatusCodef(a.t, handler, method, url, values, statuscode, msg, args...)
-}
-
-// HTTPSuccess asserts that a specified handler returns a success status code.
-//
-// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return HTTPSuccess(a.t, handler, method, url, values, msgAndArgs...)
-}
-
-// HTTPSuccessf asserts that a specified handler returns a success status code.
-//
-// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return HTTPSuccessf(a.t, handler, method, url, values, msg, args...)
-}
-
-// Implements asserts that an object is implemented by the specified interface.
-//
-// a.Implements((*MyInterface)(nil), new(MyObject))
-func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Implements(a.t, interfaceObject, object, msgAndArgs...)
-}
-
-// Implementsf asserts that an object is implemented by the specified interface.
-//
-// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
-func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Implementsf(a.t, interfaceObject, object, msg, args...)
-}
-
-// InDelta asserts that the two numerals are within delta of each other.
-//
-// a.InDelta(math.Pi, 22/7.0, 0.01)
-func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return InDelta(a.t, expected, actual, delta, msgAndArgs...)
-}
-
-// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
-func (a *Assertions) InDeltaMapValues(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return InDeltaMapValues(a.t, expected, actual, delta, msgAndArgs...)
-}
-
-// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
-func (a *Assertions) InDeltaMapValuesf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return InDeltaMapValuesf(a.t, expected, actual, delta, msg, args...)
-}
-
-// InDeltaSlice is the same as InDelta, except it compares two slices.
-func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...)
-}
-
-// InDeltaSlicef is the same as InDelta, except it compares two slices.
-func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return InDeltaSlicef(a.t, expected, actual, delta, msg, args...)
-}
-
-// InDeltaf asserts that the two numerals are within delta of each other.
-//
-// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
-func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return InDeltaf(a.t, expected, actual, delta, msg, args...)
-}
-
-// InEpsilon asserts that expected and actual have a relative error less than epsilon
-func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
-}
-
-// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
-func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...)
-}
-
-// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
-func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...)
-}
-
-// InEpsilonf asserts that expected and actual have a relative error less than epsilon
-func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return InEpsilonf(a.t, expected, actual, epsilon, msg, args...)
-}
-
-// IsDecreasing asserts that the collection is decreasing
-//
-// a.IsDecreasing([]int{2, 1, 0})
-// a.IsDecreasing([]float{2, 1})
-// a.IsDecreasing([]string{"b", "a"})
-func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return IsDecreasing(a.t, object, msgAndArgs...)
-}
-
-// IsDecreasingf asserts that the collection is decreasing
-//
-// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted")
-// a.IsDecreasingf([]float{2, 1}, "error message %s", "formatted")
-// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted")
-func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return IsDecreasingf(a.t, object, msg, args...)
-}
-
-// IsIncreasing asserts that the collection is increasing
-//
-// a.IsIncreasing([]int{1, 2, 3})
-// a.IsIncreasing([]float{1, 2})
-// a.IsIncreasing([]string{"a", "b"})
-func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return IsIncreasing(a.t, object, msgAndArgs...)
-}
-
-// IsIncreasingf asserts that the collection is increasing
-//
-// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted")
-// a.IsIncreasingf([]float{1, 2}, "error message %s", "formatted")
-// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted")
-func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return IsIncreasingf(a.t, object, msg, args...)
-}
-
-// IsNonDecreasing asserts that the collection is not decreasing
-//
-// a.IsNonDecreasing([]int{1, 1, 2})
-// a.IsNonDecreasing([]float{1, 2})
-// a.IsNonDecreasing([]string{"a", "b"})
-func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return IsNonDecreasing(a.t, object, msgAndArgs...)
-}
-
-// IsNonDecreasingf asserts that the collection is not decreasing
-//
-// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted")
-// a.IsNonDecreasingf([]float{1, 2}, "error message %s", "formatted")
-// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted")
-func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return IsNonDecreasingf(a.t, object, msg, args...)
-}
-
-// IsNonIncreasing asserts that the collection is not increasing
-//
-// a.IsNonIncreasing([]int{2, 1, 1})
-// a.IsNonIncreasing([]float{2, 1})
-// a.IsNonIncreasing([]string{"b", "a"})
-func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return IsNonIncreasing(a.t, object, msgAndArgs...)
-}
-
-// IsNonIncreasingf asserts that the collection is not increasing
-//
-// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted")
-// a.IsNonIncreasingf([]float{2, 1}, "error message %s", "formatted")
-// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted")
-func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return IsNonIncreasingf(a.t, object, msg, args...)
-}
-
-// IsType asserts that the specified objects are of the same type.
-func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return IsType(a.t, expectedType, object, msgAndArgs...)
-}
-
-// IsTypef asserts that the specified objects are of the same type.
-func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return IsTypef(a.t, expectedType, object, msg, args...)
-}
-
-// JSONEq asserts that two JSON strings are equivalent.
-//
-// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
-func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return JSONEq(a.t, expected, actual, msgAndArgs...)
-}
-
-// JSONEqf asserts that two JSON strings are equivalent.
-//
-// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
-func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return JSONEqf(a.t, expected, actual, msg, args...)
-}
-
-// Len asserts that the specified object has specific length.
-// Len also fails if the object has a type that len() not accept.
-//
-// a.Len(mySlice, 3)
-func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Len(a.t, object, length, msgAndArgs...)
-}
-
-// Lenf asserts that the specified object has specific length.
-// Lenf also fails if the object has a type that len() not accept.
-//
-// a.Lenf(mySlice, 3, "error message %s", "formatted")
-func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Lenf(a.t, object, length, msg, args...)
-}
-
-// Less asserts that the first element is less than the second
-//
-// a.Less(1, 2)
-// a.Less(float64(1), float64(2))
-// a.Less("a", "b")
-func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Less(a.t, e1, e2, msgAndArgs...)
-}
-
-// LessOrEqual asserts that the first element is less than or equal to the second
-//
-// a.LessOrEqual(1, 2)
-// a.LessOrEqual(2, 2)
-// a.LessOrEqual("a", "b")
-// a.LessOrEqual("b", "b")
-func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return LessOrEqual(a.t, e1, e2, msgAndArgs...)
-}
-
-// LessOrEqualf asserts that the first element is less than or equal to the second
-//
-// a.LessOrEqualf(1, 2, "error message %s", "formatted")
-// a.LessOrEqualf(2, 2, "error message %s", "formatted")
-// a.LessOrEqualf("a", "b", "error message %s", "formatted")
-// a.LessOrEqualf("b", "b", "error message %s", "formatted")
-func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return LessOrEqualf(a.t, e1, e2, msg, args...)
-}
-
-// Lessf asserts that the first element is less than the second
-//
-// a.Lessf(1, 2, "error message %s", "formatted")
-// a.Lessf(float64(1), float64(2), "error message %s", "formatted")
-// a.Lessf("a", "b", "error message %s", "formatted")
-func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Lessf(a.t, e1, e2, msg, args...)
-}
-
-// Negative asserts that the specified element is negative
-//
-// a.Negative(-1)
-// a.Negative(-1.23)
-func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Negative(a.t, e, msgAndArgs...)
-}
-
-// Negativef asserts that the specified element is negative
-//
-// a.Negativef(-1, "error message %s", "formatted")
-// a.Negativef(-1.23, "error message %s", "formatted")
-func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Negativef(a.t, e, msg, args...)
-}
-
-// Never asserts that the given condition doesn't satisfy in waitFor time,
-// periodically checking the target function each tick.
-//
-// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond)
-func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Never(a.t, condition, waitFor, tick, msgAndArgs...)
-}
-
-// Neverf asserts that the given condition doesn't satisfy in waitFor time,
-// periodically checking the target function each tick.
-//
-// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
-func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Neverf(a.t, condition, waitFor, tick, msg, args...)
-}
-
-// Nil asserts that the specified object is nil.
-//
-// a.Nil(err)
-func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Nil(a.t, object, msgAndArgs...)
-}
-
-// Nilf asserts that the specified object is nil.
-//
-// a.Nilf(err, "error message %s", "formatted")
-func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Nilf(a.t, object, msg, args...)
-}
-
-// NoDirExists checks whether a directory does not exist in the given path.
-// It fails if the path points to an existing _directory_ only.
-func (a *Assertions) NoDirExists(path string, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return NoDirExists(a.t, path, msgAndArgs...)
-}
-
-// NoDirExistsf checks whether a directory does not exist in the given path.
-// It fails if the path points to an existing _directory_ only.
-func (a *Assertions) NoDirExistsf(path string, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return NoDirExistsf(a.t, path, msg, args...)
-}
-
-// NoError asserts that a function returned no error (i.e. `nil`).
-//
-// actualObj, err := SomeFunction()
-// if a.NoError(err) {
-// assert.Equal(t, expectedObj, actualObj)
-// }
-func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return NoError(a.t, err, msgAndArgs...)
-}
-
-// NoErrorf asserts that a function returned no error (i.e. `nil`).
-//
-// actualObj, err := SomeFunction()
-// if a.NoErrorf(err, "error message %s", "formatted") {
-// assert.Equal(t, expectedObj, actualObj)
-// }
-func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return NoErrorf(a.t, err, msg, args...)
-}
-
-// NoFileExists checks whether a file does not exist in a given path. It fails
-// if the path points to an existing _file_ only.
-func (a *Assertions) NoFileExists(path string, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return NoFileExists(a.t, path, msgAndArgs...)
-}
-
-// NoFileExistsf checks whether a file does not exist in a given path. It fails
-// if the path points to an existing _file_ only.
-func (a *Assertions) NoFileExistsf(path string, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return NoFileExistsf(a.t, path, msg, args...)
-}
-
-// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
-// specified substring or element.
-//
-// a.NotContains("Hello World", "Earth")
-// a.NotContains(["Hello", "World"], "Earth")
-// a.NotContains({"Hello": "World"}, "Earth")
-func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return NotContains(a.t, s, contains, msgAndArgs...)
-}
-
-// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
-// specified substring or element.
-//
-// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted")
-// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted")
-// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted")
-func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return NotContainsf(a.t, s, contains, msg, args...)
-}
-
-// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
-//
-// if a.NotEmpty(obj) {
-// assert.Equal(t, "two", obj[1])
-// }
-func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return NotEmpty(a.t, object, msgAndArgs...)
-}
-
-// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
-//
-// if a.NotEmptyf(obj, "error message %s", "formatted") {
-// assert.Equal(t, "two", obj[1])
-// }
-func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return NotEmptyf(a.t, object, msg, args...)
-}
-
-// NotEqual asserts that the specified values are NOT equal.
-//
-// a.NotEqual(obj1, obj2)
-//
-// Pointer variable equality is determined based on the equality of the
-// referenced values (as opposed to the memory addresses).
-func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return NotEqual(a.t, expected, actual, msgAndArgs...)
-}
-
-// NotEqualValues asserts that two objects are not equal even when converted to the same type
-//
-// a.NotEqualValues(obj1, obj2)
-func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return NotEqualValues(a.t, expected, actual, msgAndArgs...)
-}
-
-// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
-//
-// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted")
-func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return NotEqualValuesf(a.t, expected, actual, msg, args...)
-}
-
-// NotEqualf asserts that the specified values are NOT equal.
-//
-// a.NotEqualf(obj1, obj2, "error message %s", "formatted")
-//
-// Pointer variable equality is determined based on the equality of the
-// referenced values (as opposed to the memory addresses).
-func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return NotEqualf(a.t, expected, actual, msg, args...)
-}
-
-// NotErrorIs asserts that at none of the errors in err's chain matches target.
-// This is a wrapper for errors.Is.
-func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return NotErrorIs(a.t, err, target, msgAndArgs...)
-}
-
-// NotErrorIsf asserts that at none of the errors in err's chain matches target.
-// This is a wrapper for errors.Is.
-func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return NotErrorIsf(a.t, err, target, msg, args...)
-}
-
-// NotNil asserts that the specified object is not nil.
-//
-// a.NotNil(err)
-func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return NotNil(a.t, object, msgAndArgs...)
-}
-
-// NotNilf asserts that the specified object is not nil.
-//
-// a.NotNilf(err, "error message %s", "formatted")
-func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return NotNilf(a.t, object, msg, args...)
-}
-
-// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
-//
-// a.NotPanics(func(){ RemainCalm() })
-func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return NotPanics(a.t, f, msgAndArgs...)
-}
-
-// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
-//
-// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted")
-func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return NotPanicsf(a.t, f, msg, args...)
-}
-
-// NotRegexp asserts that a specified regexp does not match a string.
-//
-// a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
-// a.NotRegexp("^start", "it's not starting")
-func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return NotRegexp(a.t, rx, str, msgAndArgs...)
-}
-
-// NotRegexpf asserts that a specified regexp does not match a string.
-//
-// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
-// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted")
-func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return NotRegexpf(a.t, rx, str, msg, args...)
-}
-
-// NotSame asserts that two pointers do not reference the same object.
-//
-// a.NotSame(ptr1, ptr2)
-//
-// Both arguments must be pointer variables. Pointer variable sameness is
-// determined based on the equality of both type and value.
-func (a *Assertions) NotSame(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return NotSame(a.t, expected, actual, msgAndArgs...)
-}
-
-// NotSamef asserts that two pointers do not reference the same object.
-//
-// a.NotSamef(ptr1, ptr2, "error message %s", "formatted")
-//
-// Both arguments must be pointer variables. Pointer variable sameness is
-// determined based on the equality of both type and value.
-func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return NotSamef(a.t, expected, actual, msg, args...)
-}
-
-// NotSubset asserts that the specified list(array, slice...) contains not all
-// elements given in the specified subset(array, slice...).
-//
-// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
-func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return NotSubset(a.t, list, subset, msgAndArgs...)
-}
-
-// NotSubsetf asserts that the specified list(array, slice...) contains not all
-// elements given in the specified subset(array, slice...).
-//
-// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
-func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return NotSubsetf(a.t, list, subset, msg, args...)
-}
-
-// NotZero asserts that i is not the zero value for its type.
-func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return NotZero(a.t, i, msgAndArgs...)
-}
-
-// NotZerof asserts that i is not the zero value for its type.
-func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return NotZerof(a.t, i, msg, args...)
-}
-
-// Panics asserts that the code inside the specified PanicTestFunc panics.
-//
-// a.Panics(func(){ GoCrazy() })
-func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Panics(a.t, f, msgAndArgs...)
-}
-
-// PanicsWithError asserts that the code inside the specified PanicTestFunc
-// panics, and that the recovered panic value is an error that satisfies the
-// EqualError comparison.
-//
-// a.PanicsWithError("crazy error", func(){ GoCrazy() })
-func (a *Assertions) PanicsWithError(errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return PanicsWithError(a.t, errString, f, msgAndArgs...)
-}
-
-// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc
-// panics, and that the recovered panic value is an error that satisfies the
-// EqualError comparison.
-//
-// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
-func (a *Assertions) PanicsWithErrorf(errString string, f PanicTestFunc, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return PanicsWithErrorf(a.t, errString, f, msg, args...)
-}
-
-// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
-// the recovered panic value equals the expected panic value.
-//
-// a.PanicsWithValue("crazy error", func(){ GoCrazy() })
-func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return PanicsWithValue(a.t, expected, f, msgAndArgs...)
-}
-
-// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
-// the recovered panic value equals the expected panic value.
-//
-// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
-func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return PanicsWithValuef(a.t, expected, f, msg, args...)
-}
-
-// Panicsf asserts that the code inside the specified PanicTestFunc panics.
-//
-// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted")
-func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Panicsf(a.t, f, msg, args...)
-}
-
-// Positive asserts that the specified element is positive
-//
-// a.Positive(1)
-// a.Positive(1.23)
-func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Positive(a.t, e, msgAndArgs...)
-}
-
-// Positivef asserts that the specified element is positive
-//
-// a.Positivef(1, "error message %s", "formatted")
-// a.Positivef(1.23, "error message %s", "formatted")
-func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Positivef(a.t, e, msg, args...)
-}
-
-// Regexp asserts that a specified regexp matches a string.
-//
-// a.Regexp(regexp.MustCompile("start"), "it's starting")
-// a.Regexp("start...$", "it's not starting")
-func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Regexp(a.t, rx, str, msgAndArgs...)
-}
-
-// Regexpf asserts that a specified regexp matches a string.
-//
-// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
-// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted")
-func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Regexpf(a.t, rx, str, msg, args...)
-}
-
-// Same asserts that two pointers reference the same object.
-//
-// a.Same(ptr1, ptr2)
-//
-// Both arguments must be pointer variables. Pointer variable sameness is
-// determined based on the equality of both type and value.
-func (a *Assertions) Same(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Same(a.t, expected, actual, msgAndArgs...)
-}
-
-// Samef asserts that two pointers reference the same object.
-//
-// a.Samef(ptr1, ptr2, "error message %s", "formatted")
-//
-// Both arguments must be pointer variables. Pointer variable sameness is
-// determined based on the equality of both type and value.
-func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Samef(a.t, expected, actual, msg, args...)
-}
-
-// Subset asserts that the specified list(array, slice...) contains all
-// elements given in the specified subset(array, slice...).
-//
-// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
-func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Subset(a.t, list, subset, msgAndArgs...)
-}
-
-// Subsetf asserts that the specified list(array, slice...) contains all
-// elements given in the specified subset(array, slice...).
-//
-// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
-func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Subsetf(a.t, list, subset, msg, args...)
-}
-
-// True asserts that the specified value is true.
-//
-// a.True(myBool)
-func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return True(a.t, value, msgAndArgs...)
-}
-
-// Truef asserts that the specified value is true.
-//
-// a.Truef(myBool, "error message %s", "formatted")
-func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Truef(a.t, value, msg, args...)
-}
-
-// WithinDuration asserts that the two times are within duration delta of each other.
-//
-// a.WithinDuration(time.Now(), time.Now(), 10*time.Second)
-func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return WithinDuration(a.t, expected, actual, delta, msgAndArgs...)
-}
-
-// WithinDurationf asserts that the two times are within duration delta of each other.
-//
-// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
-func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return WithinDurationf(a.t, expected, actual, delta, msg, args...)
-}
-
-// YAMLEq asserts that two YAML strings are equivalent.
-func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return YAMLEq(a.t, expected, actual, msgAndArgs...)
-}
-
-// YAMLEqf asserts that two YAML strings are equivalent.
-func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return YAMLEqf(a.t, expected, actual, msg, args...)
-}
-
-// Zero asserts that i is the zero value for its type.
-func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Zero(a.t, i, msgAndArgs...)
-}
-
-// Zerof asserts that i is the zero value for its type.
-func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return Zerof(a.t, i, msg, args...)
-}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl
deleted file mode 100644
index 188bb9e1..00000000
--- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl
+++ /dev/null
@@ -1,5 +0,0 @@
-{{.CommentWithoutT "a"}}
-func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool {
- if h, ok := a.t.(tHelper); ok { h.Helper() }
- return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}})
-}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go
deleted file mode 100644
index 75944878..00000000
--- a/vendor/github.com/stretchr/testify/assert/assertion_order.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package assert
-
-import (
- "fmt"
- "reflect"
-)
-
-// isOrdered checks that collection contains orderable elements.
-func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
- objKind := reflect.TypeOf(object).Kind()
- if objKind != reflect.Slice && objKind != reflect.Array {
- return false
- }
-
- objValue := reflect.ValueOf(object)
- objLen := objValue.Len()
-
- if objLen <= 1 {
- return true
- }
-
- value := objValue.Index(0)
- valueInterface := value.Interface()
- firstValueKind := value.Kind()
-
- for i := 1; i < objLen; i++ {
- prevValue := value
- prevValueInterface := valueInterface
-
- value = objValue.Index(i)
- valueInterface = value.Interface()
-
- compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind)
-
- if !isComparable {
- return Fail(t, fmt.Sprintf("Can not compare type \"%s\" and \"%s\"", reflect.TypeOf(value), reflect.TypeOf(prevValue)), msgAndArgs...)
- }
-
- if !containsValue(allowedComparesResults, compareResult) {
- return Fail(t, fmt.Sprintf(failMessage, prevValue, value), msgAndArgs...)
- }
- }
-
- return true
-}
-
-// IsIncreasing asserts that the collection is increasing
-//
-// assert.IsIncreasing(t, []int{1, 2, 3})
-// assert.IsIncreasing(t, []float{1, 2})
-// assert.IsIncreasing(t, []string{"a", "b"})
-func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
-}
-
-// IsNonIncreasing asserts that the collection is not increasing
-//
-// assert.IsNonIncreasing(t, []int{2, 1, 1})
-// assert.IsNonIncreasing(t, []float{2, 1})
-// assert.IsNonIncreasing(t, []string{"b", "a"})
-func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
-}
-
-// IsDecreasing asserts that the collection is decreasing
-//
-// assert.IsDecreasing(t, []int{2, 1, 0})
-// assert.IsDecreasing(t, []float{2, 1})
-// assert.IsDecreasing(t, []string{"b", "a"})
-func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
-}
-
-// IsNonDecreasing asserts that the collection is not decreasing
-//
-// assert.IsNonDecreasing(t, []int{1, 1, 2})
-// assert.IsNonDecreasing(t, []float{1, 2})
-// assert.IsNonDecreasing(t, []string{"a", "b"})
-func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
-}
diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go
deleted file mode 100644
index 0357b223..00000000
--- a/vendor/github.com/stretchr/testify/assert/assertions.go
+++ /dev/null
@@ -1,1810 +0,0 @@
-package assert
-
-import (
- "bufio"
- "bytes"
- "encoding/json"
- "errors"
- "fmt"
- "math"
- "os"
- "reflect"
- "regexp"
- "runtime"
- "runtime/debug"
- "strings"
- "time"
- "unicode"
- "unicode/utf8"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/pmezard/go-difflib/difflib"
- yaml "gopkg.in/yaml.v3"
-)
-
-//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl"
-
-// TestingT is an interface wrapper around *testing.T
-type TestingT interface {
- Errorf(format string, args ...interface{})
-}
-
-// ComparisonAssertionFunc is a common function prototype when comparing two values. Can be useful
-// for table driven tests.
-type ComparisonAssertionFunc func(TestingT, interface{}, interface{}, ...interface{}) bool
-
-// ValueAssertionFunc is a common function prototype when validating a single value. Can be useful
-// for table driven tests.
-type ValueAssertionFunc func(TestingT, interface{}, ...interface{}) bool
-
-// BoolAssertionFunc is a common function prototype when validating a bool value. Can be useful
-// for table driven tests.
-type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool
-
-// ErrorAssertionFunc is a common function prototype when validating an error value. Can be useful
-// for table driven tests.
-type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool
-
-// Comparison is a custom function that returns true on success and false on failure
-type Comparison func() (success bool)
-
-/*
- Helper functions
-*/
-
-// ObjectsAreEqual determines if two objects are considered equal.
-//
-// This function does no assertion of any kind.
-func ObjectsAreEqual(expected, actual interface{}) bool {
- if expected == nil || actual == nil {
- return expected == actual
- }
-
- exp, ok := expected.([]byte)
- if !ok {
- return reflect.DeepEqual(expected, actual)
- }
-
- act, ok := actual.([]byte)
- if !ok {
- return false
- }
- if exp == nil || act == nil {
- return exp == nil && act == nil
- }
- return bytes.Equal(exp, act)
-}
-
-// ObjectsAreEqualValues gets whether two objects are equal, or if their
-// values are equal.
-func ObjectsAreEqualValues(expected, actual interface{}) bool {
- if ObjectsAreEqual(expected, actual) {
- return true
- }
-
- actualType := reflect.TypeOf(actual)
- if actualType == nil {
- return false
- }
- expectedValue := reflect.ValueOf(expected)
- if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) {
- // Attempt comparison after type conversion
- return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual)
- }
-
- return false
-}
-
-/* CallerInfo is necessary because the assert functions use the testing object
-internally, causing it to print the file:line of the assert method, rather than where
-the problem actually occurred in calling code.*/
-
-// CallerInfo returns an array of strings containing the file and line number
-// of each stack frame leading from the current test to the assert call that
-// failed.
-func CallerInfo() []string {
-
- var pc uintptr
- var ok bool
- var file string
- var line int
- var name string
-
- callers := []string{}
- for i := 0; ; i++ {
- pc, file, line, ok = runtime.Caller(i)
- if !ok {
- // The breaks below failed to terminate the loop, and we ran off the
- // end of the call stack.
- break
- }
-
- // This is a huge edge case, but it will panic if this is the case, see #180
- if file == "" {
- break
- }
-
- f := runtime.FuncForPC(pc)
- if f == nil {
- break
- }
- name = f.Name()
-
- // testing.tRunner is the standard library function that calls
- // tests. Subtests are called directly by tRunner, without going through
- // the Test/Benchmark/Example function that contains the t.Run calls, so
- // with subtests we should break when we hit tRunner, without adding it
- // to the list of callers.
- if name == "testing.tRunner" {
- break
- }
-
- parts := strings.Split(file, "/")
- file = parts[len(parts)-1]
- if len(parts) > 1 {
- dir := parts[len(parts)-2]
- if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" {
- callers = append(callers, fmt.Sprintf("%s:%d", file, line))
- }
- }
-
- // Drop the package
- segments := strings.Split(name, ".")
- name = segments[len(segments)-1]
- if isTest(name, "Test") ||
- isTest(name, "Benchmark") ||
- isTest(name, "Example") {
- break
- }
- }
-
- return callers
-}
-
-// Stolen from the `go test` tool.
-// isTest tells whether name looks like a test (or benchmark, according to prefix).
-// It is a Test (say) if there is a character after Test that is not a lower-case letter.
-// We don't want TesticularCancer.
-func isTest(name, prefix string) bool {
- if !strings.HasPrefix(name, prefix) {
- return false
- }
- if len(name) == len(prefix) { // "Test" is ok
- return true
- }
- r, _ := utf8.DecodeRuneInString(name[len(prefix):])
- return !unicode.IsLower(r)
-}
-
-func messageFromMsgAndArgs(msgAndArgs ...interface{}) string {
- if len(msgAndArgs) == 0 || msgAndArgs == nil {
- return ""
- }
- if len(msgAndArgs) == 1 {
- msg := msgAndArgs[0]
- if msgAsStr, ok := msg.(string); ok {
- return msgAsStr
- }
- return fmt.Sprintf("%+v", msg)
- }
- if len(msgAndArgs) > 1 {
- return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...)
- }
- return ""
-}
-
-// Aligns the provided message so that all lines after the first line start at the same location as the first line.
-// Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab).
-// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the
-// basis on which the alignment occurs).
-func indentMessageLines(message string, longestLabelLen int) string {
- outBuf := new(bytes.Buffer)
-
- for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ {
- // no need to align first line because it starts at the correct location (after the label)
- if i != 0 {
- // append alignLen+1 spaces to align with "{{longestLabel}}:" before adding tab
- outBuf.WriteString("\n\t" + strings.Repeat(" ", longestLabelLen+1) + "\t")
- }
- outBuf.WriteString(scanner.Text())
- }
-
- return outBuf.String()
-}
-
-type failNower interface {
- FailNow()
-}
-
-// FailNow fails test
-func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- Fail(t, failureMessage, msgAndArgs...)
-
- // We cannot extend TestingT with FailNow() and
- // maintain backwards compatibility, so we fallback
- // to panicking when FailNow is not available in
- // TestingT.
- // See issue #263
-
- if t, ok := t.(failNower); ok {
- t.FailNow()
- } else {
- panic("test failed and t is missing `FailNow()`")
- }
- return false
-}
-
-// Fail reports a failure through
-func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- content := []labeledContent{
- {"Error Trace", strings.Join(CallerInfo(), "\n\t\t\t")},
- {"Error", failureMessage},
- }
-
- // Add test name if the Go version supports it
- if n, ok := t.(interface {
- Name() string
- }); ok {
- content = append(content, labeledContent{"Test", n.Name()})
- }
-
- message := messageFromMsgAndArgs(msgAndArgs...)
- if len(message) > 0 {
- content = append(content, labeledContent{"Messages", message})
- }
-
- t.Errorf("\n%s", ""+labeledOutput(content...))
-
- return false
-}
-
-type labeledContent struct {
- label string
- content string
-}
-
-// labeledOutput returns a string consisting of the provided labeledContent. Each labeled output is appended in the following manner:
-//
-// \t{{label}}:{{align_spaces}}\t{{content}}\n
-//
-// The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label.
-// If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this
-// alignment is achieved, "\t{{content}}\n" is added for the output.
-//
-// If the content of the labeledOutput contains line breaks, the subsequent lines are aligned so that they start at the same location as the first line.
-func labeledOutput(content ...labeledContent) string {
- longestLabel := 0
- for _, v := range content {
- if len(v.label) > longestLabel {
- longestLabel = len(v.label)
- }
- }
- var output string
- for _, v := range content {
- output += "\t" + v.label + ":" + strings.Repeat(" ", longestLabel-len(v.label)) + "\t" + indentMessageLines(v.content, longestLabel) + "\n"
- }
- return output
-}
-
-// Implements asserts that an object is implemented by the specified interface.
-//
-// assert.Implements(t, (*MyInterface)(nil), new(MyObject))
-func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- interfaceType := reflect.TypeOf(interfaceObject).Elem()
-
- if object == nil {
- return Fail(t, fmt.Sprintf("Cannot check if nil implements %v", interfaceType), msgAndArgs...)
- }
- if !reflect.TypeOf(object).Implements(interfaceType) {
- return Fail(t, fmt.Sprintf("%T must implement %v", object, interfaceType), msgAndArgs...)
- }
-
- return true
-}
-
-// IsType asserts that the specified objects are of the same type.
-func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
- if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) {
- return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...)
- }
-
- return true
-}
-
-// Equal asserts that two objects are equal.
-//
-// assert.Equal(t, 123, 123)
-//
-// Pointer variable equality is determined based on the equality of the
-// referenced values (as opposed to the memory addresses). Function equality
-// cannot be determined and will always fail.
-func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if err := validateEqualArgs(expected, actual); err != nil {
- return Fail(t, fmt.Sprintf("Invalid operation: %#v == %#v (%s)",
- expected, actual, err), msgAndArgs...)
- }
-
- if !ObjectsAreEqual(expected, actual) {
- diff := diff(expected, actual)
- expected, actual = formatUnequalValues(expected, actual)
- return Fail(t, fmt.Sprintf("Not equal: \n"+
- "expected: %s\n"+
- "actual : %s%s", expected, actual, diff), msgAndArgs...)
- }
-
- return true
-
-}
-
-// validateEqualArgs checks whether provided arguments can be safely used in the
-// Equal/NotEqual functions.
-func validateEqualArgs(expected, actual interface{}) error {
- if expected == nil && actual == nil {
- return nil
- }
-
- if isFunction(expected) || isFunction(actual) {
- return errors.New("cannot take func type as argument")
- }
- return nil
-}
-
-// Same asserts that two pointers reference the same object.
-//
-// assert.Same(t, ptr1, ptr2)
-//
-// Both arguments must be pointer variables. Pointer variable sameness is
-// determined based on the equality of both type and value.
-func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
- if !samePointers(expected, actual) {
- return Fail(t, fmt.Sprintf("Not same: \n"+
- "expected: %p %#v\n"+
- "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...)
- }
-
- return true
-}
-
-// NotSame asserts that two pointers do not reference the same object.
-//
-// assert.NotSame(t, ptr1, ptr2)
-//
-// Both arguments must be pointer variables. Pointer variable sameness is
-// determined based on the equality of both type and value.
-func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
- if samePointers(expected, actual) {
- return Fail(t, fmt.Sprintf(
- "Expected and actual point to the same object: %p %#v",
- expected, expected), msgAndArgs...)
- }
- return true
-}
-
-// samePointers compares two generic interface objects and returns whether
-// they point to the same object
-func samePointers(first, second interface{}) bool {
- firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second)
- if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr {
- return false
- }
-
- firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second)
- if firstType != secondType {
- return false
- }
-
- // compare pointer addresses
- return first == second
-}
-
-// formatUnequalValues takes two values of arbitrary types and returns string
-// representations appropriate to be presented to the user.
-//
-// If the values are not of like type, the returned strings will be prefixed
-// with the type name, and the value will be enclosed in parenthesis similar
-// to a type conversion in the Go grammar.
-func formatUnequalValues(expected, actual interface{}) (e string, a string) {
- if reflect.TypeOf(expected) != reflect.TypeOf(actual) {
- return fmt.Sprintf("%T(%s)", expected, truncatingFormat(expected)),
- fmt.Sprintf("%T(%s)", actual, truncatingFormat(actual))
- }
- switch expected.(type) {
- case time.Duration:
- return fmt.Sprintf("%v", expected), fmt.Sprintf("%v", actual)
- }
- return truncatingFormat(expected), truncatingFormat(actual)
-}
-
-// truncatingFormat formats the data and truncates it if it's too long.
-//
-// This helps keep formatted error messages lines from exceeding the
-// bufio.MaxScanTokenSize max line length that the go testing framework imposes.
-func truncatingFormat(data interface{}) string {
- value := fmt.Sprintf("%#v", data)
- max := bufio.MaxScanTokenSize - 100 // Give us some space the type info too if needed.
- if len(value) > max {
- value = value[0:max] + "<... truncated>"
- }
- return value
-}
-
-// EqualValues asserts that two objects are equal or convertable to the same types
-// and equal.
-//
-// assert.EqualValues(t, uint32(123), int32(123))
-func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
- if !ObjectsAreEqualValues(expected, actual) {
- diff := diff(expected, actual)
- expected, actual = formatUnequalValues(expected, actual)
- return Fail(t, fmt.Sprintf("Not equal: \n"+
- "expected: %s\n"+
- "actual : %s%s", expected, actual, diff), msgAndArgs...)
- }
-
- return true
-
-}
-
-// Exactly asserts that two objects are equal in value and type.
-//
-// assert.Exactly(t, int32(123), int64(123))
-func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
- aType := reflect.TypeOf(expected)
- bType := reflect.TypeOf(actual)
-
- if aType != bType {
- return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...)
- }
-
- return Equal(t, expected, actual, msgAndArgs...)
-
-}
-
-// NotNil asserts that the specified object is not nil.
-//
-// assert.NotNil(t, err)
-func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- if !isNil(object) {
- return true
- }
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return Fail(t, "Expected value not to be nil.", msgAndArgs...)
-}
-
-// containsKind checks if a specified kind in the slice of kinds.
-func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool {
- for i := 0; i < len(kinds); i++ {
- if kind == kinds[i] {
- return true
- }
- }
-
- return false
-}
-
-// isNil checks if a specified object is nil or not, without Failing.
-func isNil(object interface{}) bool {
- if object == nil {
- return true
- }
-
- value := reflect.ValueOf(object)
- kind := value.Kind()
- isNilableKind := containsKind(
- []reflect.Kind{
- reflect.Chan, reflect.Func,
- reflect.Interface, reflect.Map,
- reflect.Ptr, reflect.Slice},
- kind)
-
- if isNilableKind && value.IsNil() {
- return true
- }
-
- return false
-}
-
-// Nil asserts that the specified object is nil.
-//
-// assert.Nil(t, err)
-func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- if isNil(object) {
- return true
- }
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...)
-}
-
-// isEmpty gets whether the specified object is considered empty or not.
-func isEmpty(object interface{}) bool {
-
- // get nil case out of the way
- if object == nil {
- return true
- }
-
- objValue := reflect.ValueOf(object)
-
- switch objValue.Kind() {
- // collection types are empty when they have no element
- case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
- return objValue.Len() == 0
- // pointers are empty if nil or if the value they point to is empty
- case reflect.Ptr:
- if objValue.IsNil() {
- return true
- }
- deref := objValue.Elem().Interface()
- return isEmpty(deref)
- // for all other types, compare against the zero value
- default:
- zero := reflect.Zero(objValue.Type())
- return reflect.DeepEqual(object, zero.Interface())
- }
-}
-
-// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
-//
-// assert.Empty(t, obj)
-func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- pass := isEmpty(object)
- if !pass {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...)
- }
-
- return pass
-
-}
-
-// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
-//
-// if assert.NotEmpty(t, obj) {
-// assert.Equal(t, "two", obj[1])
-// }
-func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- pass := !isEmpty(object)
- if !pass {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...)
- }
-
- return pass
-
-}
-
-// getLen try to get length of object.
-// return (false, 0) if impossible.
-func getLen(x interface{}) (ok bool, length int) {
- v := reflect.ValueOf(x)
- defer func() {
- if e := recover(); e != nil {
- ok = false
- }
- }()
- return true, v.Len()
-}
-
-// Len asserts that the specified object has specific length.
-// Len also fails if the object has a type that len() not accept.
-//
-// assert.Len(t, mySlice, 3)
-func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- ok, l := getLen(object)
- if !ok {
- return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...)
- }
-
- if l != length {
- return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...)
- }
- return true
-}
-
-// True asserts that the specified value is true.
-//
-// assert.True(t, myBool)
-func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
- if !value {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return Fail(t, "Should be true", msgAndArgs...)
- }
-
- return true
-
-}
-
-// False asserts that the specified value is false.
-//
-// assert.False(t, myBool)
-func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
- if value {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return Fail(t, "Should be false", msgAndArgs...)
- }
-
- return true
-
-}
-
-// NotEqual asserts that the specified values are NOT equal.
-//
-// assert.NotEqual(t, obj1, obj2)
-//
-// Pointer variable equality is determined based on the equality of the
-// referenced values (as opposed to the memory addresses).
-func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if err := validateEqualArgs(expected, actual); err != nil {
- return Fail(t, fmt.Sprintf("Invalid operation: %#v != %#v (%s)",
- expected, actual, err), msgAndArgs...)
- }
-
- if ObjectsAreEqual(expected, actual) {
- return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...)
- }
-
- return true
-
-}
-
-// NotEqualValues asserts that two objects are not equal even when converted to the same type
-//
-// assert.NotEqualValues(t, obj1, obj2)
-func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
- if ObjectsAreEqualValues(expected, actual) {
- return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...)
- }
-
- return true
-}
-
-// containsElement try loop over the list check if the list includes the element.
-// return (false, false) if impossible.
-// return (true, false) if element was not found.
-// return (true, true) if element was found.
-func containsElement(list interface{}, element interface{}) (ok, found bool) {
-
- listValue := reflect.ValueOf(list)
- listType := reflect.TypeOf(list)
- if listType == nil {
- return false, false
- }
- listKind := listType.Kind()
- defer func() {
- if e := recover(); e != nil {
- ok = false
- found = false
- }
- }()
-
- if listKind == reflect.String {
- elementValue := reflect.ValueOf(element)
- return true, strings.Contains(listValue.String(), elementValue.String())
- }
-
- if listKind == reflect.Map {
- mapKeys := listValue.MapKeys()
- for i := 0; i < len(mapKeys); i++ {
- if ObjectsAreEqual(mapKeys[i].Interface(), element) {
- return true, true
- }
- }
- return true, false
- }
-
- for i := 0; i < listValue.Len(); i++ {
- if ObjectsAreEqual(listValue.Index(i).Interface(), element) {
- return true, true
- }
- }
- return true, false
-
-}
-
-// Contains asserts that the specified string, list(array, slice...) or map contains the
-// specified substring or element.
-//
-// assert.Contains(t, "Hello World", "World")
-// assert.Contains(t, ["Hello", "World"], "World")
-// assert.Contains(t, {"Hello": "World"}, "Hello")
-func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
- ok, found := containsElement(s, contains)
- if !ok {
- return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...)
- }
- if !found {
- return Fail(t, fmt.Sprintf("%#v does not contain %#v", s, contains), msgAndArgs...)
- }
-
- return true
-
-}
-
-// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
-// specified substring or element.
-//
-// assert.NotContains(t, "Hello World", "Earth")
-// assert.NotContains(t, ["Hello", "World"], "Earth")
-// assert.NotContains(t, {"Hello": "World"}, "Earth")
-func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
- ok, found := containsElement(s, contains)
- if !ok {
- return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
- }
- if found {
- return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...)
- }
-
- return true
-
-}
-
-// Subset asserts that the specified list(array, slice...) contains all
-// elements given in the specified subset(array, slice...).
-//
-// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
-func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if subset == nil {
- return true // we consider nil to be equal to the nil set
- }
-
- subsetValue := reflect.ValueOf(subset)
- defer func() {
- if e := recover(); e != nil {
- ok = false
- }
- }()
-
- listKind := reflect.TypeOf(list).Kind()
- subsetKind := reflect.TypeOf(subset).Kind()
-
- if listKind != reflect.Array && listKind != reflect.Slice {
- return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
- }
-
- if subsetKind != reflect.Array && subsetKind != reflect.Slice {
- return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
- }
-
- for i := 0; i < subsetValue.Len(); i++ {
- element := subsetValue.Index(i).Interface()
- ok, found := containsElement(list, element)
- if !ok {
- return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
- }
- if !found {
- return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...)
- }
- }
-
- return true
-}
-
-// NotSubset asserts that the specified list(array, slice...) contains not all
-// elements given in the specified subset(array, slice...).
-//
-// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
-func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if subset == nil {
- return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...)
- }
-
- subsetValue := reflect.ValueOf(subset)
- defer func() {
- if e := recover(); e != nil {
- ok = false
- }
- }()
-
- listKind := reflect.TypeOf(list).Kind()
- subsetKind := reflect.TypeOf(subset).Kind()
-
- if listKind != reflect.Array && listKind != reflect.Slice {
- return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
- }
-
- if subsetKind != reflect.Array && subsetKind != reflect.Slice {
- return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
- }
-
- for i := 0; i < subsetValue.Len(); i++ {
- element := subsetValue.Index(i).Interface()
- ok, found := containsElement(list, element)
- if !ok {
- return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
- }
- if !found {
- return true
- }
- }
-
- return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...)
-}
-
-// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified
-// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
-// the number of appearances of each of them in both lists should match.
-//
-// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2])
-func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if isEmpty(listA) && isEmpty(listB) {
- return true
- }
-
- if !isList(t, listA, msgAndArgs...) || !isList(t, listB, msgAndArgs...) {
- return false
- }
-
- extraA, extraB := diffLists(listA, listB)
-
- if len(extraA) == 0 && len(extraB) == 0 {
- return true
- }
-
- return Fail(t, formatListDiff(listA, listB, extraA, extraB), msgAndArgs...)
-}
-
-// isList checks that the provided value is array or slice.
-func isList(t TestingT, list interface{}, msgAndArgs ...interface{}) (ok bool) {
- kind := reflect.TypeOf(list).Kind()
- if kind != reflect.Array && kind != reflect.Slice {
- return Fail(t, fmt.Sprintf("%q has an unsupported type %s, expecting array or slice", list, kind),
- msgAndArgs...)
- }
- return true
-}
-
-// diffLists diffs two arrays/slices and returns slices of elements that are only in A and only in B.
-// If some element is present multiple times, each instance is counted separately (e.g. if something is 2x in A and
-// 5x in B, it will be 0x in extraA and 3x in extraB). The order of items in both lists is ignored.
-func diffLists(listA, listB interface{}) (extraA, extraB []interface{}) {
- aValue := reflect.ValueOf(listA)
- bValue := reflect.ValueOf(listB)
-
- aLen := aValue.Len()
- bLen := bValue.Len()
-
- // Mark indexes in bValue that we already used
- visited := make([]bool, bLen)
- for i := 0; i < aLen; i++ {
- element := aValue.Index(i).Interface()
- found := false
- for j := 0; j < bLen; j++ {
- if visited[j] {
- continue
- }
- if ObjectsAreEqual(bValue.Index(j).Interface(), element) {
- visited[j] = true
- found = true
- break
- }
- }
- if !found {
- extraA = append(extraA, element)
- }
- }
-
- for j := 0; j < bLen; j++ {
- if visited[j] {
- continue
- }
- extraB = append(extraB, bValue.Index(j).Interface())
- }
-
- return
-}
-
-func formatListDiff(listA, listB interface{}, extraA, extraB []interface{}) string {
- var msg bytes.Buffer
-
- msg.WriteString("elements differ")
- if len(extraA) > 0 {
- msg.WriteString("\n\nextra elements in list A:\n")
- msg.WriteString(spewConfig.Sdump(extraA))
- }
- if len(extraB) > 0 {
- msg.WriteString("\n\nextra elements in list B:\n")
- msg.WriteString(spewConfig.Sdump(extraB))
- }
- msg.WriteString("\n\nlistA:\n")
- msg.WriteString(spewConfig.Sdump(listA))
- msg.WriteString("\n\nlistB:\n")
- msg.WriteString(spewConfig.Sdump(listB))
-
- return msg.String()
-}
-
-// Condition uses a Comparison to assert a complex condition.
-func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- result := comp()
- if !result {
- Fail(t, "Condition failed!", msgAndArgs...)
- }
- return result
-}
-
-// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics
-// methods, and represents a simple func that takes no arguments, and returns nothing.
-type PanicTestFunc func()
-
-// didPanic returns true if the function passed to it panics. Otherwise, it returns false.
-func didPanic(f PanicTestFunc) (didPanic bool, message interface{}, stack string) {
- didPanic = true
-
- defer func() {
- message = recover()
- if didPanic {
- stack = string(debug.Stack())
- }
- }()
-
- // call the target function
- f()
- didPanic = false
-
- return
-}
-
-// Panics asserts that the code inside the specified PanicTestFunc panics.
-//
-// assert.Panics(t, func(){ GoCrazy() })
-func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
- if funcDidPanic, panicValue, _ := didPanic(f); !funcDidPanic {
- return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...)
- }
-
- return true
-}
-
-// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
-// the recovered panic value equals the expected panic value.
-//
-// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() })
-func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
- funcDidPanic, panicValue, panickedStack := didPanic(f)
- if !funcDidPanic {
- return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...)
- }
- if panicValue != expected {
- return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%#v\n\tPanic value:\t%#v\n\tPanic stack:\t%s", f, expected, panicValue, panickedStack), msgAndArgs...)
- }
-
- return true
-}
-
-// PanicsWithError asserts that the code inside the specified PanicTestFunc
-// panics, and that the recovered panic value is an error that satisfies the
-// EqualError comparison.
-//
-// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() })
-func PanicsWithError(t TestingT, errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
- funcDidPanic, panicValue, panickedStack := didPanic(f)
- if !funcDidPanic {
- return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...)
- }
- panicErr, ok := panicValue.(error)
- if !ok || panicErr.Error() != errString {
- return Fail(t, fmt.Sprintf("func %#v should panic with error message:\t%#v\n\tPanic value:\t%#v\n\tPanic stack:\t%s", f, errString, panicValue, panickedStack), msgAndArgs...)
- }
-
- return true
-}
-
-// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
-//
-// assert.NotPanics(t, func(){ RemainCalm() })
-func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
- if funcDidPanic, panicValue, panickedStack := didPanic(f); funcDidPanic {
- return Fail(t, fmt.Sprintf("func %#v should not panic\n\tPanic value:\t%v\n\tPanic stack:\t%s", f, panicValue, panickedStack), msgAndArgs...)
- }
-
- return true
-}
-
-// WithinDuration asserts that the two times are within duration delta of each other.
-//
-// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second)
-func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
- dt := expected.Sub(actual)
- if dt < -delta || dt > delta {
- return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...)
- }
-
- return true
-}
-
-func toFloat(x interface{}) (float64, bool) {
- var xf float64
- xok := true
-
- switch xn := x.(type) {
- case uint:
- xf = float64(xn)
- case uint8:
- xf = float64(xn)
- case uint16:
- xf = float64(xn)
- case uint32:
- xf = float64(xn)
- case uint64:
- xf = float64(xn)
- case int:
- xf = float64(xn)
- case int8:
- xf = float64(xn)
- case int16:
- xf = float64(xn)
- case int32:
- xf = float64(xn)
- case int64:
- xf = float64(xn)
- case float32:
- xf = float64(xn)
- case float64:
- xf = xn
- case time.Duration:
- xf = float64(xn)
- default:
- xok = false
- }
-
- return xf, xok
-}
-
-// InDelta asserts that the two numerals are within delta of each other.
-//
-// assert.InDelta(t, math.Pi, 22/7.0, 0.01)
-func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
- af, aok := toFloat(expected)
- bf, bok := toFloat(actual)
-
- if !aok || !bok {
- return Fail(t, "Parameters must be numerical", msgAndArgs...)
- }
-
- if math.IsNaN(af) && math.IsNaN(bf) {
- return true
- }
-
- if math.IsNaN(af) {
- return Fail(t, "Expected must not be NaN", msgAndArgs...)
- }
-
- if math.IsNaN(bf) {
- return Fail(t, fmt.Sprintf("Expected %v with delta %v, but was NaN", expected, delta), msgAndArgs...)
- }
-
- dt := af - bf
- if dt < -delta || dt > delta {
- return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...)
- }
-
- return true
-}
-
-// InDeltaSlice is the same as InDelta, except it compares two slices.
-func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if expected == nil || actual == nil ||
- reflect.TypeOf(actual).Kind() != reflect.Slice ||
- reflect.TypeOf(expected).Kind() != reflect.Slice {
- return Fail(t, "Parameters must be slice", msgAndArgs...)
- }
-
- actualSlice := reflect.ValueOf(actual)
- expectedSlice := reflect.ValueOf(expected)
-
- for i := 0; i < actualSlice.Len(); i++ {
- result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta, msgAndArgs...)
- if !result {
- return result
- }
- }
-
- return true
-}
-
-// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
-func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if expected == nil || actual == nil ||
- reflect.TypeOf(actual).Kind() != reflect.Map ||
- reflect.TypeOf(expected).Kind() != reflect.Map {
- return Fail(t, "Arguments must be maps", msgAndArgs...)
- }
-
- expectedMap := reflect.ValueOf(expected)
- actualMap := reflect.ValueOf(actual)
-
- if expectedMap.Len() != actualMap.Len() {
- return Fail(t, "Arguments must have the same number of keys", msgAndArgs...)
- }
-
- for _, k := range expectedMap.MapKeys() {
- ev := expectedMap.MapIndex(k)
- av := actualMap.MapIndex(k)
-
- if !ev.IsValid() {
- return Fail(t, fmt.Sprintf("missing key %q in expected map", k), msgAndArgs...)
- }
-
- if !av.IsValid() {
- return Fail(t, fmt.Sprintf("missing key %q in actual map", k), msgAndArgs...)
- }
-
- if !InDelta(
- t,
- ev.Interface(),
- av.Interface(),
- delta,
- msgAndArgs...,
- ) {
- return false
- }
- }
-
- return true
-}
-
-func calcRelativeError(expected, actual interface{}) (float64, error) {
- af, aok := toFloat(expected)
- bf, bok := toFloat(actual)
- if !aok || !bok {
- return 0, fmt.Errorf("Parameters must be numerical")
- }
- if math.IsNaN(af) && math.IsNaN(bf) {
- return 0, nil
- }
- if math.IsNaN(af) {
- return 0, errors.New("expected value must not be NaN")
- }
- if af == 0 {
- return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error")
- }
- if math.IsNaN(bf) {
- return 0, errors.New("actual value must not be NaN")
- }
-
- return math.Abs(af-bf) / math.Abs(af), nil
-}
-
-// InEpsilon asserts that expected and actual have a relative error less than epsilon
-func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if math.IsNaN(epsilon) {
- return Fail(t, "epsilon must not be NaN")
- }
- actualEpsilon, err := calcRelativeError(expected, actual)
- if err != nil {
- return Fail(t, err.Error(), msgAndArgs...)
- }
- if actualEpsilon > epsilon {
- return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+
- " < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...)
- }
-
- return true
-}
-
-// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
-func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if expected == nil || actual == nil ||
- reflect.TypeOf(actual).Kind() != reflect.Slice ||
- reflect.TypeOf(expected).Kind() != reflect.Slice {
- return Fail(t, "Parameters must be slice", msgAndArgs...)
- }
-
- actualSlice := reflect.ValueOf(actual)
- expectedSlice := reflect.ValueOf(expected)
-
- for i := 0; i < actualSlice.Len(); i++ {
- result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon)
- if !result {
- return result
- }
- }
-
- return true
-}
-
-/*
- Errors
-*/
-
-// NoError asserts that a function returned no error (i.e. `nil`).
-//
-// actualObj, err := SomeFunction()
-// if assert.NoError(t, err) {
-// assert.Equal(t, expectedObj, actualObj)
-// }
-func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
- if err != nil {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...)
- }
-
- return true
-}
-
-// Error asserts that a function returned an error (i.e. not `nil`).
-//
-// actualObj, err := SomeFunction()
-// if assert.Error(t, err) {
-// assert.Equal(t, expectedError, err)
-// }
-func Error(t TestingT, err error, msgAndArgs ...interface{}) bool {
- if err == nil {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return Fail(t, "An error is expected but got nil.", msgAndArgs...)
- }
-
- return true
-}
-
-// EqualError asserts that a function returned an error (i.e. not `nil`)
-// and that it is equal to the provided error.
-//
-// actualObj, err := SomeFunction()
-// assert.EqualError(t, err, expectedErrorString)
-func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if !Error(t, theError, msgAndArgs...) {
- return false
- }
- expected := errString
- actual := theError.Error()
- // don't need to use deep equals here, we know they are both strings
- if expected != actual {
- return Fail(t, fmt.Sprintf("Error message not equal:\n"+
- "expected: %q\n"+
- "actual : %q", expected, actual), msgAndArgs...)
- }
- return true
-}
-
-// ErrorContains asserts that a function returned an error (i.e. not `nil`)
-// and that the error contains the specified substring.
-//
-// actualObj, err := SomeFunction()
-// assert.ErrorContains(t, err, expectedErrorSubString)
-func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if !Error(t, theError, msgAndArgs...) {
- return false
- }
-
- actual := theError.Error()
- if !strings.Contains(actual, contains) {
- return Fail(t, fmt.Sprintf("Error %#v does not contain %#v", actual, contains), msgAndArgs...)
- }
-
- return true
-}
-
-// matchRegexp return true if a specified regexp matches a string.
-func matchRegexp(rx interface{}, str interface{}) bool {
-
- var r *regexp.Regexp
- if rr, ok := rx.(*regexp.Regexp); ok {
- r = rr
- } else {
- r = regexp.MustCompile(fmt.Sprint(rx))
- }
-
- return (r.FindStringIndex(fmt.Sprint(str)) != nil)
-
-}
-
-// Regexp asserts that a specified regexp matches a string.
-//
-// assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
-// assert.Regexp(t, "start...$", "it's not starting")
-func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
- match := matchRegexp(rx, str)
-
- if !match {
- Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...)
- }
-
- return match
-}
-
-// NotRegexp asserts that a specified regexp does not match a string.
-//
-// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
-// assert.NotRegexp(t, "^start", "it's not starting")
-func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- match := matchRegexp(rx, str)
-
- if match {
- Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...)
- }
-
- return !match
-
-}
-
-// Zero asserts that i is the zero value for its type.
-func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if i != nil && !reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) {
- return Fail(t, fmt.Sprintf("Should be zero, but was %v", i), msgAndArgs...)
- }
- return true
-}
-
-// NotZero asserts that i is not the zero value for its type.
-func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if i == nil || reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) {
- return Fail(t, fmt.Sprintf("Should not be zero, but was %v", i), msgAndArgs...)
- }
- return true
-}
-
-// FileExists checks whether a file exists in the given path. It also fails if
-// the path points to a directory or there is an error when trying to check the file.
-func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- info, err := os.Lstat(path)
- if err != nil {
- if os.IsNotExist(err) {
- return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...)
- }
- return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...)
- }
- if info.IsDir() {
- return Fail(t, fmt.Sprintf("%q is a directory", path), msgAndArgs...)
- }
- return true
-}
-
-// NoFileExists checks whether a file does not exist in a given path. It fails
-// if the path points to an existing _file_ only.
-func NoFileExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- info, err := os.Lstat(path)
- if err != nil {
- return true
- }
- if info.IsDir() {
- return true
- }
- return Fail(t, fmt.Sprintf("file %q exists", path), msgAndArgs...)
-}
-
-// DirExists checks whether a directory exists in the given path. It also fails
-// if the path is a file rather a directory or there is an error checking whether it exists.
-func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- info, err := os.Lstat(path)
- if err != nil {
- if os.IsNotExist(err) {
- return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...)
- }
- return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...)
- }
- if !info.IsDir() {
- return Fail(t, fmt.Sprintf("%q is a file", path), msgAndArgs...)
- }
- return true
-}
-
-// NoDirExists checks whether a directory does not exist in the given path.
-// It fails if the path points to an existing _directory_ only.
-func NoDirExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- info, err := os.Lstat(path)
- if err != nil {
- if os.IsNotExist(err) {
- return true
- }
- return true
- }
- if !info.IsDir() {
- return true
- }
- return Fail(t, fmt.Sprintf("directory %q exists", path), msgAndArgs...)
-}
-
-// JSONEq asserts that two JSON strings are equivalent.
-//
-// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
-func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- var expectedJSONAsInterface, actualJSONAsInterface interface{}
-
- if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil {
- return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...)
- }
-
- if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil {
- return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...)
- }
-
- return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...)
-}
-
-// YAMLEq asserts that two YAML strings are equivalent.
-func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- var expectedYAMLAsInterface, actualYAMLAsInterface interface{}
-
- if err := yaml.Unmarshal([]byte(expected), &expectedYAMLAsInterface); err != nil {
- return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid yaml.\nYAML parsing error: '%s'", expected, err.Error()), msgAndArgs...)
- }
-
- if err := yaml.Unmarshal([]byte(actual), &actualYAMLAsInterface); err != nil {
- return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid yaml.\nYAML error: '%s'", actual, err.Error()), msgAndArgs...)
- }
-
- return Equal(t, expectedYAMLAsInterface, actualYAMLAsInterface, msgAndArgs...)
-}
-
-func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) {
- t := reflect.TypeOf(v)
- k := t.Kind()
-
- if k == reflect.Ptr {
- t = t.Elem()
- k = t.Kind()
- }
- return t, k
-}
-
-// diff returns a diff of both values as long as both are of the same type and
-// are a struct, map, slice, array or string. Otherwise it returns an empty string.
-func diff(expected interface{}, actual interface{}) string {
- if expected == nil || actual == nil {
- return ""
- }
-
- et, ek := typeAndKind(expected)
- at, _ := typeAndKind(actual)
-
- if et != at {
- return ""
- }
-
- if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String {
- return ""
- }
-
- var e, a string
-
- switch et {
- case reflect.TypeOf(""):
- e = reflect.ValueOf(expected).String()
- a = reflect.ValueOf(actual).String()
- case reflect.TypeOf(time.Time{}):
- e = spewConfigStringerEnabled.Sdump(expected)
- a = spewConfigStringerEnabled.Sdump(actual)
- default:
- e = spewConfig.Sdump(expected)
- a = spewConfig.Sdump(actual)
- }
-
- diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
- A: difflib.SplitLines(e),
- B: difflib.SplitLines(a),
- FromFile: "Expected",
- FromDate: "",
- ToFile: "Actual",
- ToDate: "",
- Context: 1,
- })
-
- return "\n\nDiff:\n" + diff
-}
-
-func isFunction(arg interface{}) bool {
- if arg == nil {
- return false
- }
- return reflect.TypeOf(arg).Kind() == reflect.Func
-}
-
-var spewConfig = spew.ConfigState{
- Indent: " ",
- DisablePointerAddresses: true,
- DisableCapacities: true,
- SortKeys: true,
- DisableMethods: true,
- MaxDepth: 10,
-}
-
-var spewConfigStringerEnabled = spew.ConfigState{
- Indent: " ",
- DisablePointerAddresses: true,
- DisableCapacities: true,
- SortKeys: true,
- MaxDepth: 10,
-}
-
-type tHelper interface {
- Helper()
-}
-
-// Eventually asserts that given condition will be met in waitFor time,
-// periodically checking target function each tick.
-//
-// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond)
-func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
- ch := make(chan bool, 1)
-
- timer := time.NewTimer(waitFor)
- defer timer.Stop()
-
- ticker := time.NewTicker(tick)
- defer ticker.Stop()
-
- for tick := ticker.C; ; {
- select {
- case <-timer.C:
- return Fail(t, "Condition never satisfied", msgAndArgs...)
- case <-tick:
- tick = nil
- go func() { ch <- condition() }()
- case v := <-ch:
- if v {
- return true
- }
- tick = ticker.C
- }
- }
-}
-
-// Never asserts that the given condition doesn't satisfy in waitFor time,
-// periodically checking the target function each tick.
-//
-// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond)
-func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
- ch := make(chan bool, 1)
-
- timer := time.NewTimer(waitFor)
- defer timer.Stop()
-
- ticker := time.NewTicker(tick)
- defer ticker.Stop()
-
- for tick := ticker.C; ; {
- select {
- case <-timer.C:
- return true
- case <-tick:
- tick = nil
- go func() { ch <- condition() }()
- case v := <-ch:
- if v {
- return Fail(t, "Condition satisfied", msgAndArgs...)
- }
- tick = ticker.C
- }
- }
-}
-
-// ErrorIs asserts that at least one of the errors in err's chain matches target.
-// This is a wrapper for errors.Is.
-func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if errors.Is(err, target) {
- return true
- }
-
- var expectedText string
- if target != nil {
- expectedText = target.Error()
- }
-
- chain := buildErrorChainString(err)
-
- return Fail(t, fmt.Sprintf("Target error should be in err chain:\n"+
- "expected: %q\n"+
- "in chain: %s", expectedText, chain,
- ), msgAndArgs...)
-}
-
-// NotErrorIs asserts that at none of the errors in err's chain matches target.
-// This is a wrapper for errors.Is.
-func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if !errors.Is(err, target) {
- return true
- }
-
- var expectedText string
- if target != nil {
- expectedText = target.Error()
- }
-
- chain := buildErrorChainString(err)
-
- return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+
- "found: %q\n"+
- "in chain: %s", expectedText, chain,
- ), msgAndArgs...)
-}
-
-// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
-// This is a wrapper for errors.As.
-func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if errors.As(err, target) {
- return true
- }
-
- chain := buildErrorChainString(err)
-
- return Fail(t, fmt.Sprintf("Should be in error chain:\n"+
- "expected: %q\n"+
- "in chain: %s", target, chain,
- ), msgAndArgs...)
-}
-
-func buildErrorChainString(err error) string {
- if err == nil {
- return ""
- }
-
- e := errors.Unwrap(err)
- chain := fmt.Sprintf("%q", err.Error())
- for e != nil {
- chain += fmt.Sprintf("\n\t%q", e.Error())
- e = errors.Unwrap(e)
- }
- return chain
-}
diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go
deleted file mode 100644
index c9dccc4d..00000000
--- a/vendor/github.com/stretchr/testify/assert/doc.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system.
-//
-// Example Usage
-//
-// The following is a complete example using assert in a standard test function:
-// import (
-// "testing"
-// "github.com/stretchr/testify/assert"
-// )
-//
-// func TestSomething(t *testing.T) {
-//
-// var a string = "Hello"
-// var b string = "Hello"
-//
-// assert.Equal(t, a, b, "The two words should be the same.")
-//
-// }
-//
-// if you assert many times, use the format below:
-//
-// import (
-// "testing"
-// "github.com/stretchr/testify/assert"
-// )
-//
-// func TestSomething(t *testing.T) {
-// assert := assert.New(t)
-//
-// var a string = "Hello"
-// var b string = "Hello"
-//
-// assert.Equal(a, b, "The two words should be the same.")
-// }
-//
-// Assertions
-//
-// Assertions allow you to easily write test code, and are global funcs in the `assert` package.
-// All assertion functions take, as the first argument, the `*testing.T` object provided by the
-// testing framework. This allows the assertion funcs to write the failings and other details to
-// the correct place.
-//
-// Every assertion function also takes an optional string message as the final argument,
-// allowing custom error messages to be appended to the message the assertion method outputs.
-package assert
diff --git a/vendor/github.com/stretchr/testify/assert/errors.go b/vendor/github.com/stretchr/testify/assert/errors.go
deleted file mode 100644
index ac9dc9d1..00000000
--- a/vendor/github.com/stretchr/testify/assert/errors.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package assert
-
-import (
- "errors"
-)
-
-// AnError is an error instance useful for testing. If the code does not care
-// about error specifics, and only needs to return the error for example, this
-// error should be used to make the test code more readable.
-var AnError = errors.New("assert.AnError general error for testing")
diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go
deleted file mode 100644
index df189d23..00000000
--- a/vendor/github.com/stretchr/testify/assert/forward_assertions.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package assert
-
-// Assertions provides assertion methods around the
-// TestingT interface.
-type Assertions struct {
- t TestingT
-}
-
-// New makes a new Assertions object for the specified TestingT.
-func New(t TestingT) *Assertions {
- return &Assertions{
- t: t,
- }
-}
-
-//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs"
diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go
deleted file mode 100644
index 4ed341dd..00000000
--- a/vendor/github.com/stretchr/testify/assert/http_assertions.go
+++ /dev/null
@@ -1,162 +0,0 @@
-package assert
-
-import (
- "fmt"
- "net/http"
- "net/http/httptest"
- "net/url"
- "strings"
-)
-
-// httpCode is a helper that returns HTTP code of the response. It returns -1 and
-// an error if building a new request fails.
-func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) {
- w := httptest.NewRecorder()
- req, err := http.NewRequest(method, url, nil)
- if err != nil {
- return -1, err
- }
- req.URL.RawQuery = values.Encode()
- handler(w, req)
- return w.Code, nil
-}
-
-// HTTPSuccess asserts that a specified handler returns a success status code.
-//
-// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- code, err := httpCode(handler, method, url, values)
- if err != nil {
- Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
- }
-
- isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent
- if !isSuccessCode {
- Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code))
- }
-
- return isSuccessCode
-}
-
-// HTTPRedirect asserts that a specified handler returns a redirect status code.
-//
-// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- code, err := httpCode(handler, method, url, values)
- if err != nil {
- Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
- }
-
- isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
- if !isRedirectCode {
- Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code))
- }
-
- return isRedirectCode
-}
-
-// HTTPError asserts that a specified handler returns an error status code.
-//
-// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- code, err := httpCode(handler, method, url, values)
- if err != nil {
- Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
- }
-
- isErrorCode := code >= http.StatusBadRequest
- if !isErrorCode {
- Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code))
- }
-
- return isErrorCode
-}
-
-// HTTPStatusCode asserts that a specified handler returns a specified status code.
-//
-// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501)
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- code, err := httpCode(handler, method, url, values)
- if err != nil {
- Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
- }
-
- successful := code == statuscode
- if !successful {
- Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code))
- }
-
- return successful
-}
-
-// HTTPBody is a helper that returns HTTP body of the response. It returns
-// empty string if building a new request fails.
-func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string {
- w := httptest.NewRecorder()
- req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
- if err != nil {
- return ""
- }
- handler(w, req)
- return w.Body.String()
-}
-
-// HTTPBodyContains asserts that a specified handler returns a
-// body that contains a string.
-//
-// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- body := HTTPBody(handler, method, url, values)
-
- contains := strings.Contains(body, fmt.Sprint(str))
- if !contains {
- Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
- }
-
- return contains
-}
-
-// HTTPBodyNotContains asserts that a specified handler returns a
-// body that does not contain a string.
-//
-// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- body := HTTPBody(handler, method, url, values)
-
- contains := strings.Contains(body, fmt.Sprint(str))
- if contains {
- Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
- }
-
- return !contains
-}
diff --git a/vendor/github.com/stretchr/testify/require/doc.go b/vendor/github.com/stretchr/testify/require/doc.go
deleted file mode 100644
index 169de392..00000000
--- a/vendor/github.com/stretchr/testify/require/doc.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Package require implements the same assertions as the `assert` package but
-// stops test execution when a test fails.
-//
-// Example Usage
-//
-// The following is a complete example using require in a standard test function:
-// import (
-// "testing"
-// "github.com/stretchr/testify/require"
-// )
-//
-// func TestSomething(t *testing.T) {
-//
-// var a string = "Hello"
-// var b string = "Hello"
-//
-// require.Equal(t, a, b, "The two words should be the same.")
-//
-// }
-//
-// Assertions
-//
-// The `require` package have same global functions as in the `assert` package,
-// but instead of returning a boolean result they call `t.FailNow()`.
-//
-// Every assertion function also takes an optional string message as the final argument,
-// allowing custom error messages to be appended to the message the assertion method outputs.
-package require
diff --git a/vendor/github.com/stretchr/testify/require/forward_requirements.go b/vendor/github.com/stretchr/testify/require/forward_requirements.go
deleted file mode 100644
index 1dcb2338..00000000
--- a/vendor/github.com/stretchr/testify/require/forward_requirements.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package require
-
-// Assertions provides assertion methods around the
-// TestingT interface.
-type Assertions struct {
- t TestingT
-}
-
-// New makes a new Assertions object for the specified TestingT.
-func New(t TestingT) *Assertions {
- return &Assertions{
- t: t,
- }
-}
-
-//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=require -template=require_forward.go.tmpl -include-format-funcs"
diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go
deleted file mode 100644
index 59c48277..00000000
--- a/vendor/github.com/stretchr/testify/require/require.go
+++ /dev/null
@@ -1,1909 +0,0 @@
-/*
-* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
-* THIS FILE MUST NOT BE EDITED BY HAND
- */
-
-package require
-
-import (
- assert "github.com/stretchr/testify/assert"
- http "net/http"
- url "net/url"
- time "time"
-)
-
-// Condition uses a Comparison to assert a complex condition.
-func Condition(t TestingT, comp assert.Comparison, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Condition(t, comp, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// Conditionf uses a Comparison to assert a complex condition.
-func Conditionf(t TestingT, comp assert.Comparison, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Conditionf(t, comp, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// Contains asserts that the specified string, list(array, slice...) or map contains the
-// specified substring or element.
-//
-// assert.Contains(t, "Hello World", "World")
-// assert.Contains(t, ["Hello", "World"], "World")
-// assert.Contains(t, {"Hello": "World"}, "Hello")
-func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Contains(t, s, contains, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// Containsf asserts that the specified string, list(array, slice...) or map contains the
-// specified substring or element.
-//
-// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted")
-// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted")
-// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted")
-func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Containsf(t, s, contains, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// DirExists checks whether a directory exists in the given path. It also fails
-// if the path is a file rather a directory or there is an error checking whether it exists.
-func DirExists(t TestingT, path string, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.DirExists(t, path, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// DirExistsf checks whether a directory exists in the given path. It also fails
-// if the path is a file rather a directory or there is an error checking whether it exists.
-func DirExistsf(t TestingT, path string, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.DirExistsf(t, path, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified
-// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
-// the number of appearances of each of them in both lists should match.
-//
-// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2])
-func ElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.ElementsMatch(t, listA, listB, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified
-// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
-// the number of appearances of each of them in both lists should match.
-//
-// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
-func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.ElementsMatchf(t, listA, listB, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
-//
-// assert.Empty(t, obj)
-func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Empty(t, object, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
-//
-// assert.Emptyf(t, obj, "error message %s", "formatted")
-func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Emptyf(t, object, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// Equal asserts that two objects are equal.
-//
-// assert.Equal(t, 123, 123)
-//
-// Pointer variable equality is determined based on the equality of the
-// referenced values (as opposed to the memory addresses). Function equality
-// cannot be determined and will always fail.
-func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Equal(t, expected, actual, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// EqualError asserts that a function returned an error (i.e. not `nil`)
-// and that it is equal to the provided error.
-//
-// actualObj, err := SomeFunction()
-// assert.EqualError(t, err, expectedErrorString)
-func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.EqualError(t, theError, errString, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
-// and that it is equal to the provided error.
-//
-// actualObj, err := SomeFunction()
-// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted")
-func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.EqualErrorf(t, theError, errString, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// EqualValues asserts that two objects are equal or convertable to the same types
-// and equal.
-//
-// assert.EqualValues(t, uint32(123), int32(123))
-func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.EqualValues(t, expected, actual, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// EqualValuesf asserts that two objects are equal or convertable to the same types
-// and equal.
-//
-// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
-func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.EqualValuesf(t, expected, actual, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// Equalf asserts that two objects are equal.
-//
-// assert.Equalf(t, 123, 123, "error message %s", "formatted")
-//
-// Pointer variable equality is determined based on the equality of the
-// referenced values (as opposed to the memory addresses). Function equality
-// cannot be determined and will always fail.
-func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Equalf(t, expected, actual, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// Error asserts that a function returned an error (i.e. not `nil`).
-//
-// actualObj, err := SomeFunction()
-// if assert.Error(t, err) {
-// assert.Equal(t, expectedError, err)
-// }
-func Error(t TestingT, err error, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Error(t, err, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
-// This is a wrapper for errors.As.
-func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.ErrorAs(t, err, target, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
-// This is a wrapper for errors.As.
-func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.ErrorAsf(t, err, target, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// ErrorContains asserts that a function returned an error (i.e. not `nil`)
-// and that the error contains the specified substring.
-//
-// actualObj, err := SomeFunction()
-// assert.ErrorContains(t, err, expectedErrorSubString)
-func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.ErrorContains(t, theError, contains, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
-// and that the error contains the specified substring.
-//
-// actualObj, err := SomeFunction()
-// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted")
-func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.ErrorContainsf(t, theError, contains, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// ErrorIs asserts that at least one of the errors in err's chain matches target.
-// This is a wrapper for errors.Is.
-func ErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.ErrorIs(t, err, target, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// ErrorIsf asserts that at least one of the errors in err's chain matches target.
-// This is a wrapper for errors.Is.
-func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.ErrorIsf(t, err, target, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// Errorf asserts that a function returned an error (i.e. not `nil`).
-//
-// actualObj, err := SomeFunction()
-// if assert.Errorf(t, err, "error message %s", "formatted") {
-// assert.Equal(t, expectedErrorf, err)
-// }
-func Errorf(t TestingT, err error, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Errorf(t, err, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// Eventually asserts that given condition will be met in waitFor time,
-// periodically checking target function each tick.
-//
-// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond)
-func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Eventually(t, condition, waitFor, tick, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// Eventuallyf asserts that given condition will be met in waitFor time,
-// periodically checking target function each tick.
-//
-// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
-func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Eventuallyf(t, condition, waitFor, tick, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// Exactly asserts that two objects are equal in value and type.
-//
-// assert.Exactly(t, int32(123), int64(123))
-func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Exactly(t, expected, actual, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// Exactlyf asserts that two objects are equal in value and type.
-//
-// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
-func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Exactlyf(t, expected, actual, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// Fail reports a failure through
-func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Fail(t, failureMessage, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// FailNow fails test
-func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.FailNow(t, failureMessage, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// FailNowf fails test
-func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.FailNowf(t, failureMessage, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// Failf reports a failure through
-func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Failf(t, failureMessage, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// False asserts that the specified value is false.
-//
-// assert.False(t, myBool)
-func False(t TestingT, value bool, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.False(t, value, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// Falsef asserts that the specified value is false.
-//
-// assert.Falsef(t, myBool, "error message %s", "formatted")
-func Falsef(t TestingT, value bool, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Falsef(t, value, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// FileExists checks whether a file exists in the given path. It also fails if
-// the path points to a directory or there is an error when trying to check the file.
-func FileExists(t TestingT, path string, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.FileExists(t, path, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// FileExistsf checks whether a file exists in the given path. It also fails if
-// the path points to a directory or there is an error when trying to check the file.
-func FileExistsf(t TestingT, path string, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.FileExistsf(t, path, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// Greater asserts that the first element is greater than the second
-//
-// assert.Greater(t, 2, 1)
-// assert.Greater(t, float64(2), float64(1))
-// assert.Greater(t, "b", "a")
-func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Greater(t, e1, e2, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// GreaterOrEqual asserts that the first element is greater than or equal to the second
-//
-// assert.GreaterOrEqual(t, 2, 1)
-// assert.GreaterOrEqual(t, 2, 2)
-// assert.GreaterOrEqual(t, "b", "a")
-// assert.GreaterOrEqual(t, "b", "b")
-func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.GreaterOrEqual(t, e1, e2, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// GreaterOrEqualf asserts that the first element is greater than or equal to the second
-//
-// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted")
-// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted")
-// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted")
-// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted")
-func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.GreaterOrEqualf(t, e1, e2, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// Greaterf asserts that the first element is greater than the second
-//
-// assert.Greaterf(t, 2, 1, "error message %s", "formatted")
-// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted")
-// assert.Greaterf(t, "b", "a", "error message %s", "formatted")
-func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Greaterf(t, e1, e2, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// HTTPBodyContains asserts that a specified handler returns a
-// body that contains a string.
-//
-// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.HTTPBodyContains(t, handler, method, url, values, str, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// HTTPBodyContainsf asserts that a specified handler returns a
-// body that contains a string.
-//
-// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.HTTPBodyContainsf(t, handler, method, url, values, str, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// HTTPBodyNotContains asserts that a specified handler returns a
-// body that does not contain a string.
-//
-// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.HTTPBodyNotContains(t, handler, method, url, values, str, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// HTTPBodyNotContainsf asserts that a specified handler returns a
-// body that does not contain a string.
-//
-// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.HTTPBodyNotContainsf(t, handler, method, url, values, str, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// HTTPError asserts that a specified handler returns an error status code.
-//
-// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.HTTPError(t, handler, method, url, values, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// HTTPErrorf asserts that a specified handler returns an error status code.
-//
-// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.HTTPErrorf(t, handler, method, url, values, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// HTTPRedirect asserts that a specified handler returns a redirect status code.
-//
-// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.HTTPRedirect(t, handler, method, url, values, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// HTTPRedirectf asserts that a specified handler returns a redirect status code.
-//
-// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.HTTPRedirectf(t, handler, method, url, values, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// HTTPStatusCode asserts that a specified handler returns a specified status code.
-//
-// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501)
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.HTTPStatusCode(t, handler, method, url, values, statuscode, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// HTTPStatusCodef asserts that a specified handler returns a specified status code.
-//
-// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.HTTPStatusCodef(t, handler, method, url, values, statuscode, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// HTTPSuccess asserts that a specified handler returns a success status code.
-//
-// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.HTTPSuccess(t, handler, method, url, values, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// HTTPSuccessf asserts that a specified handler returns a success status code.
-//
-// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.HTTPSuccessf(t, handler, method, url, values, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// Implements asserts that an object is implemented by the specified interface.
-//
-// assert.Implements(t, (*MyInterface)(nil), new(MyObject))
-func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Implements(t, interfaceObject, object, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// Implementsf asserts that an object is implemented by the specified interface.
-//
-// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
-func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Implementsf(t, interfaceObject, object, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// InDelta asserts that the two numerals are within delta of each other.
-//
-// assert.InDelta(t, math.Pi, 22/7.0, 0.01)
-func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.InDelta(t, expected, actual, delta, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
-func InDeltaMapValues(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.InDeltaMapValues(t, expected, actual, delta, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
-func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.InDeltaMapValuesf(t, expected, actual, delta, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// InDeltaSlice is the same as InDelta, except it compares two slices.
-func InDeltaSlice(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.InDeltaSlice(t, expected, actual, delta, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// InDeltaSlicef is the same as InDelta, except it compares two slices.
-func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.InDeltaSlicef(t, expected, actual, delta, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// InDeltaf asserts that the two numerals are within delta of each other.
-//
-// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
-func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.InDeltaf(t, expected, actual, delta, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// InEpsilon asserts that expected and actual have a relative error less than epsilon
-func InEpsilon(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.InEpsilon(t, expected, actual, epsilon, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
-func InEpsilonSlice(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.InEpsilonSlice(t, expected, actual, epsilon, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
-func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.InEpsilonSlicef(t, expected, actual, epsilon, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// InEpsilonf asserts that expected and actual have a relative error less than epsilon
-func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.InEpsilonf(t, expected, actual, epsilon, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// IsDecreasing asserts that the collection is decreasing
-//
-// assert.IsDecreasing(t, []int{2, 1, 0})
-// assert.IsDecreasing(t, []float{2, 1})
-// assert.IsDecreasing(t, []string{"b", "a"})
-func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.IsDecreasing(t, object, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// IsDecreasingf asserts that the collection is decreasing
-//
-// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted")
-// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted")
-// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
-func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.IsDecreasingf(t, object, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// IsIncreasing asserts that the collection is increasing
-//
-// assert.IsIncreasing(t, []int{1, 2, 3})
-// assert.IsIncreasing(t, []float{1, 2})
-// assert.IsIncreasing(t, []string{"a", "b"})
-func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.IsIncreasing(t, object, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// IsIncreasingf asserts that the collection is increasing
-//
-// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted")
-// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted")
-// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
-func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.IsIncreasingf(t, object, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// IsNonDecreasing asserts that the collection is not decreasing
-//
-// assert.IsNonDecreasing(t, []int{1, 1, 2})
-// assert.IsNonDecreasing(t, []float{1, 2})
-// assert.IsNonDecreasing(t, []string{"a", "b"})
-func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.IsNonDecreasing(t, object, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// IsNonDecreasingf asserts that the collection is not decreasing
-//
-// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted")
-// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted")
-// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
-func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.IsNonDecreasingf(t, object, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// IsNonIncreasing asserts that the collection is not increasing
-//
-// assert.IsNonIncreasing(t, []int{2, 1, 1})
-// assert.IsNonIncreasing(t, []float{2, 1})
-// assert.IsNonIncreasing(t, []string{"b", "a"})
-func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.IsNonIncreasing(t, object, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// IsNonIncreasingf asserts that the collection is not increasing
-//
-// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted")
-// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted")
-// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
-func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.IsNonIncreasingf(t, object, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// IsType asserts that the specified objects are of the same type.
-func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.IsType(t, expectedType, object, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// IsTypef asserts that the specified objects are of the same type.
-func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.IsTypef(t, expectedType, object, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// JSONEq asserts that two JSON strings are equivalent.
-//
-// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
-func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.JSONEq(t, expected, actual, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// JSONEqf asserts that two JSON strings are equivalent.
-//
-// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
-func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.JSONEqf(t, expected, actual, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// Len asserts that the specified object has specific length.
-// Len also fails if the object has a type that len() not accept.
-//
-// assert.Len(t, mySlice, 3)
-func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Len(t, object, length, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// Lenf asserts that the specified object has specific length.
-// Lenf also fails if the object has a type that len() not accept.
-//
-// assert.Lenf(t, mySlice, 3, "error message %s", "formatted")
-func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Lenf(t, object, length, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// Less asserts that the first element is less than the second
-//
-// assert.Less(t, 1, 2)
-// assert.Less(t, float64(1), float64(2))
-// assert.Less(t, "a", "b")
-func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Less(t, e1, e2, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// LessOrEqual asserts that the first element is less than or equal to the second
-//
-// assert.LessOrEqual(t, 1, 2)
-// assert.LessOrEqual(t, 2, 2)
-// assert.LessOrEqual(t, "a", "b")
-// assert.LessOrEqual(t, "b", "b")
-func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.LessOrEqual(t, e1, e2, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// LessOrEqualf asserts that the first element is less than or equal to the second
-//
-// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted")
-// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted")
-// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted")
-// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted")
-func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.LessOrEqualf(t, e1, e2, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// Lessf asserts that the first element is less than the second
-//
-// assert.Lessf(t, 1, 2, "error message %s", "formatted")
-// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted")
-// assert.Lessf(t, "a", "b", "error message %s", "formatted")
-func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Lessf(t, e1, e2, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// Negative asserts that the specified element is negative
-//
-// assert.Negative(t, -1)
-// assert.Negative(t, -1.23)
-func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Negative(t, e, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// Negativef asserts that the specified element is negative
-//
-// assert.Negativef(t, -1, "error message %s", "formatted")
-// assert.Negativef(t, -1.23, "error message %s", "formatted")
-func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Negativef(t, e, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// Never asserts that the given condition doesn't satisfy in waitFor time,
-// periodically checking the target function each tick.
-//
-// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond)
-func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Never(t, condition, waitFor, tick, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// Neverf asserts that the given condition doesn't satisfy in waitFor time,
-// periodically checking the target function each tick.
-//
-// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
-func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Neverf(t, condition, waitFor, tick, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// Nil asserts that the specified object is nil.
-//
-// assert.Nil(t, err)
-func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Nil(t, object, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// Nilf asserts that the specified object is nil.
-//
-// assert.Nilf(t, err, "error message %s", "formatted")
-func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Nilf(t, object, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// NoDirExists checks whether a directory does not exist in the given path.
-// It fails if the path points to an existing _directory_ only.
-func NoDirExists(t TestingT, path string, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.NoDirExists(t, path, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// NoDirExistsf checks whether a directory does not exist in the given path.
-// It fails if the path points to an existing _directory_ only.
-func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.NoDirExistsf(t, path, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// NoError asserts that a function returned no error (i.e. `nil`).
-//
-// actualObj, err := SomeFunction()
-// if assert.NoError(t, err) {
-// assert.Equal(t, expectedObj, actualObj)
-// }
-func NoError(t TestingT, err error, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.NoError(t, err, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// NoErrorf asserts that a function returned no error (i.e. `nil`).
-//
-// actualObj, err := SomeFunction()
-// if assert.NoErrorf(t, err, "error message %s", "formatted") {
-// assert.Equal(t, expectedObj, actualObj)
-// }
-func NoErrorf(t TestingT, err error, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.NoErrorf(t, err, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// NoFileExists checks whether a file does not exist in a given path. It fails
-// if the path points to an existing _file_ only.
-func NoFileExists(t TestingT, path string, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.NoFileExists(t, path, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// NoFileExistsf checks whether a file does not exist in a given path. It fails
-// if the path points to an existing _file_ only.
-func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.NoFileExistsf(t, path, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
-// specified substring or element.
-//
-// assert.NotContains(t, "Hello World", "Earth")
-// assert.NotContains(t, ["Hello", "World"], "Earth")
-// assert.NotContains(t, {"Hello": "World"}, "Earth")
-func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.NotContains(t, s, contains, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
-// specified substring or element.
-//
-// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted")
-// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted")
-// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted")
-func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.NotContainsf(t, s, contains, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
-//
-// if assert.NotEmpty(t, obj) {
-// assert.Equal(t, "two", obj[1])
-// }
-func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.NotEmpty(t, object, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
-//
-// if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
-// assert.Equal(t, "two", obj[1])
-// }
-func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.NotEmptyf(t, object, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// NotEqual asserts that the specified values are NOT equal.
-//
-// assert.NotEqual(t, obj1, obj2)
-//
-// Pointer variable equality is determined based on the equality of the
-// referenced values (as opposed to the memory addresses).
-func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.NotEqual(t, expected, actual, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// NotEqualValues asserts that two objects are not equal even when converted to the same type
-//
-// assert.NotEqualValues(t, obj1, obj2)
-func NotEqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.NotEqualValues(t, expected, actual, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
-//
-// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted")
-func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.NotEqualValuesf(t, expected, actual, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// NotEqualf asserts that the specified values are NOT equal.
-//
-// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
-//
-// Pointer variable equality is determined based on the equality of the
-// referenced values (as opposed to the memory addresses).
-func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.NotEqualf(t, expected, actual, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// NotErrorIs asserts that at none of the errors in err's chain matches target.
-// This is a wrapper for errors.Is.
-func NotErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.NotErrorIs(t, err, target, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// NotErrorIsf asserts that at none of the errors in err's chain matches target.
-// This is a wrapper for errors.Is.
-func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.NotErrorIsf(t, err, target, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// NotNil asserts that the specified object is not nil.
-//
-// assert.NotNil(t, err)
-func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.NotNil(t, object, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// NotNilf asserts that the specified object is not nil.
-//
-// assert.NotNilf(t, err, "error message %s", "formatted")
-func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.NotNilf(t, object, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
-//
-// assert.NotPanics(t, func(){ RemainCalm() })
-func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.NotPanics(t, f, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
-//
-// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted")
-func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.NotPanicsf(t, f, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// NotRegexp asserts that a specified regexp does not match a string.
-//
-// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
-// assert.NotRegexp(t, "^start", "it's not starting")
-func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.NotRegexp(t, rx, str, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// NotRegexpf asserts that a specified regexp does not match a string.
-//
-// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
-// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
-func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.NotRegexpf(t, rx, str, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// NotSame asserts that two pointers do not reference the same object.
-//
-// assert.NotSame(t, ptr1, ptr2)
-//
-// Both arguments must be pointer variables. Pointer variable sameness is
-// determined based on the equality of both type and value.
-func NotSame(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.NotSame(t, expected, actual, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// NotSamef asserts that two pointers do not reference the same object.
-//
-// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted")
-//
-// Both arguments must be pointer variables. Pointer variable sameness is
-// determined based on the equality of both type and value.
-func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.NotSamef(t, expected, actual, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// NotSubset asserts that the specified list(array, slice...) contains not all
-// elements given in the specified subset(array, slice...).
-//
-// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
-func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.NotSubset(t, list, subset, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// NotSubsetf asserts that the specified list(array, slice...) contains not all
-// elements given in the specified subset(array, slice...).
-//
-// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
-func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.NotSubsetf(t, list, subset, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// NotZero asserts that i is not the zero value for its type.
-func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.NotZero(t, i, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// NotZerof asserts that i is not the zero value for its type.
-func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.NotZerof(t, i, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// Panics asserts that the code inside the specified PanicTestFunc panics.
-//
-// assert.Panics(t, func(){ GoCrazy() })
-func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Panics(t, f, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// PanicsWithError asserts that the code inside the specified PanicTestFunc
-// panics, and that the recovered panic value is an error that satisfies the
-// EqualError comparison.
-//
-// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() })
-func PanicsWithError(t TestingT, errString string, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.PanicsWithError(t, errString, f, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc
-// panics, and that the recovered panic value is an error that satisfies the
-// EqualError comparison.
-//
-// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
-func PanicsWithErrorf(t TestingT, errString string, f assert.PanicTestFunc, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.PanicsWithErrorf(t, errString, f, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
-// the recovered panic value equals the expected panic value.
-//
-// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() })
-func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.PanicsWithValue(t, expected, f, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
-// the recovered panic value equals the expected panic value.
-//
-// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
-func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.PanicsWithValuef(t, expected, f, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// Panicsf asserts that the code inside the specified PanicTestFunc panics.
-//
-// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted")
-func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Panicsf(t, f, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// Positive asserts that the specified element is positive
-//
-// assert.Positive(t, 1)
-// assert.Positive(t, 1.23)
-func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Positive(t, e, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// Positivef asserts that the specified element is positive
-//
-// assert.Positivef(t, 1, "error message %s", "formatted")
-// assert.Positivef(t, 1.23, "error message %s", "formatted")
-func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Positivef(t, e, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// Regexp asserts that a specified regexp matches a string.
-//
-// assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
-// assert.Regexp(t, "start...$", "it's not starting")
-func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Regexp(t, rx, str, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// Regexpf asserts that a specified regexp matches a string.
-//
-// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
-// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
-func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Regexpf(t, rx, str, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// Same asserts that two pointers reference the same object.
-//
-// assert.Same(t, ptr1, ptr2)
-//
-// Both arguments must be pointer variables. Pointer variable sameness is
-// determined based on the equality of both type and value.
-func Same(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Same(t, expected, actual, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// Samef asserts that two pointers reference the same object.
-//
-// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted")
-//
-// Both arguments must be pointer variables. Pointer variable sameness is
-// determined based on the equality of both type and value.
-func Samef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Samef(t, expected, actual, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// Subset asserts that the specified list(array, slice...) contains all
-// elements given in the specified subset(array, slice...).
-//
-// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
-func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Subset(t, list, subset, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// Subsetf asserts that the specified list(array, slice...) contains all
-// elements given in the specified subset(array, slice...).
-//
-// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
-func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Subsetf(t, list, subset, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// True asserts that the specified value is true.
-//
-// assert.True(t, myBool)
-func True(t TestingT, value bool, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.True(t, value, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// Truef asserts that the specified value is true.
-//
-// assert.Truef(t, myBool, "error message %s", "formatted")
-func Truef(t TestingT, value bool, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Truef(t, value, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// WithinDuration asserts that the two times are within duration delta of each other.
-//
-// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second)
-func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.WithinDuration(t, expected, actual, delta, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// WithinDurationf asserts that the two times are within duration delta of each other.
-//
-// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
-func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.WithinDurationf(t, expected, actual, delta, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// YAMLEq asserts that two YAML strings are equivalent.
-func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.YAMLEq(t, expected, actual, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// YAMLEqf asserts that two YAML strings are equivalent.
-func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.YAMLEqf(t, expected, actual, msg, args...) {
- return
- }
- t.FailNow()
-}
-
-// Zero asserts that i is the zero value for its type.
-func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Zero(t, i, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// Zerof asserts that i is the zero value for its type.
-func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.Zerof(t, i, msg, args...) {
- return
- }
- t.FailNow()
-}
diff --git a/vendor/github.com/stretchr/testify/require/require.go.tmpl b/vendor/github.com/stretchr/testify/require/require.go.tmpl
deleted file mode 100644
index 55e42dde..00000000
--- a/vendor/github.com/stretchr/testify/require/require.go.tmpl
+++ /dev/null
@@ -1,6 +0,0 @@
-{{.Comment}}
-func {{.DocInfo.Name}}(t TestingT, {{.Params}}) {
- if h, ok := t.(tHelper); ok { h.Helper() }
- if assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { return }
- t.FailNow()
-}
diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go
deleted file mode 100644
index 5bb07c89..00000000
--- a/vendor/github.com/stretchr/testify/require/require_forward.go
+++ /dev/null
@@ -1,1495 +0,0 @@
-/*
-* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
-* THIS FILE MUST NOT BE EDITED BY HAND
- */
-
-package require
-
-import (
- assert "github.com/stretchr/testify/assert"
- http "net/http"
- url "net/url"
- time "time"
-)
-
-// Condition uses a Comparison to assert a complex condition.
-func (a *Assertions) Condition(comp assert.Comparison, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Condition(a.t, comp, msgAndArgs...)
-}
-
-// Conditionf uses a Comparison to assert a complex condition.
-func (a *Assertions) Conditionf(comp assert.Comparison, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Conditionf(a.t, comp, msg, args...)
-}
-
-// Contains asserts that the specified string, list(array, slice...) or map contains the
-// specified substring or element.
-//
-// a.Contains("Hello World", "World")
-// a.Contains(["Hello", "World"], "World")
-// a.Contains({"Hello": "World"}, "Hello")
-func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Contains(a.t, s, contains, msgAndArgs...)
-}
-
-// Containsf asserts that the specified string, list(array, slice...) or map contains the
-// specified substring or element.
-//
-// a.Containsf("Hello World", "World", "error message %s", "formatted")
-// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted")
-// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted")
-func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Containsf(a.t, s, contains, msg, args...)
-}
-
-// DirExists checks whether a directory exists in the given path. It also fails
-// if the path is a file rather a directory or there is an error checking whether it exists.
-func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- DirExists(a.t, path, msgAndArgs...)
-}
-
-// DirExistsf checks whether a directory exists in the given path. It also fails
-// if the path is a file rather a directory or there is an error checking whether it exists.
-func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- DirExistsf(a.t, path, msg, args...)
-}
-
-// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified
-// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
-// the number of appearances of each of them in both lists should match.
-//
-// a.ElementsMatch([1, 3, 2, 3], [1, 3, 3, 2])
-func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- ElementsMatch(a.t, listA, listB, msgAndArgs...)
-}
-
-// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified
-// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
-// the number of appearances of each of them in both lists should match.
-//
-// a.ElementsMatchf([1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
-func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- ElementsMatchf(a.t, listA, listB, msg, args...)
-}
-
-// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
-//
-// a.Empty(obj)
-func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Empty(a.t, object, msgAndArgs...)
-}
-
-// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
-//
-// a.Emptyf(obj, "error message %s", "formatted")
-func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Emptyf(a.t, object, msg, args...)
-}
-
-// Equal asserts that two objects are equal.
-//
-// a.Equal(123, 123)
-//
-// Pointer variable equality is determined based on the equality of the
-// referenced values (as opposed to the memory addresses). Function equality
-// cannot be determined and will always fail.
-func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Equal(a.t, expected, actual, msgAndArgs...)
-}
-
-// EqualError asserts that a function returned an error (i.e. not `nil`)
-// and that it is equal to the provided error.
-//
-// actualObj, err := SomeFunction()
-// a.EqualError(err, expectedErrorString)
-func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- EqualError(a.t, theError, errString, msgAndArgs...)
-}
-
-// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
-// and that it is equal to the provided error.
-//
-// actualObj, err := SomeFunction()
-// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted")
-func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- EqualErrorf(a.t, theError, errString, msg, args...)
-}
-
-// EqualValues asserts that two objects are equal or convertable to the same types
-// and equal.
-//
-// a.EqualValues(uint32(123), int32(123))
-func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- EqualValues(a.t, expected, actual, msgAndArgs...)
-}
-
-// EqualValuesf asserts that two objects are equal or convertable to the same types
-// and equal.
-//
-// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
-func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- EqualValuesf(a.t, expected, actual, msg, args...)
-}
-
-// Equalf asserts that two objects are equal.
-//
-// a.Equalf(123, 123, "error message %s", "formatted")
-//
-// Pointer variable equality is determined based on the equality of the
-// referenced values (as opposed to the memory addresses). Function equality
-// cannot be determined and will always fail.
-func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Equalf(a.t, expected, actual, msg, args...)
-}
-
-// Error asserts that a function returned an error (i.e. not `nil`).
-//
-// actualObj, err := SomeFunction()
-// if a.Error(err) {
-// assert.Equal(t, expectedError, err)
-// }
-func (a *Assertions) Error(err error, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Error(a.t, err, msgAndArgs...)
-}
-
-// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
-// This is a wrapper for errors.As.
-func (a *Assertions) ErrorAs(err error, target interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- ErrorAs(a.t, err, target, msgAndArgs...)
-}
-
-// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
-// This is a wrapper for errors.As.
-func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- ErrorAsf(a.t, err, target, msg, args...)
-}
-
-// ErrorContains asserts that a function returned an error (i.e. not `nil`)
-// and that the error contains the specified substring.
-//
-// actualObj, err := SomeFunction()
-// a.ErrorContains(err, expectedErrorSubString)
-func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- ErrorContains(a.t, theError, contains, msgAndArgs...)
-}
-
-// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
-// and that the error contains the specified substring.
-//
-// actualObj, err := SomeFunction()
-// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted")
-func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- ErrorContainsf(a.t, theError, contains, msg, args...)
-}
-
-// ErrorIs asserts that at least one of the errors in err's chain matches target.
-// This is a wrapper for errors.Is.
-func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- ErrorIs(a.t, err, target, msgAndArgs...)
-}
-
-// ErrorIsf asserts that at least one of the errors in err's chain matches target.
-// This is a wrapper for errors.Is.
-func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- ErrorIsf(a.t, err, target, msg, args...)
-}
-
-// Errorf asserts that a function returned an error (i.e. not `nil`).
-//
-// actualObj, err := SomeFunction()
-// if a.Errorf(err, "error message %s", "formatted") {
-// assert.Equal(t, expectedErrorf, err)
-// }
-func (a *Assertions) Errorf(err error, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Errorf(a.t, err, msg, args...)
-}
-
-// Eventually asserts that given condition will be met in waitFor time,
-// periodically checking target function each tick.
-//
-// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond)
-func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Eventually(a.t, condition, waitFor, tick, msgAndArgs...)
-}
-
-// Eventuallyf asserts that given condition will be met in waitFor time,
-// periodically checking target function each tick.
-//
-// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
-func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Eventuallyf(a.t, condition, waitFor, tick, msg, args...)
-}
-
-// Exactly asserts that two objects are equal in value and type.
-//
-// a.Exactly(int32(123), int64(123))
-func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Exactly(a.t, expected, actual, msgAndArgs...)
-}
-
-// Exactlyf asserts that two objects are equal in value and type.
-//
-// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted")
-func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Exactlyf(a.t, expected, actual, msg, args...)
-}
-
-// Fail reports a failure through
-func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Fail(a.t, failureMessage, msgAndArgs...)
-}
-
-// FailNow fails test
-func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- FailNow(a.t, failureMessage, msgAndArgs...)
-}
-
-// FailNowf fails test
-func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- FailNowf(a.t, failureMessage, msg, args...)
-}
-
-// Failf reports a failure through
-func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Failf(a.t, failureMessage, msg, args...)
-}
-
-// False asserts that the specified value is false.
-//
-// a.False(myBool)
-func (a *Assertions) False(value bool, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- False(a.t, value, msgAndArgs...)
-}
-
-// Falsef asserts that the specified value is false.
-//
-// a.Falsef(myBool, "error message %s", "formatted")
-func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Falsef(a.t, value, msg, args...)
-}
-
-// FileExists checks whether a file exists in the given path. It also fails if
-// the path points to a directory or there is an error when trying to check the file.
-func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- FileExists(a.t, path, msgAndArgs...)
-}
-
-// FileExistsf checks whether a file exists in the given path. It also fails if
-// the path points to a directory or there is an error when trying to check the file.
-func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- FileExistsf(a.t, path, msg, args...)
-}
-
-// Greater asserts that the first element is greater than the second
-//
-// a.Greater(2, 1)
-// a.Greater(float64(2), float64(1))
-// a.Greater("b", "a")
-func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Greater(a.t, e1, e2, msgAndArgs...)
-}
-
-// GreaterOrEqual asserts that the first element is greater than or equal to the second
-//
-// a.GreaterOrEqual(2, 1)
-// a.GreaterOrEqual(2, 2)
-// a.GreaterOrEqual("b", "a")
-// a.GreaterOrEqual("b", "b")
-func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- GreaterOrEqual(a.t, e1, e2, msgAndArgs...)
-}
-
-// GreaterOrEqualf asserts that the first element is greater than or equal to the second
-//
-// a.GreaterOrEqualf(2, 1, "error message %s", "formatted")
-// a.GreaterOrEqualf(2, 2, "error message %s", "formatted")
-// a.GreaterOrEqualf("b", "a", "error message %s", "formatted")
-// a.GreaterOrEqualf("b", "b", "error message %s", "formatted")
-func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- GreaterOrEqualf(a.t, e1, e2, msg, args...)
-}
-
-// Greaterf asserts that the first element is greater than the second
-//
-// a.Greaterf(2, 1, "error message %s", "formatted")
-// a.Greaterf(float64(2), float64(1), "error message %s", "formatted")
-// a.Greaterf("b", "a", "error message %s", "formatted")
-func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Greaterf(a.t, e1, e2, msg, args...)
-}
-
-// HTTPBodyContains asserts that a specified handler returns a
-// body that contains a string.
-//
-// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...)
-}
-
-// HTTPBodyContainsf asserts that a specified handler returns a
-// body that contains a string.
-//
-// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...)
-}
-
-// HTTPBodyNotContains asserts that a specified handler returns a
-// body that does not contain a string.
-//
-// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...)
-}
-
-// HTTPBodyNotContainsf asserts that a specified handler returns a
-// body that does not contain a string.
-//
-// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...)
-}
-
-// HTTPError asserts that a specified handler returns an error status code.
-//
-// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- HTTPError(a.t, handler, method, url, values, msgAndArgs...)
-}
-
-// HTTPErrorf asserts that a specified handler returns an error status code.
-//
-// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- HTTPErrorf(a.t, handler, method, url, values, msg, args...)
-}
-
-// HTTPRedirect asserts that a specified handler returns a redirect status code.
-//
-// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- HTTPRedirect(a.t, handler, method, url, values, msgAndArgs...)
-}
-
-// HTTPRedirectf asserts that a specified handler returns a redirect status code.
-//
-// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- HTTPRedirectf(a.t, handler, method, url, values, msg, args...)
-}
-
-// HTTPStatusCode asserts that a specified handler returns a specified status code.
-//
-// a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501)
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- HTTPStatusCode(a.t, handler, method, url, values, statuscode, msgAndArgs...)
-}
-
-// HTTPStatusCodef asserts that a specified handler returns a specified status code.
-//
-// a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- HTTPStatusCodef(a.t, handler, method, url, values, statuscode, msg, args...)
-}
-
-// HTTPSuccess asserts that a specified handler returns a success status code.
-//
-// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- HTTPSuccess(a.t, handler, method, url, values, msgAndArgs...)
-}
-
-// HTTPSuccessf asserts that a specified handler returns a success status code.
-//
-// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- HTTPSuccessf(a.t, handler, method, url, values, msg, args...)
-}
-
-// Implements asserts that an object is implemented by the specified interface.
-//
-// a.Implements((*MyInterface)(nil), new(MyObject))
-func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Implements(a.t, interfaceObject, object, msgAndArgs...)
-}
-
-// Implementsf asserts that an object is implemented by the specified interface.
-//
-// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
-func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Implementsf(a.t, interfaceObject, object, msg, args...)
-}
-
-// InDelta asserts that the two numerals are within delta of each other.
-//
-// a.InDelta(math.Pi, 22/7.0, 0.01)
-func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- InDelta(a.t, expected, actual, delta, msgAndArgs...)
-}
-
-// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
-func (a *Assertions) InDeltaMapValues(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- InDeltaMapValues(a.t, expected, actual, delta, msgAndArgs...)
-}
-
-// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
-func (a *Assertions) InDeltaMapValuesf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- InDeltaMapValuesf(a.t, expected, actual, delta, msg, args...)
-}
-
-// InDeltaSlice is the same as InDelta, except it compares two slices.
-func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...)
-}
-
-// InDeltaSlicef is the same as InDelta, except it compares two slices.
-func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- InDeltaSlicef(a.t, expected, actual, delta, msg, args...)
-}
-
-// InDeltaf asserts that the two numerals are within delta of each other.
-//
-// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
-func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- InDeltaf(a.t, expected, actual, delta, msg, args...)
-}
-
-// InEpsilon asserts that expected and actual have a relative error less than epsilon
-func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
-}
-
-// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
-func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...)
-}
-
-// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
-func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...)
-}
-
-// InEpsilonf asserts that expected and actual have a relative error less than epsilon
-func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- InEpsilonf(a.t, expected, actual, epsilon, msg, args...)
-}
-
-// IsDecreasing asserts that the collection is decreasing
-//
-// a.IsDecreasing([]int{2, 1, 0})
-// a.IsDecreasing([]float{2, 1})
-// a.IsDecreasing([]string{"b", "a"})
-func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- IsDecreasing(a.t, object, msgAndArgs...)
-}
-
-// IsDecreasingf asserts that the collection is decreasing
-//
-// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted")
-// a.IsDecreasingf([]float{2, 1}, "error message %s", "formatted")
-// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted")
-func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- IsDecreasingf(a.t, object, msg, args...)
-}
-
-// IsIncreasing asserts that the collection is increasing
-//
-// a.IsIncreasing([]int{1, 2, 3})
-// a.IsIncreasing([]float{1, 2})
-// a.IsIncreasing([]string{"a", "b"})
-func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- IsIncreasing(a.t, object, msgAndArgs...)
-}
-
-// IsIncreasingf asserts that the collection is increasing
-//
-// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted")
-// a.IsIncreasingf([]float{1, 2}, "error message %s", "formatted")
-// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted")
-func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- IsIncreasingf(a.t, object, msg, args...)
-}
-
-// IsNonDecreasing asserts that the collection is not decreasing
-//
-// a.IsNonDecreasing([]int{1, 1, 2})
-// a.IsNonDecreasing([]float{1, 2})
-// a.IsNonDecreasing([]string{"a", "b"})
-func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- IsNonDecreasing(a.t, object, msgAndArgs...)
-}
-
-// IsNonDecreasingf asserts that the collection is not decreasing
-//
-// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted")
-// a.IsNonDecreasingf([]float{1, 2}, "error message %s", "formatted")
-// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted")
-func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- IsNonDecreasingf(a.t, object, msg, args...)
-}
-
-// IsNonIncreasing asserts that the collection is not increasing
-//
-// a.IsNonIncreasing([]int{2, 1, 1})
-// a.IsNonIncreasing([]float{2, 1})
-// a.IsNonIncreasing([]string{"b", "a"})
-func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- IsNonIncreasing(a.t, object, msgAndArgs...)
-}
-
-// IsNonIncreasingf asserts that the collection is not increasing
-//
-// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted")
-// a.IsNonIncreasingf([]float{2, 1}, "error message %s", "formatted")
-// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted")
-func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- IsNonIncreasingf(a.t, object, msg, args...)
-}
-
-// IsType asserts that the specified objects are of the same type.
-func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- IsType(a.t, expectedType, object, msgAndArgs...)
-}
-
-// IsTypef asserts that the specified objects are of the same type.
-func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- IsTypef(a.t, expectedType, object, msg, args...)
-}
-
-// JSONEq asserts that two JSON strings are equivalent.
-//
-// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
-func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- JSONEq(a.t, expected, actual, msgAndArgs...)
-}
-
-// JSONEqf asserts that two JSON strings are equivalent.
-//
-// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
-func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- JSONEqf(a.t, expected, actual, msg, args...)
-}
-
-// Len asserts that the specified object has specific length.
-// Len also fails if the object has a type that len() not accept.
-//
-// a.Len(mySlice, 3)
-func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Len(a.t, object, length, msgAndArgs...)
-}
-
-// Lenf asserts that the specified object has specific length.
-// Lenf also fails if the object has a type that len() not accept.
-//
-// a.Lenf(mySlice, 3, "error message %s", "formatted")
-func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Lenf(a.t, object, length, msg, args...)
-}
-
-// Less asserts that the first element is less than the second
-//
-// a.Less(1, 2)
-// a.Less(float64(1), float64(2))
-// a.Less("a", "b")
-func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Less(a.t, e1, e2, msgAndArgs...)
-}
-
-// LessOrEqual asserts that the first element is less than or equal to the second
-//
-// a.LessOrEqual(1, 2)
-// a.LessOrEqual(2, 2)
-// a.LessOrEqual("a", "b")
-// a.LessOrEqual("b", "b")
-func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- LessOrEqual(a.t, e1, e2, msgAndArgs...)
-}
-
-// LessOrEqualf asserts that the first element is less than or equal to the second
-//
-// a.LessOrEqualf(1, 2, "error message %s", "formatted")
-// a.LessOrEqualf(2, 2, "error message %s", "formatted")
-// a.LessOrEqualf("a", "b", "error message %s", "formatted")
-// a.LessOrEqualf("b", "b", "error message %s", "formatted")
-func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- LessOrEqualf(a.t, e1, e2, msg, args...)
-}
-
-// Lessf asserts that the first element is less than the second
-//
-// a.Lessf(1, 2, "error message %s", "formatted")
-// a.Lessf(float64(1), float64(2), "error message %s", "formatted")
-// a.Lessf("a", "b", "error message %s", "formatted")
-func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Lessf(a.t, e1, e2, msg, args...)
-}
-
-// Negative asserts that the specified element is negative
-//
-// a.Negative(-1)
-// a.Negative(-1.23)
-func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Negative(a.t, e, msgAndArgs...)
-}
-
-// Negativef asserts that the specified element is negative
-//
-// a.Negativef(-1, "error message %s", "formatted")
-// a.Negativef(-1.23, "error message %s", "formatted")
-func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Negativef(a.t, e, msg, args...)
-}
-
-// Never asserts that the given condition doesn't satisfy in waitFor time,
-// periodically checking the target function each tick.
-//
-// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond)
-func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Never(a.t, condition, waitFor, tick, msgAndArgs...)
-}
-
-// Neverf asserts that the given condition doesn't satisfy in waitFor time,
-// periodically checking the target function each tick.
-//
-// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
-func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Neverf(a.t, condition, waitFor, tick, msg, args...)
-}
-
-// Nil asserts that the specified object is nil.
-//
-// a.Nil(err)
-func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Nil(a.t, object, msgAndArgs...)
-}
-
-// Nilf asserts that the specified object is nil.
-//
-// a.Nilf(err, "error message %s", "formatted")
-func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Nilf(a.t, object, msg, args...)
-}
-
-// NoDirExists checks whether a directory does not exist in the given path.
-// It fails if the path points to an existing _directory_ only.
-func (a *Assertions) NoDirExists(path string, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- NoDirExists(a.t, path, msgAndArgs...)
-}
-
-// NoDirExistsf checks whether a directory does not exist in the given path.
-// It fails if the path points to an existing _directory_ only.
-func (a *Assertions) NoDirExistsf(path string, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- NoDirExistsf(a.t, path, msg, args...)
-}
-
-// NoError asserts that a function returned no error (i.e. `nil`).
-//
-// actualObj, err := SomeFunction()
-// if a.NoError(err) {
-// assert.Equal(t, expectedObj, actualObj)
-// }
-func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- NoError(a.t, err, msgAndArgs...)
-}
-
-// NoErrorf asserts that a function returned no error (i.e. `nil`).
-//
-// actualObj, err := SomeFunction()
-// if a.NoErrorf(err, "error message %s", "formatted") {
-// assert.Equal(t, expectedObj, actualObj)
-// }
-func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- NoErrorf(a.t, err, msg, args...)
-}
-
-// NoFileExists checks whether a file does not exist in a given path. It fails
-// if the path points to an existing _file_ only.
-func (a *Assertions) NoFileExists(path string, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- NoFileExists(a.t, path, msgAndArgs...)
-}
-
-// NoFileExistsf checks whether a file does not exist in a given path. It fails
-// if the path points to an existing _file_ only.
-func (a *Assertions) NoFileExistsf(path string, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- NoFileExistsf(a.t, path, msg, args...)
-}
-
-// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
-// specified substring or element.
-//
-// a.NotContains("Hello World", "Earth")
-// a.NotContains(["Hello", "World"], "Earth")
-// a.NotContains({"Hello": "World"}, "Earth")
-func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- NotContains(a.t, s, contains, msgAndArgs...)
-}
-
-// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
-// specified substring or element.
-//
-// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted")
-// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted")
-// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted")
-func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- NotContainsf(a.t, s, contains, msg, args...)
-}
-
-// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
-//
-// if a.NotEmpty(obj) {
-// assert.Equal(t, "two", obj[1])
-// }
-func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- NotEmpty(a.t, object, msgAndArgs...)
-}
-
-// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
-//
-// if a.NotEmptyf(obj, "error message %s", "formatted") {
-// assert.Equal(t, "two", obj[1])
-// }
-func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- NotEmptyf(a.t, object, msg, args...)
-}
-
-// NotEqual asserts that the specified values are NOT equal.
-//
-// a.NotEqual(obj1, obj2)
-//
-// Pointer variable equality is determined based on the equality of the
-// referenced values (as opposed to the memory addresses).
-func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- NotEqual(a.t, expected, actual, msgAndArgs...)
-}
-
-// NotEqualValues asserts that two objects are not equal even when converted to the same type
-//
-// a.NotEqualValues(obj1, obj2)
-func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- NotEqualValues(a.t, expected, actual, msgAndArgs...)
-}
-
-// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
-//
-// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted")
-func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- NotEqualValuesf(a.t, expected, actual, msg, args...)
-}
-
-// NotEqualf asserts that the specified values are NOT equal.
-//
-// a.NotEqualf(obj1, obj2, "error message %s", "formatted")
-//
-// Pointer variable equality is determined based on the equality of the
-// referenced values (as opposed to the memory addresses).
-func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- NotEqualf(a.t, expected, actual, msg, args...)
-}
-
-// NotErrorIs asserts that at none of the errors in err's chain matches target.
-// This is a wrapper for errors.Is.
-func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- NotErrorIs(a.t, err, target, msgAndArgs...)
-}
-
-// NotErrorIsf asserts that at none of the errors in err's chain matches target.
-// This is a wrapper for errors.Is.
-func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- NotErrorIsf(a.t, err, target, msg, args...)
-}
-
-// NotNil asserts that the specified object is not nil.
-//
-// a.NotNil(err)
-func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- NotNil(a.t, object, msgAndArgs...)
-}
-
-// NotNilf asserts that the specified object is not nil.
-//
-// a.NotNilf(err, "error message %s", "formatted")
-func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- NotNilf(a.t, object, msg, args...)
-}
-
-// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
-//
-// a.NotPanics(func(){ RemainCalm() })
-func (a *Assertions) NotPanics(f assert.PanicTestFunc, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- NotPanics(a.t, f, msgAndArgs...)
-}
-
-// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
-//
-// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted")
-func (a *Assertions) NotPanicsf(f assert.PanicTestFunc, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- NotPanicsf(a.t, f, msg, args...)
-}
-
-// NotRegexp asserts that a specified regexp does not match a string.
-//
-// a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
-// a.NotRegexp("^start", "it's not starting")
-func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- NotRegexp(a.t, rx, str, msgAndArgs...)
-}
-
-// NotRegexpf asserts that a specified regexp does not match a string.
-//
-// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
-// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted")
-func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- NotRegexpf(a.t, rx, str, msg, args...)
-}
-
-// NotSame asserts that two pointers do not reference the same object.
-//
-// a.NotSame(ptr1, ptr2)
-//
-// Both arguments must be pointer variables. Pointer variable sameness is
-// determined based on the equality of both type and value.
-func (a *Assertions) NotSame(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- NotSame(a.t, expected, actual, msgAndArgs...)
-}
-
-// NotSamef asserts that two pointers do not reference the same object.
-//
-// a.NotSamef(ptr1, ptr2, "error message %s", "formatted")
-//
-// Both arguments must be pointer variables. Pointer variable sameness is
-// determined based on the equality of both type and value.
-func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- NotSamef(a.t, expected, actual, msg, args...)
-}
-
-// NotSubset asserts that the specified list(array, slice...) contains not all
-// elements given in the specified subset(array, slice...).
-//
-// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
-func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- NotSubset(a.t, list, subset, msgAndArgs...)
-}
-
-// NotSubsetf asserts that the specified list(array, slice...) contains not all
-// elements given in the specified subset(array, slice...).
-//
-// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
-func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- NotSubsetf(a.t, list, subset, msg, args...)
-}
-
-// NotZero asserts that i is not the zero value for its type.
-func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- NotZero(a.t, i, msgAndArgs...)
-}
-
-// NotZerof asserts that i is not the zero value for its type.
-func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- NotZerof(a.t, i, msg, args...)
-}
-
-// Panics asserts that the code inside the specified PanicTestFunc panics.
-//
-// a.Panics(func(){ GoCrazy() })
-func (a *Assertions) Panics(f assert.PanicTestFunc, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Panics(a.t, f, msgAndArgs...)
-}
-
-// PanicsWithError asserts that the code inside the specified PanicTestFunc
-// panics, and that the recovered panic value is an error that satisfies the
-// EqualError comparison.
-//
-// a.PanicsWithError("crazy error", func(){ GoCrazy() })
-func (a *Assertions) PanicsWithError(errString string, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- PanicsWithError(a.t, errString, f, msgAndArgs...)
-}
-
-// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc
-// panics, and that the recovered panic value is an error that satisfies the
-// EqualError comparison.
-//
-// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
-func (a *Assertions) PanicsWithErrorf(errString string, f assert.PanicTestFunc, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- PanicsWithErrorf(a.t, errString, f, msg, args...)
-}
-
-// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
-// the recovered panic value equals the expected panic value.
-//
-// a.PanicsWithValue("crazy error", func(){ GoCrazy() })
-func (a *Assertions) PanicsWithValue(expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- PanicsWithValue(a.t, expected, f, msgAndArgs...)
-}
-
-// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
-// the recovered panic value equals the expected panic value.
-//
-// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
-func (a *Assertions) PanicsWithValuef(expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- PanicsWithValuef(a.t, expected, f, msg, args...)
-}
-
-// Panicsf asserts that the code inside the specified PanicTestFunc panics.
-//
-// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted")
-func (a *Assertions) Panicsf(f assert.PanicTestFunc, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Panicsf(a.t, f, msg, args...)
-}
-
-// Positive asserts that the specified element is positive
-//
-// a.Positive(1)
-// a.Positive(1.23)
-func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Positive(a.t, e, msgAndArgs...)
-}
-
-// Positivef asserts that the specified element is positive
-//
-// a.Positivef(1, "error message %s", "formatted")
-// a.Positivef(1.23, "error message %s", "formatted")
-func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Positivef(a.t, e, msg, args...)
-}
-
-// Regexp asserts that a specified regexp matches a string.
-//
-// a.Regexp(regexp.MustCompile("start"), "it's starting")
-// a.Regexp("start...$", "it's not starting")
-func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Regexp(a.t, rx, str, msgAndArgs...)
-}
-
-// Regexpf asserts that a specified regexp matches a string.
-//
-// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
-// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted")
-func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Regexpf(a.t, rx, str, msg, args...)
-}
-
-// Same asserts that two pointers reference the same object.
-//
-// a.Same(ptr1, ptr2)
-//
-// Both arguments must be pointer variables. Pointer variable sameness is
-// determined based on the equality of both type and value.
-func (a *Assertions) Same(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Same(a.t, expected, actual, msgAndArgs...)
-}
-
-// Samef asserts that two pointers reference the same object.
-//
-// a.Samef(ptr1, ptr2, "error message %s", "formatted")
-//
-// Both arguments must be pointer variables. Pointer variable sameness is
-// determined based on the equality of both type and value.
-func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Samef(a.t, expected, actual, msg, args...)
-}
-
-// Subset asserts that the specified list(array, slice...) contains all
-// elements given in the specified subset(array, slice...).
-//
-// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
-func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Subset(a.t, list, subset, msgAndArgs...)
-}
-
-// Subsetf asserts that the specified list(array, slice...) contains all
-// elements given in the specified subset(array, slice...).
-//
-// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
-func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Subsetf(a.t, list, subset, msg, args...)
-}
-
-// True asserts that the specified value is true.
-//
-// a.True(myBool)
-func (a *Assertions) True(value bool, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- True(a.t, value, msgAndArgs...)
-}
-
-// Truef asserts that the specified value is true.
-//
-// a.Truef(myBool, "error message %s", "formatted")
-func (a *Assertions) Truef(value bool, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Truef(a.t, value, msg, args...)
-}
-
-// WithinDuration asserts that the two times are within duration delta of each other.
-//
-// a.WithinDuration(time.Now(), time.Now(), 10*time.Second)
-func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- WithinDuration(a.t, expected, actual, delta, msgAndArgs...)
-}
-
-// WithinDurationf asserts that the two times are within duration delta of each other.
-//
-// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
-func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- WithinDurationf(a.t, expected, actual, delta, msg, args...)
-}
-
-// YAMLEq asserts that two YAML strings are equivalent.
-func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- YAMLEq(a.t, expected, actual, msgAndArgs...)
-}
-
-// YAMLEqf asserts that two YAML strings are equivalent.
-func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- YAMLEqf(a.t, expected, actual, msg, args...)
-}
-
-// Zero asserts that i is the zero value for its type.
-func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Zero(a.t, i, msgAndArgs...)
-}
-
-// Zerof asserts that i is the zero value for its type.
-func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- Zerof(a.t, i, msg, args...)
-}
diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl b/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl
deleted file mode 100644
index 54124df1..00000000
--- a/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl
+++ /dev/null
@@ -1,5 +0,0 @@
-{{.CommentWithoutT "a"}}
-func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) {
- if h, ok := a.t.(tHelper); ok { h.Helper() }
- {{.DocInfo.Name}}(a.t, {{.ForwardedParams}})
-}
diff --git a/vendor/github.com/stretchr/testify/require/requirements.go b/vendor/github.com/stretchr/testify/require/requirements.go
deleted file mode 100644
index 91772dfe..00000000
--- a/vendor/github.com/stretchr/testify/require/requirements.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package require
-
-// TestingT is an interface wrapper around *testing.T
-type TestingT interface {
- Errorf(format string, args ...interface{})
- FailNow()
-}
-
-type tHelper interface {
- Helper()
-}
-
-// ComparisonAssertionFunc is a common function prototype when comparing two values. Can be useful
-// for table driven tests.
-type ComparisonAssertionFunc func(TestingT, interface{}, interface{}, ...interface{})
-
-// ValueAssertionFunc is a common function prototype when validating a single value. Can be useful
-// for table driven tests.
-type ValueAssertionFunc func(TestingT, interface{}, ...interface{})
-
-// BoolAssertionFunc is a common function prototype when validating a bool value. Can be useful
-// for table driven tests.
-type BoolAssertionFunc func(TestingT, bool, ...interface{})
-
-// ErrorAssertionFunc is a common function prototype when validating an error value. Can be useful
-// for table driven tests.
-type ErrorAssertionFunc func(TestingT, error, ...interface{})
-
-//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=require -template=require.go.tmpl -include-format-funcs"
diff --git a/vendor/github.com/timshannon/badgerhold/.gitignore b/vendor/github.com/timshannon/badgerhold/.gitignore
deleted file mode 100644
index f1c181ec..00000000
--- a/vendor/github.com/timshannon/badgerhold/.gitignore
+++ /dev/null
@@ -1,12 +0,0 @@
-# Binaries for programs and plugins
-*.exe
-*.exe~
-*.dll
-*.so
-*.dylib
-
-# Test binary, build with `go test -c`
-*.test
-
-# Output of the go coverage tool, specifically when used with LiteIDE
-*.out
diff --git a/vendor/github.com/timshannon/badgerhold/.travis.yml b/vendor/github.com/timshannon/badgerhold/.travis.yml
deleted file mode 100644
index 4c36a28c..00000000
--- a/vendor/github.com/timshannon/badgerhold/.travis.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-language: go
-
-install:
- - go get ./...
- - go get golang.org/x/tools/cmd/cover
- - go get github.com/mattn/goveralls
-
-script:
- - go test -v -covermode=count -coverprofile=coverage.out
- - $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci
diff --git a/vendor/github.com/timshannon/badgerhold/LICENSE b/vendor/github.com/timshannon/badgerhold/LICENSE
deleted file mode 100644
index 3a97d3bb..00000000
--- a/vendor/github.com/timshannon/badgerhold/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2019 Tim Shannon
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/timshannon/badgerhold/README.md b/vendor/github.com/timshannon/badgerhold/README.md
deleted file mode 100644
index 6dc48392..00000000
--- a/vendor/github.com/timshannon/badgerhold/README.md
+++ /dev/null
@@ -1,264 +0,0 @@
-# BadgerHold
-
-[![Build Status](https://travis-ci.org/timshannon/badgerhold.svg?branch=master)](https://travis-ci.org/timshannon/badgerhold) [![GoDoc](https://godoc.org/github.com/timshannon/badgerhold?status.svg)](https://godoc.org/github.com/timshannon/badgerhold) [![Coverage Status](https://coveralls.io/repos/github/timshannon/badgerhold/badge.svg?branch=master)](https://coveralls.io/github/timshannon/badgerhold?branch=master) [![Go Report Card](https://goreportcard.com/badge/github.com/timshannon/badgerhold)](https://goreportcard.com/report/github.com/timshannon/badgerhold)
-
-BadgerHold is a simple querying and indexing layer on top of a [Badger](https://github.com/dgraph-io/badger) instance. The goal is to create a simple, higher level interface on top of Badger DB that simplifies dealing with Go Types and finding data, but exposes the underlying Badger DB for customizing as you wish. By default the encoding used is Gob, so feel free to use the GobEncoder/Decoder interface for faster serialization. Or, alternately, you can use any serialization you want by supplying encode / decode funcs to the `Options` struct on Open.
-
-One Go Type will be prefixed with it's type name, so you can store multiple types in a single Badger database with conflicts.
-
-This project is a rewrite of the [BoltHold](https://github.com/timshannon/bolthold) project on the Badger KV database instead of [Bolt](https://github.com/etcd-io/bbolt). For a performance comparison between bolt and badger, see https://blog.dgraph.io/post/badger-lmdb-boltdb/. I've written up my own comparison of the two focusing on characteristics _other_ than performance here: https://tech.townsourced.com/post/boltdb-vs-badger/.
-
-## Indexes
-
-Indexes allow you to skip checking any records that don't meet your index criteria. If you have 1000 records and only 10 of them are of the Division you want to deal with, then you don't need to check to see if the other 990 records match your query criteria if you create an index on the Division field. The downside of an index is added disk reads and writes on every write operation. For read heavy operations datasets, indexes can be very useful.
-
-In every BadgerHold store, there will be a reserved bucket _\_indexes_ which will be used to hold indexes that point back to another bucket's Key system. Indexes will be defined by setting the `badgerhold:"index"` struct tag on a field in a type.
-
-```Go
-type Person struct {
- Name string
- Division string `badgerhold:"index"`
-}
-
-// alternate struct tag if you wish to specify the index name
-type Person struct {
- Name string
- Division string `badgerholdIndex:"IdxDivision"`
-}
-```
-
-This means that there will be an index created for `Division` that will contain the set of unique divisions, and the main record keys they refer to.
-
-Optionally, you can implement the `Storer` interface, to specify your own indexes, rather than using the `badgerHoldIndex` struct tag.
-
-## Queries
-
-Queries are chain-able constructs that filters out any data that doesn't match it's criteria. An index will be used if the `.Index()` chain is called, otherwise BadgerHold won't use any index.
-
-Queries will look like this:
-
-```Go
-s.Find(&result, badgerhold.Where("FieldName").Eq(value).And("AnotherField").Lt(AnotherValue).Or(badgerhold.Where("FieldName").Eq(anotherValue)))
-```
-
-Fields must be exported, and thus always need to start with an upper-case letter. Available operators include:
-
-- Equal - `Where("field").Eq(value)`
-- Not Equal - `Where("field").Ne(value)`
-- Greater Than - `Where("field").Gt(value)`
-- Less Than - `Where("field").Lt(value)`
-- Less than or Equal To - `Where("field").Le(value)`
-- Greater Than or Equal To - `Where("field").Ge(value)`
-- In - `Where("field").In(val1, val2, val3)`
-- IsNil - `Where("field").IsNil()`
-- Regular Expression - `Where("field").RegExp(regexp.MustCompile("ea"))`
-- Matches Function - `Where("field").MatchFunc(func(ra *RecordAccess) (bool, error))`
-- Skip - `Where("field").Eq(value).Skip(10)`
-- Limit - `Where("field").Eq(value).Limit(10)`
-- SortBy - `Where("field").Eq(value).SortBy("field1", "field2")`
-- Reverse - `Where("field").Eq(value).SortBy("field").Reverse()`
-- Index - `Where("field").Eq(value).Index("indexName")`
-
-If you want to run a query's criteria against the Key value, you can use the `badgerhold.Key` constant:
-
-```Go
-store.Find(&result, badgerhold.Where(badgerhold.Key).Ne(value))
-```
-
-You can access nested structure fields in queries like this:
-
-```Go
-type Repo struct {
- Name string
- Contact ContactPerson
-}
-
-type ContactPerson struct {
- Name string
-}
-
-store.Find(&repo, badgerhold.Where("Contact.Name").Eq("some-name")
-```
-
-Instead of passing in a specific value to compare against in a query, you can compare against another field in the same struct. Consider the following struct:
-
-```Go
-type Person struct {
- Name string
- Birth time.Time
- Death time.Time
-}
-```
-
-If you wanted to find any invalid records where a Person's death was before their birth, you could do the following:
-
-```Go
-store.Find(&result, badgerhold.Where("Death").Lt(badgerhold.Field("Birth")))
-```
-
-Queries can be used in more than just selecting data. You can delete or update data that matches a query.
-
-Using the example above, if you wanted to remove all of the invalid records where Death < Birth:
-
-```Go
-// you must pass in a sample type, so BadgerHold knows which bucket to use and what indexes to update
-store.DeleteMatching(&Person{}, badgerhold.Where("Death").Lt(badgerhold.Field("Birth")))
-
-```
-
-Or if you wanted to update all the invalid records to flip/flop the Birth and Death dates:
-
-```Go
-
-store.UpdateMatching(&Person{}, badgerhold.Where("Death").Lt(badgerhold.Field("Birth")), func(record interface{}) error {
- update, ok := record.(*Person) // record will always be a pointer
- if !ok {
- return fmt.Errorf("Record isn't the correct type! Wanted Person, got %T", record)
- }
-
- update.Birth, update.Death = update.Death, update.Birth
-
- return nil
-})
-```
-
-### Keys in Structs
-
-A common scenario is to store the badgerhold Key in the same struct that is stored in the badgerDB value. You can automatically populate a record's Key in a struct by using the `badgerhold:"key"` struct tag when running `Find` queries.
-
-Another common scenario is to insert data with an auto-incrementing key assigned by the database. When performing an `Insert`, if the type of the key matches the type of the `badgerhold:"key"` tagged field, the data is passed in by reference, **and** the field's current value is the zero-value for that type, then it is set on the data _before_ insertion.
-
-```Go
-type Employee struct {
- ID uint64 `badgerhold:"key"`
- FirstName string
- LastName string
- Division string
- Hired time.Time
-}
-
-// old struct tag, currenty still supported but may be deprecated in the future
-type Employee struct {
- ID uint64 `badgerholdKey`
- FirstName string
- LastName string
- Division string
- Hired time.Time
-}
-```
-
-Badgerhold assumes only one of such struct tags exists. If a value already exists in the key field, it will be overwritten.
-
-If you want to insert an auto-incrementing Key you can pass the `badgerhold.NextSequence()` func as the Key value.
-
-```Go
-err := store.Insert(badgerhold.NextSequence(), data)
-```
-
-The key value will be a `uint64`.
-
-If you want to know the value of the auto-incrementing Key that was generated using `badgerhold.NextSequence()`, then make sure to pass a pointer to your data and that the `badgerholdKey` tagged field is of type `uint64`.
-
-```Go
-err := store.Insert(badgerhold.NextSequence(), &data)
-```
-
-### Unique Constraints
-
-You can create a unique constraint on a given field by using the `badgerhold:"unique"` struct tag:
-
-```Go
-type User struct {
- Name string
- Email string `badgerhold:"unique"` // this field will be indexed with a unique constraint
-}
-```
-
-The example above will only allow one record of type `User` to exist with a given `Email` field. Any insert, update or upsert that would violate that constraint will fail and return the `badgerhold.ErrUniqueExists` error.
-
-### Aggregate Queries
-
-Aggregate queries are queries that group results by a field. For example, lets say you had a collection of employees:
-
-```Go
-type Employee struct {
- FirstName string
- LastName string
- Division string
- Hired time.Time
-}
-```
-
-And you wanted to find the most senior (first hired) employee in each division:
-
-```Go
-result, err := store.FindAggregate(&Employee{}, nil, "Division") //nil query matches against all records
-```
-
-This will return a slice of `Aggregate Result` from which you can extract your groups and find Min, Max, Avg, Count, etc.
-
-```Go
-for i := range result {
- var division string
- employee := &Employee{}
-
- result[i].Group(&division)
- result[i].Min("Hired", employee)
-
- fmt.Printf("The most senior employee in the %s division is %s.\n",
- division, employee.FirstName + " " + employee.LastName)
-}
-```
-
-Aggregate queries become especially powerful when combined with the sub-querying capability of `MatchFunc`.
-
-Many more examples of queries can be found in the [find_test.go](https://github.com/timshannon/badgerhold/blob/master/find_test.go) file in this repository.
-
-## Comparing
-
-Just like with Go, types must be the same in order to be compared with each other. You cannot compare an int to a int32. The built-in Go comparable types (ints, floats, strings, etc) will work as expected. Other types from the standard library can also be compared such as `time.Time`, `big.Rat`, `big.Int`, and `big.Float`. If there are other standard library types that I missed, let me know.
-
-You can compare any custom type either by using the `MatchFunc` criteria, or by satisfying the `Comparer` interface with your type by adding the Compare method: `Compare(other interface{}) (int, error)`.
-
-If a type doesn't have a predefined comparer, and doesn't satisfy the Comparer interface, then the types value is converted to a string and compared lexicographically.
-
-## Behavior Changes
-
-Since BadgerHold is a higher level interface than Badger DB, there are some added helpers. Instead of _Put_, you have the options of:
-
-- _Insert_ - Fails if key already exists.
-- _Update_ - Fails if key doesn't exist `ErrNotFound`.
-- _Upsert_ - If key doesn't exist, it inserts the data, otherwise it updates the existing record.
-
-When getting data instead of returning `nil` if a value doesn't exist, BadgerHold returns `badgerhold.ErrNotFound`, and similarly when deleting data, instead of silently continuing if a value isn't found to delete, BadgerHold returns `badgerhold.ErrNotFound`. The exception to this is when using query based functions such as `Find` (returns an empty slice), `DeleteMatching` and `UpdateMatching` where no error is returned.
-
-## When should I use BadgerHold?
-
-BadgerHold will be useful in the same scenarios where BadgerDB is useful, with the added benefit of being able to retire some of your data filtering code and possibly improved performance.
-
-You can also use it instead of SQLite for many scenarios. BadgerHold's main benefit over SQLite is its simplicity when working with Go Types. There is no need for an ORM layer to translate records to types, simply put types in, and get types out. You also don't have to deal with database initialization. Usually with SQLite you'll need several scripts to create the database, create the tables you expect, and create any indexes. With BadgerHold you simply open a new file and put any type of data you want in it.
-
-```Go
-options := badgerhold.DefaultOptions
-options.Dir = "data"
-options.ValueDir = "data"
-
-store, err := badgerhold.Open(options)
-defer store.Close()
-if err != nil {
- // handle error
- log.Fatal(err)
-}
-
-
-err = store.Insert("key", &Item{
- Name: "Test Name",
- Created: time.Now(),
-})
-```
-
-That's it!
-
-Badgerhold currently has over 80% coverage in unit tests, and it's backed by BadgerDB which is a very solid and well built piece of software, so I encourage you to give it a try.
-
-If you end up using BadgerHold, I'd love to hear about it.
diff --git a/vendor/github.com/timshannon/badgerhold/aggregate.go b/vendor/github.com/timshannon/badgerhold/aggregate.go
deleted file mode 100644
index dae88d1f..00000000
--- a/vendor/github.com/timshannon/badgerhold/aggregate.go
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright 2019 Tim Shannon. All rights reserved.
-// Use of this source code is governed by the MIT license
-// that can be found in the LICENSE file.
-
-package badgerhold
-
-import (
- "fmt"
- "reflect"
- "sort"
-
- "github.com/dgraph-io/badger"
-)
-
-// AggregateResult allows you to access the results of an aggregate query
-type AggregateResult struct {
- reduction []reflect.Value // always pointers
- group []reflect.Value
- sortby string
-}
-
-// Group returns the field grouped by in the query
-func (a *AggregateResult) Group(result ...interface{}) {
- for i := range result {
- resultVal := reflect.ValueOf(result[i])
- if resultVal.Kind() != reflect.Ptr {
- panic("result argument must be an address")
- }
-
- if i >= len(a.group) {
- panic(fmt.Sprintf("There is not %d elements in the grouping", i))
- }
-
- resultVal.Elem().Set(a.group[i])
- }
-}
-
-// Reduction is the collection of records that are part of the AggregateResult Group
-func (a *AggregateResult) Reduction(result interface{}) {
- resultVal := reflect.ValueOf(result)
-
- if resultVal.Kind() != reflect.Ptr || resultVal.Elem().Kind() != reflect.Slice {
- panic("result argument must be a slice address")
- }
-
- sliceVal := resultVal.Elem()
-
- elType := sliceVal.Type().Elem()
-
- for i := range a.reduction {
- if elType.Kind() == reflect.Ptr {
- sliceVal = reflect.Append(sliceVal, a.reduction[i])
- } else {
- sliceVal = reflect.Append(sliceVal, a.reduction[i].Elem())
- }
- }
-
- resultVal.Elem().Set(sliceVal.Slice(0, sliceVal.Len()))
-}
-
-type aggregateResultSort AggregateResult
-
-func (a *aggregateResultSort) Len() int { return len(a.reduction) }
-func (a *aggregateResultSort) Swap(i, j int) {
- a.reduction[i], a.reduction[j] = a.reduction[j], a.reduction[i]
-}
-func (a *aggregateResultSort) Less(i, j int) bool {
- //reduction values are always pointers
- iVal := a.reduction[i].Elem().FieldByName(a.sortby)
- if !iVal.IsValid() {
- panic(fmt.Sprintf("The field %s does not exist in the type %s", a.sortby, a.reduction[i].Type()))
- }
-
- jVal := a.reduction[j].Elem().FieldByName(a.sortby)
- if !jVal.IsValid() {
- panic(fmt.Sprintf("The field %s does not exist in the type %s", a.sortby, a.reduction[j].Type()))
- }
-
- c, err := compare(iVal.Interface(), jVal.Interface())
- if err != nil {
- panic(err)
- }
-
- return c == -1
-}
-
-// Sort sorts the aggregate reduction by the passed in field in ascending order
-// Sort is called automatically by calls to Min / Max to get the min and max values
-func (a *AggregateResult) Sort(field string) {
- if !startsUpper(field) {
- panic("The first letter of a field must be upper-case")
- }
- if a.sortby == field {
- // already sorted
- return
- }
-
- a.sortby = field
- sort.Sort((*aggregateResultSort)(a))
-}
-
-// Max Returns the maxiumum value of the Aggregate Grouping, uses the Comparer interface
-func (a *AggregateResult) Max(field string, result interface{}) {
- a.Sort(field)
-
- resultVal := reflect.ValueOf(result)
- if resultVal.Kind() != reflect.Ptr {
- panic("result argument must be an address")
- }
-
- if resultVal.IsNil() {
- panic("result argument must not be nil")
- }
-
- resultVal.Elem().Set(a.reduction[len(a.reduction)-1].Elem())
-}
-
-// Min returns the minimum value of the Aggregate Grouping, uses the Comparer interface
-func (a *AggregateResult) Min(field string, result interface{}) {
- a.Sort(field)
-
- resultVal := reflect.ValueOf(result)
- if resultVal.Kind() != reflect.Ptr {
- panic("result argument must be an address")
- }
-
- if resultVal.IsNil() {
- panic("result argument must not be nil")
- }
-
- resultVal.Elem().Set(a.reduction[0].Elem())
-}
-
-// Avg returns the average float value of the aggregate grouping
-// panics if the field cannot be converted to an float64
-func (a *AggregateResult) Avg(field string) float64 {
- sum := a.Sum(field)
- return sum / float64(len(a.reduction))
-}
-
-// Sum returns the sum value of the aggregate grouping
-// panics if the field cannot be converted to an float64
-func (a *AggregateResult) Sum(field string) float64 {
- var sum float64
-
- for i := range a.reduction {
- fVal := a.reduction[i].Elem().FieldByName(field)
- if !fVal.IsValid() {
- panic(fmt.Sprintf("The field %s does not exist in the type %s", field, a.reduction[i].Type()))
- }
-
- sum += tryFloat(fVal)
- }
-
- return sum
-}
-
-// Count returns the number of records in the aggregate grouping
-func (a *AggregateResult) Count() int {
- return len(a.reduction)
-}
-
-// FindAggregate returns an aggregate grouping for the passed in query
-// groupBy is optional
-func (s *Store) FindAggregate(dataType interface{}, query *Query, groupBy ...string) ([]*AggregateResult, error) {
- var result []*AggregateResult
- var err error
- err = s.Badger().View(func(tx *badger.Txn) error {
- result, err = s.TxFindAggregate(tx, dataType, query, groupBy...)
- return err
- })
-
- if err != nil {
- return nil, err
- }
-
- return result, nil
-}
-
-// TxFindAggregate is the same as FindAggregate, but you specify your own transaction
-// groupBy is optional
-func (s *Store) TxFindAggregate(tx *badger.Txn, dataType interface{}, query *Query,
- groupBy ...string) ([]*AggregateResult, error) {
- return aggregateQuery(tx, dataType, query, groupBy...)
-}
-
-func tryFloat(val reflect.Value) float64 {
- switch val.Kind() {
- case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int8:
- return float64(val.Int())
- case reflect.Uint, reflect.Uint16,
- reflect.Uint32, reflect.Uint64, reflect.Uint8:
- return float64(val.Uint())
- case reflect.Float32, reflect.Float64:
- return val.Float()
- default:
- panic(fmt.Sprintf("The field is of Kind %s and cannot be converted to a float64", val.Kind()))
- }
-}
diff --git a/vendor/github.com/timshannon/badgerhold/compare.go b/vendor/github.com/timshannon/badgerhold/compare.go
deleted file mode 100644
index d12fc2e9..00000000
--- a/vendor/github.com/timshannon/badgerhold/compare.go
+++ /dev/null
@@ -1,311 +0,0 @@
-// Copyright 2019 Tim Shannon. All rights reserved.
-// Use of this source code is governed by the MIT license
-// that can be found in the LICENSE file.
-
-package badgerhold
-
-import (
- "fmt"
- "math/big"
- "reflect"
- "time"
-)
-
-// ErrTypeMismatch is the error thrown when two types cannot be compared
-type ErrTypeMismatch struct {
- Value interface{}
- Other interface{}
-}
-
-func (e *ErrTypeMismatch) Error() string {
- return fmt.Sprintf("%v (%T) cannot be compared with %v (%T)", e.Value, e.Value, e.Other, e.Other)
-}
-
-//Comparer compares a type against the encoded value in the store. The result should be 0 if current==other,
-// -1 if current < other, and +1 if current > other.
-// If a field in a struct doesn't specify a comparer, then the default comparison is used (convert to string and compare)
-// this interface is already handled for standard Go Types as well as more complex ones such as those in time and big
-// an error is returned if the type cannot be compared
-// The concrete type will always be passedin, not a pointer
-type Comparer interface {
- Compare(other interface{}) (int, error)
-}
-
-func (c *Criterion) compare(rowValue, criterionValue interface{}, currentRow interface{}) (int, error) {
- if rowValue == nil || criterionValue == nil {
- if rowValue == criterionValue {
- return 0, nil
- }
- return 0, &ErrTypeMismatch{rowValue, criterionValue}
- }
-
- if _, ok := criterionValue.(Field); ok {
- fVal := reflect.ValueOf(currentRow).Elem().FieldByName(string(criterionValue.(Field)))
- if !fVal.IsValid() {
- return 0, fmt.Errorf("The field %s does not exist in the type %s", criterionValue,
- reflect.TypeOf(currentRow))
- }
-
- criterionValue = fVal.Interface()
- }
-
- value := rowValue
-
- for reflect.TypeOf(value).Kind() == reflect.Ptr {
- value = reflect.ValueOf(value).Elem().Interface()
- }
-
- other := criterionValue
- for reflect.TypeOf(other).Kind() == reflect.Ptr {
- other = reflect.ValueOf(other).Elem().Interface()
- }
-
- return compare(value, other)
-}
-
-func compare(value, other interface{}) (int, error) {
- switch t := value.(type) {
- case time.Time:
- tother, ok := other.(time.Time)
- if !ok {
- return 0, &ErrTypeMismatch{t, other}
- }
-
- if value.(time.Time).Equal(tother) {
- return 0, nil
- }
-
- if value.(time.Time).Before(tother) {
- return -1, nil
- }
- return 1, nil
- case big.Float:
- o, ok := other.(big.Float)
- if !ok {
- return 0, &ErrTypeMismatch{t, other}
- }
-
- v := value.(big.Float)
-
- return v.Cmp(&o), nil
- case big.Int:
- o, ok := other.(big.Int)
- if !ok {
- return 0, &ErrTypeMismatch{t, other}
- }
-
- v := value.(big.Int)
-
- return v.Cmp(&o), nil
- case big.Rat:
- o, ok := other.(big.Rat)
- if !ok {
- return 0, &ErrTypeMismatch{t, other}
- }
-
- v := value.(big.Rat)
-
- return v.Cmp(&o), nil
- case int:
- tother, ok := other.(int)
- if !ok {
- return 0, &ErrTypeMismatch{t, other}
- }
-
- if value.(int) == tother {
- return 0, nil
- }
-
- if value.(int) < tother {
- return -1, nil
- }
- return 1, nil
- case int8:
- tother, ok := other.(int8)
- if !ok {
- return 0, &ErrTypeMismatch{t, other}
- }
-
- if value.(int8) == tother {
- return 0, nil
- }
-
- if value.(int8) < tother {
- return -1, nil
- }
- return 1, nil
-
- case int16:
- tother, ok := other.(int16)
- if !ok {
- return 0, &ErrTypeMismatch{t, other}
- }
-
- if value.(int16) == tother {
- return 0, nil
- }
-
- if value.(int16) < tother {
- return -1, nil
- }
- return 1, nil
- case int32:
- tother, ok := other.(int32)
- if !ok {
- return 0, &ErrTypeMismatch{t, other}
- }
-
- if value.(int32) == tother {
- return 0, nil
- }
-
- if value.(int32) < tother {
- return -1, nil
- }
- return 1, nil
-
- case int64:
- tother, ok := other.(int64)
- if !ok {
- return 0, &ErrTypeMismatch{t, other}
- }
-
- if value.(int64) == tother {
- return 0, nil
- }
-
- if value.(int64) < tother {
- return -1, nil
- }
- return 1, nil
- case uint:
- tother, ok := other.(uint)
- if !ok {
- return 0, &ErrTypeMismatch{t, other}
- }
-
- if value.(uint) == tother {
- return 0, nil
- }
-
- if value.(uint) < tother {
- return -1, nil
- }
- return 1, nil
- case uint8:
- tother, ok := other.(uint8)
- if !ok {
- return 0, &ErrTypeMismatch{t, other}
- }
-
- if value.(uint8) == tother {
- return 0, nil
- }
-
- if value.(uint8) < tother {
- return -1, nil
- }
- return 1, nil
-
- case uint16:
- tother, ok := other.(uint16)
- if !ok {
- return 0, &ErrTypeMismatch{t, other}
- }
-
- if value.(uint16) == tother {
- return 0, nil
- }
-
- if value.(uint16) < tother {
- return -1, nil
- }
- return 1, nil
- case uint32:
- tother, ok := other.(uint32)
- if !ok {
- return 0, &ErrTypeMismatch{t, other}
- }
-
- if value.(uint32) == tother {
- return 0, nil
- }
-
- if value.(uint32) < tother {
- return -1, nil
- }
- return 1, nil
-
- case uint64:
- tother, ok := other.(uint64)
- if !ok {
- return 0, &ErrTypeMismatch{t, other}
- }
-
- if value.(uint64) == tother {
- return 0, nil
- }
-
- if value.(uint64) < tother {
- return -1, nil
- }
- return 1, nil
- case float32:
- tother, ok := other.(float32)
- if !ok {
- return 0, &ErrTypeMismatch{t, other}
- }
-
- if value.(float32) == tother {
- return 0, nil
- }
-
- if value.(float32) < tother {
- return -1, nil
- }
- return 1, nil
- case float64:
- tother, ok := other.(float64)
- if !ok {
- return 0, &ErrTypeMismatch{t, other}
- }
-
- if value.(float64) == tother {
- return 0, nil
- }
-
- if value.(float64) < tother {
- return -1, nil
- }
- return 1, nil
- case string:
- tother, ok := other.(string)
- if !ok {
- return 0, &ErrTypeMismatch{t, other}
- }
-
- if value.(string) == tother {
- return 0, nil
- }
-
- if value.(string) < tother {
- return -1, nil
- }
- return 1, nil
- case Comparer:
- return value.(Comparer).Compare(other)
- default:
- valS := fmt.Sprintf("%s", value)
- otherS := fmt.Sprintf("%s", other)
- if valS == otherS {
- return 0, nil
- }
-
- if valS < otherS {
- return -1, nil
- }
-
- return 1, nil
- }
-
-}
diff --git a/vendor/github.com/timshannon/badgerhold/delete.go b/vendor/github.com/timshannon/badgerhold/delete.go
deleted file mode 100644
index 0c558841..00000000
--- a/vendor/github.com/timshannon/badgerhold/delete.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2019 Tim Shannon. All rights reserved.
-// Use of this source code is governed by the MIT license
-// that can be found in the LICENSE file.
-
-package badgerhold
-
-import (
- "reflect"
-
- "github.com/dgraph-io/badger"
-)
-
-// Delete deletes a record from the bolthold, datatype just needs to be an example of the type stored so that
-// the proper bucket and indexes are updated
-func (s *Store) Delete(key, dataType interface{}) error {
- return s.Badger().Update(func(tx *badger.Txn) error {
- return s.TxDelete(tx, key, dataType)
- })
-}
-
-// TxDelete is the same as Delete except it allows you specify your own transaction
-func (s *Store) TxDelete(tx *badger.Txn, key, dataType interface{}) error {
- storer := newStorer(dataType)
- gk, err := encodeKey(key, storer.Type())
-
- if err != nil {
- return err
- }
-
- value := reflect.New(reflect.TypeOf(dataType)).Interface()
-
- item, err := tx.Get(gk)
- if err == badger.ErrKeyNotFound {
- return ErrNotFound
- }
- if err != nil {
- return err
- }
-
- item.Value(func(bVal []byte) error {
- return decode(bVal, value)
- })
- if err != nil {
- return err
- }
-
- // delete data
- err = tx.Delete(gk)
-
- if err != nil {
- return err
- }
-
- // remove any indexes
- return indexDelete(storer, tx, gk, value)
-}
-
-// DeleteMatching deletes all of the records that match the passed in query
-func (s *Store) DeleteMatching(dataType interface{}, query *Query) error {
- return s.Badger().Update(func(tx *badger.Txn) error {
- return s.TxDeleteMatching(tx, dataType, query)
- })
-}
-
-// TxDeleteMatching does the same as DeleteMatching, but allows you to specify your own transaction
-func (s *Store) TxDeleteMatching(tx *badger.Txn, dataType interface{}, query *Query) error {
- return deleteQuery(tx, dataType, query)
-}
diff --git a/vendor/github.com/timshannon/badgerhold/doc.go b/vendor/github.com/timshannon/badgerhold/doc.go
deleted file mode 100644
index ae7469f4..00000000
--- a/vendor/github.com/timshannon/badgerhold/doc.go
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
-Package badgerhold is an indexing and querying layer on top of a badger DB. The goal is to allow easy, persistent storage
-and retrieval of Go types. badgerDB is an embedded key-value store, and badgerhold serves a similar use case however with
-a higher level interface for common uses of Badger.
-
-Go Types
-
-BadgerHold deals directly with Go Types. When inserting data, you pass in your structure directly. When querying data you
-pass in a pointer to a slice of the type you want to return. By default Gob encoding is used. You can put multiple
-different types into the same DB file and they (and their indexes) will be stored separately.
-
- err := store.Insert(1234, Item{
- Name: "Test Name",
- Created: time.Now(),
- })
-
- var result []Item
-
- err := store.Find(&result, query)
-
-
-Indexes
-
-BadgerHold will automatically create an index for any struct fields tags with "badgerholdIndex"
-
- type Item struct {
- ID int
- Name string
- Category string `badgerholdIndex:"Category"`
- Created time.Time
- }
-
-The first field specified in query will be used as the index (if one exists).
-
-Queries are chained together criteria that applies to a set of fields:
-
- badgerhold.Where("Name").Eq("John Doe").And("DOB").Lt(time.Now())
-
-
-*/
-package badgerhold
diff --git a/vendor/github.com/timshannon/badgerhold/encode.go b/vendor/github.com/timshannon/badgerhold/encode.go
deleted file mode 100644
index 420e2d36..00000000
--- a/vendor/github.com/timshannon/badgerhold/encode.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2019 Tim Shannon. All rights reserved.
-// Use of this source code is governed by the MIT license
-// that can be found in the LICENSE file.
-
-package badgerhold
-
-import (
- "bytes"
- "encoding/gob"
-)
-
-// EncodeFunc is a function for encoding a value into bytes
-type EncodeFunc func(value interface{}) ([]byte, error)
-
-// DecodeFunc is a function for decoding a value from bytes
-type DecodeFunc func(data []byte, value interface{}) error
-
-var encode EncodeFunc
-var decode DecodeFunc
-
-// DefaultEncode is the default encoding func for badgerhold (Gob)
-func DefaultEncode(value interface{}) ([]byte, error) {
- var buff bytes.Buffer
-
- en := gob.NewEncoder(&buff)
-
- err := en.Encode(value)
- if err != nil {
- return nil, err
- }
-
- return buff.Bytes(), nil
-}
-
-// DefaultDecode is the default decoding func for badgerhold (Gob)
-func DefaultDecode(data []byte, value interface{}) error {
- var buff bytes.Buffer
- de := gob.NewDecoder(&buff)
-
- _, err := buff.Write(data)
- if err != nil {
- return err
- }
-
- return de.Decode(value)
-}
-
-// encodeKey encodes key values with a type prefix which allows multiple different types
-// to exist in the badger DB
-func encodeKey(key interface{}, typeName string) ([]byte, error) {
- encoded, err := encode(key)
- if err != nil {
- return nil, err
- }
-
- return append(typePrefix(typeName), encoded...), nil
-}
-
-// decodeKey decodes the key value and removes the type prefix
-func decodeKey(data []byte, key interface{}, typeName string) error {
- return decode(data[len(typePrefix(typeName)):], key)
-}
diff --git a/vendor/github.com/timshannon/badgerhold/get.go b/vendor/github.com/timshannon/badgerhold/get.go
deleted file mode 100644
index 3da40d28..00000000
--- a/vendor/github.com/timshannon/badgerhold/get.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2019 Tim Shannon. All rights reserved.
-// Use of this source code is governed by the MIT license
-// that can be found in the LICENSE file.
-
-package badgerhold
-
-import (
- "errors"
- "reflect"
- "strings"
-
- "github.com/dgraph-io/badger"
-)
-
-// ErrNotFound is returned when no data is found for the given key
-var ErrNotFound = errors.New("No data found for this key")
-
-// Get retrieves a value from badgerhold and puts it into result. Result must be a pointer
-func (s *Store) Get(key, result interface{}) error {
- return s.Badger().View(func(tx *badger.Txn) error {
- return s.TxGet(tx, key, result)
- })
-}
-
-// TxGet allows you to pass in your own badger transaction to retrieve a value from the badgerhold and puts it
-// into result
-func (s *Store) TxGet(tx *badger.Txn, key, result interface{}) error {
- storer := newStorer(result)
-
- gk, err := encodeKey(key, storer.Type())
-
- if err != nil {
- return err
- }
-
- item, err := tx.Get(gk)
- if err == badger.ErrKeyNotFound {
- return ErrNotFound
- }
-
- err = item.Value(func(value []byte) error {
- return decode(value, result)
- })
-
- if err != nil {
- return err
- }
-
- tp := reflect.TypeOf(result)
- for tp.Kind() == reflect.Ptr {
- tp = tp.Elem()
- }
-
- var keyField string
-
- for i := 0; i < tp.NumField(); i++ {
- if strings.Contains(string(tp.Field(i).Tag), BadgerholdKeyTag) {
- keyField = tp.Field(i).Name
- break
- }
- }
-
- if keyField != "" {
- err := decodeKey(gk, reflect.ValueOf(result).Elem().FieldByName(keyField).Addr().Interface(), storer.Type())
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Find retrieves a set of values from the badgerhold that matches the passed in query
-// result must be a pointer to a slice.
-// The result of the query will be appended to the passed in result slice, rather than the passed in slice being
-// emptied.
-func (s *Store) Find(result interface{}, query *Query) error {
- return s.Badger().View(func(tx *badger.Txn) error {
- return s.TxFind(tx, result, query)
- })
-}
-
-// TxFind allows you to pass in your own badger transaction to retrieve a set of values from the badgerhold
-func (s *Store) TxFind(tx *badger.Txn, result interface{}, query *Query) error {
- return findQuery(tx, result, query)
-}
diff --git a/vendor/github.com/timshannon/badgerhold/index.go b/vendor/github.com/timshannon/badgerhold/index.go
deleted file mode 100644
index 3a445624..00000000
--- a/vendor/github.com/timshannon/badgerhold/index.go
+++ /dev/null
@@ -1,366 +0,0 @@
-// Copyright 2019 Tim Shannon. All rights reserved.
-// Use of this source code is governed by the MIT license
-// that can be found in the LICENSE file.
-
-package badgerhold
-
-import (
- "bytes"
- "reflect"
- "sort"
-
- "github.com/dgraph-io/badger"
-)
-
-const indexPrefix = "_bhIndex"
-
-// size of iterator keys stored in memory before more are fetched
-const iteratorKeyMinCacheSize = 100
-
-// Index is a function that returns the indexable, encoded bytes of the passed in value
-type Index struct {
- IndexFunc func(name string, value interface{}) ([]byte, error)
- Unique bool
-}
-
-// adds an item to the index
-func indexAdd(storer Storer, tx *badger.Txn, key []byte, data interface{}) error {
- indexes := storer.Indexes()
- for name, index := range indexes {
- err := indexUpdate(storer.Type(), name, index, tx, key, data, false)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// // removes an item from the index
-// // be sure to pass the data from the old record, not the new one
-func indexDelete(storer Storer, tx *badger.Txn, key []byte, originalData interface{}) error {
- indexes := storer.Indexes()
-
- for name, index := range indexes {
- err := indexUpdate(storer.Type(), name, index, tx, key, originalData, true)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// // adds or removes a specific index on an item
-func indexUpdate(typeName, indexName string, index Index, tx *badger.Txn, key []byte, value interface{},
- delete bool) error {
-
- indexKey, err := index.IndexFunc(indexName, value)
- if indexKey == nil {
- return nil
- }
-
- indexValue := make(keyList, 0)
-
- if err != nil {
- return err
- }
-
- indexKey = append(indexKeyPrefix(typeName, indexName), indexKey...)
-
- item, err := tx.Get(indexKey)
- if err != nil && err != badger.ErrKeyNotFound {
- return err
- }
-
- if err != badger.ErrKeyNotFound {
- if index.Unique && !delete {
- return ErrUniqueExists
- }
- err = item.Value(func(iVal []byte) error {
- return decode(iVal, &indexValue)
- })
- if err != nil {
- return err
- }
- }
-
- if delete {
- indexValue.remove(key)
- } else {
- indexValue.add(key)
- }
-
- if len(indexValue) == 0 {
- return tx.Delete(indexKey)
- }
-
- iVal, err := encode(indexValue)
- if err != nil {
- return err
- }
-
- return tx.Set(indexKey, iVal)
-}
-
-// indexKeyPrefix returns the prefix of the badger key where this index is stored
-func indexKeyPrefix(typeName, indexName string) []byte {
- return []byte(indexPrefix + ":" + typeName + ":" + indexName)
-}
-
-// keyList is a slice of unique, sorted keys([]byte) such as what an index points to
-type keyList [][]byte
-
-func (v *keyList) add(key []byte) {
- i := sort.Search(len(*v), func(i int) bool {
- return bytes.Compare((*v)[i], key) >= 0
- })
-
- if i < len(*v) && bytes.Equal((*v)[i], key) {
- // already added
- return
- }
-
- *v = append(*v, nil)
- copy((*v)[i+1:], (*v)[i:])
- (*v)[i] = key
-}
-
-func (v *keyList) remove(key []byte) {
- i := sort.Search(len(*v), func(i int) bool {
- return bytes.Compare((*v)[i], key) >= 0
- })
-
- if i < len(*v) {
- copy((*v)[i:], (*v)[i+1:])
- (*v)[len(*v)-1] = nil
- *v = (*v)[:len(*v)-1]
- }
-}
-
-func (v *keyList) in(key []byte) bool {
- i := sort.Search(len(*v), func(i int) bool {
- return bytes.Compare((*v)[i], key) >= 0
- })
-
- return (i < len(*v) && bytes.Equal((*v)[i], key))
-}
-
-func indexExists(it *badger.Iterator, typeName, indexName string) bool {
- iPrefix := indexKeyPrefix(typeName, indexName)
- tPrefix := typePrefix(typeName)
- // test if any data exists for type
- it.Seek(tPrefix)
- if !it.ValidForPrefix(tPrefix) {
- // store is empty for this data type so the index could possibly exist
- // we don't want to fail on a "bad index" because they could simply be running a query against
- // an empty dataset
- return true
- }
-
- // test if an index exists
- it.Seek(iPrefix)
- if it.ValidForPrefix(iPrefix) {
- return true
- }
-
- return false
-}
-
-type iterator struct {
- keyCache [][]byte
- nextKeys func(*badger.Iterator) ([][]byte, error)
- iter *badger.Iterator
- bookmark *iterBookmark
- lastSeek []byte
- tx *badger.Txn
- err error
-}
-
-// iterBookmark stores a seek location in a specific iterator
-// so that a single RW iterator can be shared within a single transaction
-type iterBookmark struct {
- iter *badger.Iterator
- seekKey []byte
-}
-
-func newIterator(tx *badger.Txn, typeName string, query *Query, bookmark *iterBookmark) *iterator {
- i := &iterator{
- tx: tx,
- }
-
- if bookmark != nil {
- i.iter = bookmark.iter
- } else {
- i.iter = tx.NewIterator(badger.DefaultIteratorOptions)
- }
-
- var prefix []byte
-
- if query.index != "" {
- query.badIndex = !indexExists(i.iter, typeName, query.index)
- }
-
- criteria := query.fieldCriteria[query.index]
- if hasMatchFunc(criteria) {
- // can't use indexes on matchFuncs as the entire record isn't available for testing in the passed
- // in function
- criteria = nil
- }
-
- // Key field or index not specified - test key against criteria (if it exists) or return everything
- if query.index == "" || len(criteria) == 0 {
- prefix = typePrefix(typeName)
- i.iter.Seek(prefix)
- i.nextKeys = func(iter *badger.Iterator) ([][]byte, error) {
- var nKeys [][]byte
-
- for len(nKeys) < iteratorKeyMinCacheSize {
- if !iter.ValidForPrefix(prefix) {
- return nKeys, nil
- }
-
- item := iter.Item()
- key := item.KeyCopy(nil)
- var ok bool
- if len(criteria) == 0 {
- // nothing to check return key for value testing
- ok = true
- } else {
-
- val := reflect.New(query.dataType)
-
- err := item.Value(func(v []byte) error {
- return decode(v, val.Interface())
- })
- if err != nil {
- return nil, err
- }
-
- ok, err = matchesAllCriteria(criteria, key, true, typeName, val.Interface())
- if err != nil {
- return nil, err
- }
- }
-
- if ok {
- nKeys = append(nKeys, key)
-
- }
- i.lastSeek = key
- iter.Next()
- }
- return nKeys, nil
- }
-
- return i
- }
-
- // indexed field, get keys from index
- prefix = indexKeyPrefix(typeName, query.index)
- i.iter.Seek(prefix)
- i.nextKeys = func(iter *badger.Iterator) ([][]byte, error) {
- var nKeys [][]byte
-
- for len(nKeys) < iteratorKeyMinCacheSize {
- if !iter.ValidForPrefix(prefix) {
- return nKeys, nil
- }
-
- item := iter.Item()
- key := item.KeyCopy(nil)
- // no currentRow on indexes as it refers to multiple rows
- // remove index prefix for matching
- ok, err := matchesAllCriteria(criteria, key[len(prefix):], true, "", nil)
- if err != nil {
- return nil, err
- }
-
- if ok {
- item.Value(func(v []byte) error {
- // append the slice of keys stored in the index
- var keys = make(keyList, 0)
- err := decode(v, &keys)
- if err != nil {
- return err
- }
-
- nKeys = append(nKeys, [][]byte(keys)...)
- return nil
- })
- }
-
- i.lastSeek = key
- iter.Next()
-
- }
- return nKeys, nil
-
- }
-
- return i
-}
-
-func (i *iterator) createBookmark() *iterBookmark {
- return &iterBookmark{
- iter: i.iter,
- seekKey: i.lastSeek,
- }
-}
-
-// Next returns the next key value that matches the iterators criteria
-// If no more kv's are available the return nil, if there is an error, they return nil
-// and iterator.Error() will return the error
-func (i *iterator) Next() (key []byte, value []byte) {
- if i.err != nil {
- return nil, nil
- }
-
- if len(i.keyCache) == 0 {
- newKeys, err := i.nextKeys(i.iter)
- if err != nil {
- i.err = err
- return nil, nil
- }
-
- if len(newKeys) == 0 {
- return nil, nil
- }
-
- i.keyCache = append(i.keyCache, newKeys...)
- }
-
- key = i.keyCache[0]
- i.keyCache = i.keyCache[1:]
-
- item, err := i.tx.Get(key)
- if err != nil {
- i.err = err
- return nil, nil
- }
-
- err = item.Value(func(val []byte) error {
- value = val
- return nil
- })
- if err != nil {
- i.err = err
- return nil, nil
- }
-
- return
-}
-
-// Error returns the last error, iterator.Next() will not continue if there is an error present
-func (i *iterator) Error() error {
- return i.err
-}
-
-func (i *iterator) Close() {
- if i.bookmark != nil {
- i.iter.Seek(i.bookmark.seekKey)
- return
- }
-
- i.iter.Close()
-}
diff --git a/vendor/github.com/timshannon/badgerhold/put.go b/vendor/github.com/timshannon/badgerhold/put.go
deleted file mode 100644
index 42631c31..00000000
--- a/vendor/github.com/timshannon/badgerhold/put.go
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright 2019 Tim Shannon. All rights reserved.
-// Use of this source code is governed by the MIT license
-// that can be found in the LICENSE file.
-
-package badgerhold
-
-import (
- "errors"
- "reflect"
-
- "github.com/dgraph-io/badger"
-)
-
-// ErrKeyExists is the error returned when data is being Inserted for a Key that already exists
-var ErrKeyExists = errors.New("This Key already exists in badgerhold for this type")
-
-// ErrUniqueExists is the error thrown when data is being inserted for a unique constraint value that already exists
-var ErrUniqueExists = errors.New("This value cannot be written due to the unique constraint on the field")
-
-// sequence tells badgerhold to insert the key as the next sequence in the bucket
-type sequence struct{}
-
-// NextSequence is used to create a sequential key for inserts
-// Inserts a uint64 as the key
-// store.Insert(badgerhold.NextSequence(), data)
-func NextSequence() interface{} {
- return sequence{}
-}
-
-// Insert inserts the passed in data into the the badgerhold
-//
-// If the the key already exists in the badgerhold, then an ErrKeyExists is returned
-// If the data struct has a field tagged as `badgerholdKey` and it is the same type
-// as the Insert key, AND the data struct is passed by reference, AND the key field
-// is currently set to the zero-value for that type, then that field will be set to
-// the value of the insert key.
-//
-// To use this with badgerhold.NextSequence() use a type of `uint64` for the key field.
-func (s *Store) Insert(key, data interface{}) error {
- return s.Badger().Update(func(tx *badger.Txn) error {
- return s.TxInsert(tx, key, data)
- })
-}
-
-// TxInsert is the same as Insert except it allows you specify your own transaction
-func (s *Store) TxInsert(tx *badger.Txn, key, data interface{}) error {
- storer := newStorer(data)
- var err error
-
- if _, ok := key.(sequence); ok {
- key, err = s.getSequence(storer.Type())
- if err != nil {
- return err
- }
- }
-
- gk, err := encodeKey(key, storer.Type())
-
- if err != nil {
- return err
- }
-
- _, err = tx.Get(gk)
- if err != badger.ErrKeyNotFound {
- return ErrKeyExists
- }
-
- value, err := encode(data)
- if err != nil {
- return err
- }
-
- // insert data
- err = tx.Set(gk, value)
-
- if err != nil {
- return err
- }
-
- // insert any indexes
- err = indexAdd(storer, tx, gk, data)
- if err != nil {
- return err
- }
-
- dataVal := reflect.Indirect(reflect.ValueOf(data))
- if !dataVal.CanSet() {
- return nil
- }
- dataType := dataVal.Type()
-
- for i := 0; i < dataType.NumField(); i++ {
- tf := dataType.Field(i)
- if _, ok := tf.Tag.Lookup(BadgerholdKeyTag); ok ||
- tf.Tag.Get(badgerholdPrefixTag) == badgerholdPrefixKeyValue {
- fieldValue := dataVal.Field(i)
- keyValue := reflect.ValueOf(key)
- if keyValue.Type() != tf.Type {
- break
- }
- if !fieldValue.CanSet() {
- break
- }
- if !reflect.DeepEqual(fieldValue.Interface(), reflect.Zero(tf.Type).Interface()) {
- break
- }
- fieldValue.Set(keyValue)
- break
- }
- }
-
- return nil
-}
-
-// Update updates an existing record in the badgerhold
-// if the Key doesn't already exist in the store, then it fails with ErrNotFound
-func (s *Store) Update(key interface{}, data interface{}) error {
- return s.Badger().Update(func(tx *badger.Txn) error {
- return s.TxUpdate(tx, key, data)
- })
-}
-
-// TxUpdate is the same as Update except it allows you to specify your own transaction
-func (s *Store) TxUpdate(tx *badger.Txn, key interface{}, data interface{}) error {
- storer := newStorer(data)
-
- gk, err := encodeKey(key, storer.Type())
-
- if err != nil {
- return err
- }
-
- existingItem, err := tx.Get(gk)
- if err == badger.ErrKeyNotFound {
- return ErrNotFound
- }
- if err != nil {
- return err
- }
-
- // delete any existing indexes
- existingVal := reflect.New(reflect.TypeOf(data)).Interface()
-
- err = existingItem.Value(func(existing []byte) error {
- return decode(existing, existingVal)
- })
- if err != nil {
- return err
- }
- err = indexDelete(storer, tx, gk, existingVal)
- if err != nil {
- return err
- }
-
- value, err := encode(data)
- if err != nil {
- return err
- }
-
- // put data
- err = tx.Set(gk, value)
- if err != nil {
- return err
- }
-
- // insert any new indexes
- return indexAdd(storer, tx, gk, data)
-}
-
-// Upsert inserts the record into the badgerhold if it doesn't exist. If it does already exist, then it updates
-// the existing record
-func (s *Store) Upsert(key interface{}, data interface{}) error {
- return s.Badger().Update(func(tx *badger.Txn) error {
- return s.TxUpsert(tx, key, data)
- })
-}
-
-// TxUpsert is the same as Upsert except it allows you to specify your own transaction
-func (s *Store) TxUpsert(tx *badger.Txn, key interface{}, data interface{}) error {
- storer := newStorer(data)
-
- gk, err := encodeKey(key, storer.Type())
-
- if err != nil {
- return err
- }
-
- existingItem, err := tx.Get(gk)
-
- if err == nil {
- // existing entry found
- // delete any existing indexes
- existingVal := reflect.New(reflect.TypeOf(data)).Interface()
-
- err = existingItem.Value(func(existing []byte) error {
- return decode(existing, existingVal)
- })
- if err != nil {
- return err
- }
-
- err = indexDelete(storer, tx, gk, existingVal)
- if err != nil {
- return err
- }
- } else if err != badger.ErrKeyNotFound {
- return err
- }
-
- // existing entry not found
-
- value, err := encode(data)
- if err != nil {
- return err
- }
-
- // put data
- err = tx.Set(gk, value)
- if err != nil {
- return err
- }
-
- // insert any new indexes
- return indexAdd(storer, tx, gk, data)
-}
-
-// UpdateMatching runs the update function for every record that match the passed in query
-// Note that the type of record in the update func always has to be a pointer
-func (s *Store) UpdateMatching(dataType interface{}, query *Query, update func(record interface{}) error) error {
- return s.Badger().Update(func(tx *badger.Txn) error {
- return s.TxUpdateMatching(tx, dataType, query, update)
- })
-}
-
-// TxUpdateMatching does the same as UpdateMatching, but allows you to specify your own transaction
-func (s *Store) TxUpdateMatching(tx *badger.Txn, dataType interface{}, query *Query,
- update func(record interface{}) error) error {
- return updateQuery(tx, dataType, query, update)
-}
diff --git a/vendor/github.com/timshannon/badgerhold/query.go b/vendor/github.com/timshannon/badgerhold/query.go
deleted file mode 100644
index 13d02acf..00000000
--- a/vendor/github.com/timshannon/badgerhold/query.go
+++ /dev/null
@@ -1,1022 +0,0 @@
-// Copyright 2019 Tim Shannon. All rights reserved.
-// Use of this source code is governed by the MIT license
-// that can be found in the LICENSE file.
-
-package badgerhold
-
-import (
- "fmt"
- "reflect"
- "regexp"
- "sort"
- "strings"
- "unicode"
-
- "github.com/dgraph-io/badger"
-)
-
-const (
- eq = iota // ==
- ne // !=
- gt // >
- lt // <
- ge // >=
- le // <=
- in // in
- re // regular expression
- fn // func
- isnil // test's for nil
- sw // string starts with
- ew // string ends with
-)
-
-// Key is shorthand for specifying a query to run again the Key in a badgerhold, simply returns ""
-// Where(badgerhold.Key).Eq("testkey")
-const Key = ""
-
-// Query is a chained collection of criteria of which an object in the badgerhold needs to match to be returned
-// an empty query matches against all records
-type Query struct {
- index string
- currentField string
- fieldCriteria map[string][]*Criterion
- ors []*Query
-
- badIndex bool
- dataType reflect.Type
- tx *badger.Txn
- writable bool
- subquery bool
- bookmark *iterBookmark
-
- limit int
- skip int
- sort []string
- reverse bool
-}
-
-// IsEmpty returns true if the query is an empty query
-// an empty query matches against everything
-func (q *Query) IsEmpty() bool {
- if q.index != "" {
- return false
- }
- if len(q.fieldCriteria) != 0 {
- return false
- }
-
- if q.ors != nil {
- return false
- }
-
- return true
-}
-
-// Criterion is an operator and a value that a given field needs to match on
-type Criterion struct {
- query *Query
- operator int
- value interface{}
- inValues []interface{}
-}
-
-func hasMatchFunc(criteria []*Criterion) bool {
- for _, c := range criteria {
- if c.operator == fn {
- return true
- }
- }
- return false
-}
-
-// Field allows for referencing a field in structure being compared
-type Field string
-
-// Where starts a query for specifying the criteria that an object in the badgerhold needs to match to
-// be returned in a Find result
-/*
-Query API Example
-
- s.Find(badgerhold.Where("FieldName").Eq(value).And("AnotherField").Lt(AnotherValue).
- Or(badgerhold.Where("FieldName").Eq(anotherValue)
-
-Since Gobs only encode exported fields, this will panic if you pass in a field with a lower case first letter
-*/
-func Where(field string) *Criterion {
- if !startsUpper(field) {
- panic("The first letter of a field in a badgerhold query must be upper-case")
- }
-
- return &Criterion{
- query: &Query{
- currentField: field,
- fieldCriteria: make(map[string][]*Criterion),
- },
- }
-}
-
-// And creates a nother set of criterion the needs to apply to a query
-func (q *Query) And(field string) *Criterion {
- if !startsUpper(field) {
- panic("The first letter of a field in a badgerhold query must be upper-case")
- }
-
- q.currentField = field
- return &Criterion{
- query: q,
- }
-}
-
-// Skip skips the number of records that match all the rest of the query criteria, and does not return them
-// in the result set. Setting skip multiple times, or to a negative value will panic
-func (q *Query) Skip(amount int) *Query {
- if amount < 0 {
- panic("Skip must be set to a positive number")
- }
-
- if q.skip != 0 {
- panic(fmt.Sprintf("Skip has already been set to %d", q.skip))
- }
-
- q.skip = amount
-
- return q
-}
-
-// Limit sets the maximum number of records that can be returned by a query
-// Setting Limit multiple times, or to a negative value will panic
-func (q *Query) Limit(amount int) *Query {
- if amount < 0 {
- panic("Limit must be set to a positive number")
- }
-
- if q.limit != 0 {
- panic(fmt.Sprintf("Limit has already been set to %d", q.limit))
- }
-
- q.limit = amount
-
- return q
-}
-
-// SortBy sorts the results by the given fields name
-// Multiple fields can be used
-func (q *Query) SortBy(fields ...string) *Query {
- for i := range fields {
- if fields[i] == Key {
- panic("Cannot sort by Key.")
- }
- var found bool
- for k := range q.sort {
- if q.sort[k] == fields[i] {
- found = true
- break
- }
- }
- if !found {
- q.sort = append(q.sort, fields[i])
- }
- }
- return q
-}
-
-// Reverse will reverse the current result set
-// useful with SortBy
-func (q *Query) Reverse() *Query {
- q.reverse = !q.reverse
- return q
-}
-
-// Index specifies the index to use when running this query
-func (q *Query) Index(indexName string) *Query {
- if strings.Contains(indexName, ".") {
- // NOTE: I may reconsider this in the future
- panic("Nested indexes are not supported. Only top level structures can be indexed")
- }
- q.index = indexName
- return q
-}
-
-// Or creates another separate query that gets unioned with any other results in the query
-// Or will panic if the query passed in contains a limit or skip value, as they are only
-// allowed on top level queries
-func (q *Query) Or(query *Query) *Query {
- if query.skip != 0 || query.limit != 0 {
- panic("Or'd queries cannot contain skip or limit values")
- }
- q.ors = append(q.ors, query)
- return q
-}
-
-func (q *Query) matchesAllFields(key []byte, value reflect.Value, currentRow interface{}) (bool, error) {
- if q.IsEmpty() {
- return true, nil
- }
-
- for field, criteria := range q.fieldCriteria {
- if field == q.index && !q.badIndex && !hasMatchFunc(criteria) {
- // already handled by index Iterator
- continue
- }
-
- if field == Key {
- ok, err := matchesAllCriteria(criteria, key, true, q.dataType.Name(), currentRow)
- if err != nil {
- return false, err
- }
- if !ok {
- return false, nil
- }
-
- continue
- }
-
- fVal, err := fieldValue(value, field)
- if err != nil {
- return false, err
- }
-
- ok, err := matchesAllCriteria(criteria, fVal.Interface(), false, "", currentRow)
- if err != nil {
- return false, err
- }
- if !ok {
- return false, nil
- }
- }
-
- return true, nil
-}
-
-func fieldValue(value reflect.Value, field string) (reflect.Value, error) {
- fields := strings.Split(field, ".")
-
- current := value
- for i := range fields {
- if current.Kind() == reflect.Ptr {
- current = current.Elem().FieldByName(fields[i])
- } else {
- current = current.FieldByName(fields[i])
- }
- if !current.IsValid() {
- return reflect.Value{}, fmt.Errorf("The field %s does not exist in the type %s", field, value)
- }
- }
- return current, nil
-}
-
-func (c *Criterion) op(op int, value interface{}) *Query {
- c.operator = op
- c.value = value
-
- q := c.query
- q.fieldCriteria[q.currentField] = append(q.fieldCriteria[q.currentField], c)
-
- return q
-}
-
-// Eq tests if the current field is Equal to the passed in value
-func (c *Criterion) Eq(value interface{}) *Query {
- return c.op(eq, value)
-}
-
-// Ne test if the current field is Not Equal to the passed in value
-func (c *Criterion) Ne(value interface{}) *Query {
- return c.op(ne, value)
-}
-
-// Gt test if the current field is Greater Than the passed in value
-func (c *Criterion) Gt(value interface{}) *Query {
- return c.op(gt, value)
-}
-
-// Lt test if the current field is Less Than the passed in value
-func (c *Criterion) Lt(value interface{}) *Query {
- return c.op(lt, value)
-}
-
-// Ge test if the current field is Greater Than or Equal To the passed in value
-func (c *Criterion) Ge(value interface{}) *Query {
- return c.op(ge, value)
-}
-
-// Le test if the current field is Less Than or Equal To the passed in value
-func (c *Criterion) Le(value interface{}) *Query {
- return c.op(le, value)
-}
-
-// In test if the current field is a member of the slice of values passed in
-func (c *Criterion) In(values ...interface{}) *Query {
- c.operator = in
- c.inValues = values
-
- q := c.query
- q.fieldCriteria[q.currentField] = append(q.fieldCriteria[q.currentField], c)
-
- return q
-}
-
-// RegExp will test if a field matches against the regular expression
-// The Field Value will be converted to string (%s) before testing
-func (c *Criterion) RegExp(expression *regexp.Regexp) *Query {
- return c.op(re, expression)
-}
-
-// IsNil will test if a field is equal to nil
-func (c *Criterion) IsNil() *Query {
- return c.op(isnil, nil)
-}
-
-// HasPrefix will test if a field starts with provided string
-func (c *Criterion) HasPrefix(prefix string) *Query {
- return c.op(sw, prefix)
-}
-
-// HasSuffix will test if a field ends with provided string
-func (c *Criterion) HasSuffix(suffix string) *Query {
- return c.op(ew, suffix)
-}
-
-// MatchFunc is a function used to test an arbitrary matching value in a query
-type MatchFunc func(ra *RecordAccess) (bool, error)
-
-// RecordAccess allows access to the current record, field or allows running a subquery within a
-// MatchFunc
-type RecordAccess struct {
- record interface{}
- field interface{}
- query *Query
-}
-
-// Field is the current field being queried
-func (r *RecordAccess) Field() interface{} {
- return r.field
-}
-
-// Record is the complete record for a given row in badgerhold
-func (r *RecordAccess) Record() interface{} {
- return r.record
-}
-
-// SubQuery allows you to run another query in the same transaction for each
-// record in a parent query
-func (r *RecordAccess) SubQuery(result interface{}, query *Query) error {
- query.subquery = true
- query.bookmark = r.query.bookmark
- return findQuery(r.query.tx, result, query)
-}
-
-// SubAggregateQuery allows you to run another aggregate query in the same transaction for each
-// record in a parent query
-func (r *RecordAccess) SubAggregateQuery(query *Query, groupBy ...string) ([]*AggregateResult, error) {
- query.subquery = true
- query.bookmark = r.query.bookmark
- return aggregateQuery(r.query.tx, r.record, query, groupBy...)
-}
-
-// MatchFunc will test if a field matches the passed in function
-func (c *Criterion) MatchFunc(match MatchFunc) *Query {
- if c.query.currentField == Key {
- panic("Match func cannot be used against Keys, as the Key type is unknown at runtime, and there is " +
- "no value compare against")
- }
-
- return c.op(fn, match)
-}
-
-// test if the criterion passes with the passed in value
-func (c *Criterion) test(testValue interface{}, encoded bool, keyType string, currentRow interface{}) (bool, error) {
- var value interface{}
- if encoded {
- if len(testValue.([]byte)) != 0 {
- if c.operator == in {
- // value is a slice of values, use c.inValues
- value = reflect.New(reflect.TypeOf(c.inValues[0])).Interface()
- err := decode(testValue.([]byte), value)
- if err != nil {
- return false, err
- }
-
- } else {
- // used with keys
- value = reflect.New(reflect.TypeOf(c.value)).Interface()
- if keyType != "" {
- err := decodeKey(testValue.([]byte), value, keyType)
- if err != nil {
- return false, err
- }
- } else {
- err := decode(testValue.([]byte), value)
- if err != nil {
- return false, err
- }
- }
- }
- }
- } else {
- value = testValue
- }
-
- switch c.operator {
- case in:
- for i := range c.inValues {
- result, err := c.compare(value, c.inValues[i], currentRow)
- if err != nil {
- return false, err
- }
- if result == 0 {
- return true, nil
- }
- }
-
- return false, nil
- case re:
- return c.value.(*regexp.Regexp).Match([]byte(fmt.Sprintf("%s", value))), nil
- case fn:
- return c.value.(MatchFunc)(&RecordAccess{
- field: value,
- record: currentRow,
- query: c.query,
- })
- case isnil:
- return reflect.ValueOf(value).IsNil(), nil
- case sw:
- return strings.HasPrefix(fmt.Sprintf("%s", value), fmt.Sprintf("%s", c.value)), nil
- case ew:
- return strings.HasSuffix(fmt.Sprintf("%s", value), fmt.Sprintf("%s", c.value)), nil
- default:
- // comparison operators
- result, err := c.compare(value, c.value, currentRow)
- if err != nil {
- return false, err
- }
-
- switch c.operator {
- case eq:
- return result == 0, nil
- case ne:
- return result != 0, nil
- case gt:
- return result > 0, nil
- case lt:
- return result < 0, nil
- case le:
- return result < 0 || result == 0, nil
- case ge:
- return result > 0 || result == 0, nil
- default:
- panic("invalid operator")
- }
- }
-}
-
-func matchesAllCriteria(criteria []*Criterion, value interface{}, encoded bool, keyType string,
- currentRow interface{}) (bool, error) {
-
- for i := range criteria {
- ok, err := criteria[i].test(value, encoded, keyType, currentRow)
- if err != nil {
- return false, err
- }
- if !ok {
- return false, nil
- }
- }
-
- return true, nil
-}
-
-func startsUpper(str string) bool {
- if str == "" {
- return true
- }
-
- for _, r := range str {
- return unicode.IsUpper(r)
- }
-
- return false
-}
-
-func (q *Query) String() string {
- s := ""
-
- if q.index != "" {
- s += "Using Index [" + q.index + "] "
- }
-
- s += "Where "
- for field, criteria := range q.fieldCriteria {
- for i := range criteria {
- s += field + " " + criteria[i].String()
- s += "\n\tAND "
- }
- }
-
- // remove last AND
- s = s[:len(s)-6]
-
- for i := range q.ors {
- s += "\nOr " + q.ors[i].String()
- }
-
- return s
-}
-
-func (c *Criterion) String() string {
- s := ""
- switch c.operator {
- case eq:
- s += "=="
- case ne:
- s += "!="
- case gt:
- s += ">"
- case lt:
- s += "<"
- case le:
- s += "<="
- case ge:
- s += ">="
- case in:
- return "in " + fmt.Sprintf("%v", c.inValues)
- case re:
- s += "matches the regular expression"
- case fn:
- s += "matches the function"
- case isnil:
- return "is nil"
- case sw:
- return "starts with " + fmt.Sprintf("%+v", c.value)
- case ew:
- return "ends with " + fmt.Sprintf("%+v", c.value)
- default:
- panic("invalid operator")
- }
- return s + " " + fmt.Sprintf("%v", c.value)
-}
-
-type record struct {
- key []byte
- value reflect.Value
-}
-
-func runQuery(tx *badger.Txn, dataType interface{}, query *Query, retrievedKeys keyList, skip int,
- action func(r *record) error) error {
- storer := newStorer(dataType)
-
- tp := dataType
-
- for reflect.TypeOf(tp).Kind() == reflect.Ptr {
- tp = reflect.ValueOf(tp).Elem().Interface()
- }
-
- query.dataType = reflect.TypeOf(tp)
-
- if len(query.sort) > 0 {
- return runQuerySort(tx, dataType, query, action)
- }
-
- iter := newIterator(tx, storer.Type(), query, query.bookmark)
- if (query.writable || query.subquery) && query.bookmark == nil {
- query.bookmark = iter.createBookmark()
- }
-
- defer func() {
- iter.Close()
- query.bookmark = nil
- }()
-
- if query.index != "" && query.badIndex {
- return fmt.Errorf("The index %s does not exist", query.index)
- }
-
- newKeys := make(keyList, 0)
-
- limit := query.limit - len(retrievedKeys)
-
- for k, v := iter.Next(); k != nil; k, v = iter.Next() {
- if len(retrievedKeys) != 0 {
- // don't check this record if it's already been retrieved
- if retrievedKeys.in(k) {
- continue
- }
- }
-
- val := reflect.New(reflect.TypeOf(tp))
-
- err := decode(v, val.Interface())
- if err != nil {
- return err
- }
-
- query.tx = tx
-
- ok, err := query.matchesAllFields(k, val, val.Interface())
- if err != nil {
- return err
- }
-
- if ok {
- if skip > 0 {
- skip--
- continue
- }
-
- err = action(&record{
- key: k,
- value: val,
- })
- if err != nil {
- return err
- }
-
- // track that this key's entry has been added to the result list
- newKeys.add(k)
-
- if query.limit != 0 {
- limit--
- if limit == 0 {
- break
- }
- }
- }
-
- }
-
- if iter.Error() != nil {
- return iter.Error()
- }
-
- if query.limit != 0 && limit == 0 {
- return nil
- }
-
- if len(query.ors) > 0 {
- iter.Close()
- for i := range newKeys {
- retrievedKeys.add(newKeys[i])
- }
-
- for i := range query.ors {
- err := runQuery(tx, tp, query.ors[i], retrievedKeys, skip, action)
- if err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-// runQuerySort runs the query without sort, skip, or limit, then applies them to the entire result set
-func runQuerySort(tx *badger.Txn, dataType interface{}, query *Query, action func(r *record) error) error {
- // Validate sort fields
- for _, field := range query.sort {
- fields := strings.Split(field, ".")
-
- current := query.dataType
- for i := range fields {
- var structField reflect.StructField
- found := false
- if current.Kind() == reflect.Ptr {
- structField, found = current.Elem().FieldByName(fields[i])
- } else {
- structField, found = current.FieldByName(fields[i])
- }
-
- if !found {
- return fmt.Errorf("The field %s does not exist in the type %s", field, query.dataType)
- }
- current = structField.Type
- }
- }
-
- // Run query without sort, skip or limit
- // apply sort, skip and limit to entire dataset
- qCopy := *query
- qCopy.sort = nil
- qCopy.limit = 0
- qCopy.skip = 0
-
- var records []*record
- err := runQuery(tx, dataType, &qCopy, nil, 0,
- func(r *record) error {
- records = append(records, r)
-
- return nil
- })
-
- if err != nil {
- return err
- }
-
- sort.Slice(records, func(i, j int) bool {
- for _, field := range query.sort {
- val, err := fieldValue(records[i].value.Elem(), field)
- if err != nil {
- panic(err.Error()) // shouldn't happen due to field check above
- }
- value := val.Interface()
-
- val, err = fieldValue(records[j].value.Elem(), field)
- if err != nil {
- panic(err.Error()) // shouldn't happen due to field check above
- }
-
- other := val.Interface()
-
- if query.reverse {
- value, other = other, value
- }
-
- cmp, cerr := compare(value, other)
- if cerr != nil {
- // if for some reason there is an error on compare, fallback to a lexicographic compare
- valS := fmt.Sprintf("%s", value)
- otherS := fmt.Sprintf("%s", other)
- if valS < otherS {
- return true
- } else if valS == otherS {
- continue
- }
- return false
- }
-
- if cmp == -1 {
- return true
- } else if cmp == 0 {
- continue
- }
- return false
- }
- return false
- })
-
- // apply skip and limit
- limit := query.limit
- skip := query.skip
-
- if skip > len(records) {
- records = records[0:0]
- } else {
- records = records[skip:]
- }
-
- if limit > 0 && limit <= len(records) {
- records = records[:limit]
- }
-
- for i := range records {
- err = action(records[i])
- if err != nil {
- return err
- }
- }
-
- return nil
-
-}
-
-func findQuery(tx *badger.Txn, result interface{}, query *Query) error {
- if query == nil {
- query = &Query{}
- }
-
- query.writable = false
-
- resultVal := reflect.ValueOf(result)
- if resultVal.Kind() != reflect.Ptr || resultVal.Elem().Kind() != reflect.Slice {
- panic("result argument must be a slice address")
- }
-
- sliceVal := resultVal.Elem()
-
- elType := sliceVal.Type().Elem()
-
- tp := elType
-
- for tp.Kind() == reflect.Ptr {
- tp = tp.Elem()
- }
-
- var keyType reflect.Type
- var keyField string
-
- for i := 0; i < tp.NumField(); i++ {
- if strings.Contains(string(tp.Field(i).Tag), BadgerholdKeyTag) ||
- tp.Field(i).Tag.Get(badgerholdPrefixTag) == badgerholdPrefixKeyValue {
- keyType = tp.Field(i).Type
- keyField = tp.Field(i).Name
- break
- }
- }
-
- val := reflect.New(tp)
-
- err := runQuery(tx, val.Interface(), query, nil, query.skip,
- func(r *record) error {
- var rowValue reflect.Value
-
- if elType.Kind() == reflect.Ptr {
- rowValue = r.value
- } else {
- rowValue = r.value.Elem()
- }
-
- if keyType != nil {
- rowKey := rowValue
- for rowKey.Kind() == reflect.Ptr {
- rowKey = rowKey.Elem()
- }
- err := decodeKey(r.key, rowKey.FieldByName(keyField).Addr().Interface(), tp.Name())
- if err != nil {
- return err
- }
- }
-
- sliceVal = reflect.Append(sliceVal, rowValue)
-
- return nil
- })
-
- if err != nil {
- return err
- }
-
- resultVal.Elem().Set(sliceVal.Slice(0, sliceVal.Len()))
-
- return nil
-}
-
-func deleteQuery(tx *badger.Txn, dataType interface{}, query *Query) error {
- if query == nil {
- query = &Query{}
- }
- query.writable = true
-
- var records []*record
-
- err := runQuery(tx, dataType, query, nil, query.skip,
- func(r *record) error {
- records = append(records, r)
-
- return nil
- })
-
- if err != nil {
- return err
- }
-
- storer := newStorer(dataType)
-
- for i := range records {
- err := tx.Delete(records[i].key)
- if err != nil {
- return err
- }
-
- // remove any indexes
- err = indexDelete(storer, tx, records[i].key, records[i].value.Interface())
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func updateQuery(tx *badger.Txn, dataType interface{}, query *Query, update func(record interface{}) error) error {
- if query == nil {
- query = &Query{}
- }
-
- query.writable = true
- var records []*record
-
- err := runQuery(tx, dataType, query, nil, query.skip,
- func(r *record) error {
- records = append(records, r)
-
- return nil
-
- })
-
- if err != nil {
- return err
- }
-
- storer := newStorer(dataType)
- for i := range records {
- upVal := records[i].value.Interface()
-
- // delete any existing indexes bad on original value
- err := indexDelete(storer, tx, records[i].key, upVal)
- if err != nil {
- return err
- }
-
- err = update(upVal)
- if err != nil {
- return err
- }
-
- encVal, err := encode(upVal)
- if err != nil {
- return err
- }
-
- err = tx.Set(records[i].key, encVal)
- if err != nil {
- return err
- }
-
- // insert any new indexes
- err = indexAdd(storer, tx, records[i].key, upVal)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func aggregateQuery(tx *badger.Txn, dataType interface{}, query *Query, groupBy ...string) ([]*AggregateResult, error) {
- if query == nil {
- query = &Query{}
- }
-
- query.writable = false
- var result []*AggregateResult
-
- if len(groupBy) == 0 {
- result = append(result, &AggregateResult{})
- }
-
- err := runQuery(tx, dataType, query, nil, query.skip,
- func(r *record) error {
- if len(groupBy) == 0 {
- result[0].reduction = append(result[0].reduction, r.value)
- return nil
- }
-
- grouping := make([]reflect.Value, len(groupBy))
-
- for i := range groupBy {
- fVal := r.value.Elem().FieldByName(groupBy[i])
- if !fVal.IsValid() {
- return fmt.Errorf("The field %s does not exist in the type %s", groupBy[i],
- r.value.Type())
- }
-
- grouping[i] = fVal
- }
-
- var err error
- var c int
- var allEqual bool
-
- i := sort.Search(len(result), func(i int) bool {
- for j := range grouping {
- c, err = compare(result[i].group[j].Interface(), grouping[j].Interface())
- if err != nil {
- return true
- }
- if c != 0 {
- return c >= 0
- }
- // if group part is equal, compare the next group part
- }
- allEqual = true
- return true
- })
-
- if err != nil {
- return err
- }
-
- if i < len(result) {
- if allEqual {
- // group already exists, append results to reduction
- result[i].reduction = append(result[i].reduction, r.value)
- return nil
- }
- }
-
- // group not found, create another grouping at i
- result = append(result, nil)
- copy(result[i+1:], result[i:])
- result[i] = &AggregateResult{
- group: grouping,
- reduction: []reflect.Value{r.value},
- }
-
- return nil
- })
-
- if err != nil {
- return nil, err
- }
-
- return result, nil
-}
diff --git a/vendor/github.com/timshannon/badgerhold/store.go b/vendor/github.com/timshannon/badgerhold/store.go
deleted file mode 100644
index be1e701a..00000000
--- a/vendor/github.com/timshannon/badgerhold/store.go
+++ /dev/null
@@ -1,205 +0,0 @@
-// Copyright 2019 Tim Shannon. All rights reserved.
-// Use of this source code is governed by the MIT license
-// that can be found in the LICENSE file.
-
-package badgerhold
-
-import (
- "reflect"
- "strings"
- "sync"
-
- "github.com/dgraph-io/badger"
-)
-
-const (
- // BadgerHoldIndexTag is the struct tag used to define an a field as indexable for a badgerhold
- BadgerHoldIndexTag = "badgerholdIndex"
-
- // BadgerholdKeyTag is the struct tag used to define an a field as a key for use in a Find query
- BadgerholdKeyTag = "badgerholdKey"
-
- // badgerholdPrefixTag is the prefix for an alternate (more standard) version of a struct tag
- badgerholdPrefixTag = "badgerhold"
- badgerholdPrefixIndexValue = "index"
- badgerholdPrefixKeyValue = "key"
- badgerholdPrefixUniqueValue = "unique"
-)
-
-// Store is a badgerhold wrapper around a badger DB
-type Store struct {
- db *badger.DB
- sequenceBandwith uint64
- sequences *sync.Map
-}
-
-// Options allows you set different options from the defaults
-// For example the encoding and decoding funcs which default to Gob
-type Options struct {
- Encoder EncodeFunc
- Decoder DecodeFunc
- SequenceBandwith uint64
- badger.Options
-}
-
-// DefaultOptions are a default set of options for opening a BadgerHold database
-// Includes badgers own default options
-var DefaultOptions = Options{
- Options: badger.DefaultOptions(""),
- Encoder: DefaultEncode,
- Decoder: DefaultDecode,
- SequenceBandwith: 100,
-}
-
-// Open opens or creates a badgerhold file.
-func Open(options Options) (*Store, error) {
- encode = options.Encoder
- decode = options.Decoder
-
- db, err := badger.Open(options.Options)
- if err != nil {
- return nil, err
- }
-
- return &Store{
- db: db,
- sequenceBandwith: options.SequenceBandwith,
- sequences: &sync.Map{},
- }, nil
-}
-
-// Badger returns the underlying Badger DB the badgerhold is based on
-func (s *Store) Badger() *badger.DB {
- return s.db
-}
-
-// Close closes the badger db
-func (s *Store) Close() error {
- var err error
- s.sequences.Range(func(key, value interface{}) bool {
- err = value.(*badger.Sequence).Release()
- if err != nil {
- return false
- }
- return true
- })
- if err != nil {
- return err
- }
- return s.db.Close()
-}
-
-/*
- NOTE: Not going to implement ReIndex and Remove index
- I had originally created these to make the transition from a plain bolt or badger DB easier
- but there is too much chance for lost data, and it's probably better that any conversion be
- done by the developer so they can directly manage how they want data to be migrated.
- If you disagree, feel free to open an issue and we can revisit this.
-*/
-
-// Storer is the Interface to implement to skip reflect calls on all data passed into the badgerhold
-type Storer interface {
- Type() string // used as the badgerdb index prefix
- Indexes() map[string]Index //[indexname]indexFunc
-}
-
-// anonType is created from a reflection of an unknown interface
-type anonStorer struct {
- rType reflect.Type
- indexes map[string]Index
-}
-
-// Type returns the name of the type as determined from the reflect package
-func (t *anonStorer) Type() string {
- return t.rType.Name()
-}
-
-// Indexes returns the Indexes determined by the reflect package on this type
-func (t *anonStorer) Indexes() map[string]Index {
- return t.indexes
-}
-
-// newStorer creates a type which satisfies the Storer interface based on reflection of the passed in dataType
-// if the Type doesn't meet the requirements of a Storer (i.e. doesn't have a name) it panics
-// You can avoid any reflection costs, by implementing the Storer interface on a type
-func newStorer(dataType interface{}) Storer {
- s, ok := dataType.(Storer)
-
- if ok {
- return s
- }
-
- tp := reflect.TypeOf(dataType)
-
- for tp.Kind() == reflect.Ptr {
- tp = tp.Elem()
- }
-
- storer := &anonStorer{
- rType: tp,
- indexes: make(map[string]Index),
- }
-
- if storer.rType.Name() == "" {
- panic("Invalid Type for Storer. Type is unnamed")
- }
-
- if storer.rType.Kind() != reflect.Struct {
- panic("Invalid Type for Storer. BadgerHold only works with structs")
- }
-
- for i := 0; i < storer.rType.NumField(); i++ {
-
- indexName := ""
- unique := false
-
- if strings.Contains(string(storer.rType.Field(i).Tag), BadgerHoldIndexTag) {
- indexName = storer.rType.Field(i).Tag.Get(BadgerHoldIndexTag)
-
- if indexName != "" {
- indexName = storer.rType.Field(i).Name
- }
- } else if tag := storer.rType.Field(i).Tag.Get(badgerholdPrefixTag); tag != "" {
- if tag == badgerholdPrefixIndexValue {
- indexName = storer.rType.Field(i).Name
- } else if tag == badgerholdPrefixUniqueValue {
- indexName = storer.rType.Field(i).Name
- unique = true
- }
- }
-
- if indexName != "" {
- storer.indexes[indexName] = Index{
- IndexFunc: func(name string, value interface{}) ([]byte, error) {
- tp := reflect.ValueOf(value)
- for tp.Kind() == reflect.Ptr {
- tp = tp.Elem()
- }
-
- return encode(tp.FieldByName(name).Interface())
- },
- Unique: unique,
- }
- }
- }
-
- return storer
-}
-
-func (s *Store) getSequence(typeName string) (uint64, error) {
- seq, ok := s.sequences.Load(typeName)
- if !ok {
- newSeq, err := s.Badger().GetSequence([]byte(typeName), s.sequenceBandwith)
- if err != nil {
- return 0, err
- }
- s.sequences.Store(typeName, newSeq)
- seq = newSeq
- }
-
- return seq.(*badger.Sequence).Next()
-}
-
-func typePrefix(typeName string) []byte {
- return []byte("bh_" + typeName)
-}
diff --git a/vendor/github.com/yuin/gopher-lua/.travis.yml b/vendor/github.com/yuin/gopher-lua/.travis.yml
deleted file mode 100644
index 68df5e7b..00000000
--- a/vendor/github.com/yuin/gopher-lua/.travis.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-language: go
-
-go:
- - "1.9.x"
- - "1.10.x"
- - "1.11.x"
-env:
- global:
- GO111MODULE=off
-
-before_install:
- - go get github.com/axw/gocov/gocov
- - go get github.com/mattn/goveralls
- - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
-install:
- - go get -u -v $(go list -f '{{join .Imports "\n"}}{{"\n"}}{{join .TestImports "\n"}}' ./... | sort | uniq | grep '\.' | grep -v gopher-lua)
-script:
- - $HOME/gopath/bin/goveralls -service=travis-ci
diff --git a/vendor/github.com/yuin/gopher-lua/LICENSE b/vendor/github.com/yuin/gopher-lua/LICENSE
deleted file mode 100644
index 4daf480a..00000000
--- a/vendor/github.com/yuin/gopher-lua/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2015 Yusuke Inuzuka
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/yuin/gopher-lua/Makefile b/vendor/github.com/yuin/gopher-lua/Makefile
deleted file mode 100644
index 6d9e55c3..00000000
--- a/vendor/github.com/yuin/gopher-lua/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-.PHONY: build test glua
-
-build:
- ./_tools/go-inline *.go && go fmt . && go build
-
-glua: *.go pm/*.go cmd/glua/glua.go
- ./_tools/go-inline *.go && go fmt . && go build cmd/glua/glua.go
-
-test:
- ./_tools/go-inline *.go && go fmt . && go test
diff --git a/vendor/github.com/yuin/gopher-lua/README.rst b/vendor/github.com/yuin/gopher-lua/README.rst
deleted file mode 100644
index b479e463..00000000
--- a/vendor/github.com/yuin/gopher-lua/README.rst
+++ /dev/null
@@ -1,887 +0,0 @@
-
-===============================================================================
-GopherLua: VM and compiler for Lua in Go.
-===============================================================================
-
-.. image:: https://godoc.org/github.com/yuin/gopher-lua?status.svg
- :target: http://godoc.org/github.com/yuin/gopher-lua
-
-.. image:: https://travis-ci.org/yuin/gopher-lua.svg
- :target: https://travis-ci.org/yuin/gopher-lua
-
-.. image:: https://coveralls.io/repos/yuin/gopher-lua/badge.svg
- :target: https://coveralls.io/r/yuin/gopher-lua
-
-.. image:: https://badges.gitter.im/Join%20Chat.svg
- :alt: Join the chat at https://gitter.im/yuin/gopher-lua
- :target: https://gitter.im/yuin/gopher-lua?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
-
-|
-
-
-GopherLua is a Lua5.1 VM and compiler written in Go. GopherLua has a same goal
-with Lua: **Be a scripting language with extensible semantics** . It provides
-Go APIs that allow you to easily embed a scripting language to your Go host
-programs.
-
-.. contents::
- :depth: 1
-
-----------------------------------------------------------------
-Design principle
-----------------------------------------------------------------
-
-- Be a scripting language with extensible semantics.
-- User-friendly Go API
- - The stack based API like the one used in the original Lua
- implementation will cause a performance improvements in GopherLua
- (It will reduce memory allocations and concrete type <-> interface conversions).
- GopherLua API is **not** the stack based API.
- GopherLua give preference to the user-friendliness over the performance.
-
-----------------------------------------------------------------
-How about performance?
-----------------------------------------------------------------
-GopherLua is not fast but not too slow, I think.
-
-GopherLua has almost equivalent ( or little bit better ) performance as Python3 on micro benchmarks.
-
-There are some benchmarks on the `wiki page `_ .
-
-----------------------------------------------------------------
-Installation
-----------------------------------------------------------------
-
-.. code-block:: bash
-
- go get github.com/yuin/gopher-lua
-
-GopherLua supports >= Go1.9.
-
-----------------------------------------------------------------
-Usage
-----------------------------------------------------------------
-GopherLua APIs perform in much the same way as Lua, **but the stack is used only
-for passing arguments and receiving returned values.**
-
-GopherLua supports channel operations. See **"Goroutines"** section.
-
-Import a package.
-
-.. code-block:: go
-
- import (
- "github.com/yuin/gopher-lua"
- )
-
-Run scripts in the VM.
-
-.. code-block:: go
-
- L := lua.NewState()
- defer L.Close()
- if err := L.DoString(`print("hello")`); err != nil {
- panic(err)
- }
-
-.. code-block:: go
-
- L := lua.NewState()
- defer L.Close()
- if err := L.DoFile("hello.lua"); err != nil {
- panic(err)
- }
-
-Refer to `Lua Reference Manual `_ and `Go doc `_ for further information.
-
-Note that elements that are not commented in `Go doc `_ equivalent to `Lua Reference Manual `_ , except GopherLua uses objects instead of Lua stack indices.
-
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Data model
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-All data in a GopherLua program is an ``LValue`` . ``LValue`` is an interface
-type that has following methods.
-
-- ``String() string``
-- ``Type() LValueType``
-
-
-Objects implement an LValue interface are
-
-================ ========================= ================== =======================
- Type name Go type Type() value Constants
-================ ========================= ================== =======================
- ``LNilType`` (constants) ``LTNil`` ``LNil``
- ``LBool`` (constants) ``LTBool`` ``LTrue``, ``LFalse``
- ``LNumber`` float64 ``LTNumber`` ``-``
- ``LString`` string ``LTString`` ``-``
- ``LFunction`` struct pointer ``LTFunction`` ``-``
- ``LUserData`` struct pointer ``LTUserData`` ``-``
- ``LState`` struct pointer ``LTThread`` ``-``
- ``LTable`` struct pointer ``LTTable`` ``-``
- ``LChannel`` chan LValue ``LTChannel`` ``-``
-================ ========================= ================== =======================
-
-You can test an object type in Go way(type assertion) or using a ``Type()`` value.
-
-.. code-block:: go
-
- lv := L.Get(-1) // get the value at the top of the stack
- if str, ok := lv.(lua.LString); ok {
- // lv is LString
- fmt.Println(string(str))
- }
- if lv.Type() != lua.LTString {
- panic("string required.")
- }
-
-.. code-block:: go
-
- lv := L.Get(-1) // get the value at the top of the stack
- if tbl, ok := lv.(*lua.LTable); ok {
- // lv is LTable
- fmt.Println(L.ObjLen(tbl))
- }
-
-Note that ``LBool`` , ``LNumber`` , ``LString`` is not a pointer.
-
-To test ``LNilType`` and ``LBool``, You **must** use pre-defined constants.
-
-.. code-block:: go
-
- lv := L.Get(-1) // get the value at the top of the stack
-
- if lv == lua.LTrue { // correct
- }
-
- if bl, ok := lv.(lua.LBool); ok && bool(bl) { // wrong
- }
-
-In Lua, both ``nil`` and ``false`` make a condition false. ``LVIsFalse`` and ``LVAsBool`` implement this specification.
-
-.. code-block:: go
-
- lv := L.Get(-1) // get the value at the top of the stack
- if lua.LVIsFalse(lv) { // lv is nil or false
- }
-
- if lua.LVAsBool(lv) { // lv is neither nil nor false
- }
-
-Objects that based on go structs(``LFunction``. ``LUserData``, ``LTable``)
-have some public methods and fields. You can use these methods and fields for
-performance and debugging, but there are some limitations.
-
-- Metatable does not work.
-- No error handlings.
-
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Callstack & Registry size
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The size of an ``LState``'s callstack controls the maximum call depth for Lua functions within a script (Go function calls do not count).
-
-The registry of an ``LState`` implements stack storage for calling functions (both Lua and Go functions) and also for temporary variables in expressions. Its storage requirements will increase with callstack usage and also with code complexity.
-
-Both the registry and the callstack can be set to either a fixed size or to auto size.
-
-When you have a large number of ``LStates`` instantiated in a process, it's worth taking the time to tune the registry and callstack options.
-
-+++++++++
-Registry
-+++++++++
-
-The registry can have an initial size, a maximum size and a step size configured on a per ``LState`` basis. This will allow the registry to grow as needed. It will not shrink again after growing.
-
-.. code-block:: go
-
- L := lua.NewState(lua.Options{
- RegistrySize: 1024 * 20, // this is the initial size of the registry
- RegistryMaxSize: 1024 * 80, // this is the maximum size that the registry can grow to. If set to `0` (the default) then the registry will not auto grow
- RegistryGrowStep: 32, // this is how much to step up the registry by each time it runs out of space. The default is `32`.
- })
- defer L.Close()
-
-A registry which is too small for a given script will ultimately result in a panic. A registry which is too big will waste memory (which can be significant if many ``LStates`` are instantiated).
-Auto growing registries incur a small performance hit at the point they are resized but will not otherwise affect performance.
-
-+++++++++
-Callstack
-+++++++++
-
-The callstack can operate in two different modes, fixed or auto size.
-A fixed size callstack has the highest performance and has a fixed memory overhead.
-An auto sizing callstack will allocate and release callstack pages on demand which will ensure the minimum amount of memory is in use at any time. The downside is it will incur a small performance impact every time a new page of callframes is allocated.
-By default an ``LState`` will allocate and free callstack frames in pages of 8, so the allocation overhead is not incurred on every function call. It is very likely that the performance impact of an auto resizing callstack will be negligible for most use cases.
-
-.. code-block:: go
-
- L := lua.NewState(lua.Options{
- CallStackSize: 120, // this is the maximum callstack size of this LState
- MinimizeStackMemory: true, // Defaults to `false` if not specified. If set, the callstack will auto grow and shrink as needed up to a max of `CallStackSize`. If not set, the callstack will be fixed at `CallStackSize`.
- })
- defer L.Close()
-
-++++++++++++++++
-Option defaults
-++++++++++++++++
-
-The above examples show how to customize the callstack and registry size on a per ``LState`` basis. You can also adjust some defaults for when options are not specified by altering the values of ``lua.RegistrySize``, ``lua.RegistryGrowStep`` and ``lua.CallStackSize``.
-
-An ``LState`` object that has been created by ``*LState#NewThread()`` inherits the callstack & registry size from the parent ``LState`` object.
-
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Miscellaneous lua.NewState options
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- **Options.SkipOpenLibs bool(default false)**
- - By default, GopherLua opens all built-in libraries when new LState is created.
- - You can skip this behaviour by setting this to ``true`` .
- - Using the various `OpenXXX(L *LState) int` functions you can open only those libraries that you require, for an example see below.
-- **Options.IncludeGoStackTrace bool(default false)**
- - By default, GopherLua does not show Go stack traces when panics occur.
- - You can get Go stack traces by setting this to ``true`` .
-
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-API
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Refer to `Lua Reference Manual `_ and `Go doc(LState methods) `_ for further information.
-
-+++++++++++++++++++++++++++++++++++++++++
-Calling Go from Lua
-+++++++++++++++++++++++++++++++++++++++++
-
-.. code-block:: go
-
- func Double(L *lua.LState) int {
- lv := L.ToInt(1) /* get argument */
- L.Push(lua.LNumber(lv * 2)) /* push result */
- return 1 /* number of results */
- }
-
- func main() {
- L := lua.NewState()
- defer L.Close()
- L.SetGlobal("double", L.NewFunction(Double)) /* Original lua_setglobal uses stack... */
- }
-
-.. code-block:: lua
-
- print(double(20)) -- > "40"
-
-Any function registered with GopherLua is a ``lua.LGFunction``, defined in ``value.go``
-
-.. code-block:: go
-
- type LGFunction func(*LState) int
-
-Working with coroutines.
-
-.. code-block:: go
-
- co, _ := L.NewThread() /* create a new thread */
- fn := L.GetGlobal("coro").(*lua.LFunction) /* get function from lua */
- for {
- st, err, values := L.Resume(co, fn)
- if st == lua.ResumeError {
- fmt.Println("yield break(error)")
- fmt.Println(err.Error())
- break
- }
-
- for i, lv := range values {
- fmt.Printf("%v : %v\n", i, lv)
- }
-
- if st == lua.ResumeOK {
- fmt.Println("yield break(ok)")
- break
- }
- }
-
-+++++++++++++++++++++++++++++++++++++++++
-Opening a subset of builtin modules
-+++++++++++++++++++++++++++++++++++++++++
-
-The following demonstrates how to open a subset of the built-in modules in Lua, say for example to avoid enabling modules with access to local files or system calls.
-
-main.go
-
-.. code-block:: go
-
- func main() {
- L := lua.NewState(lua.Options{SkipOpenLibs: true})
- defer L.Close()
- for _, pair := range []struct {
- n string
- f lua.LGFunction
- }{
- {lua.LoadLibName, lua.OpenPackage}, // Must be first
- {lua.BaseLibName, lua.OpenBase},
- {lua.TabLibName, lua.OpenTable},
- } {
- if err := L.CallByParam(lua.P{
- Fn: L.NewFunction(pair.f),
- NRet: 0,
- Protect: true,
- }, lua.LString(pair.n)); err != nil {
- panic(err)
- }
- }
- if err := L.DoFile("main.lua"); err != nil {
- panic(err)
- }
- }
-
-+++++++++++++++++++++++++++++++++++++++++
-Creating a module by Go
-+++++++++++++++++++++++++++++++++++++++++
-
-mymodule.go
-
-.. code-block:: go
-
- package mymodule
-
- import (
- "github.com/yuin/gopher-lua"
- )
-
- func Loader(L *lua.LState) int {
- // register functions to the table
- mod := L.SetFuncs(L.NewTable(), exports)
- // register other stuff
- L.SetField(mod, "name", lua.LString("value"))
-
- // returns the module
- L.Push(mod)
- return 1
- }
-
- var exports = map[string]lua.LGFunction{
- "myfunc": myfunc,
- }
-
- func myfunc(L *lua.LState) int {
- return 0
- }
-
-mymain.go
-
-.. code-block:: go
-
- package main
-
- import (
- "./mymodule"
- "github.com/yuin/gopher-lua"
- )
-
- func main() {
- L := lua.NewState()
- defer L.Close()
- L.PreloadModule("mymodule", mymodule.Loader)
- if err := L.DoFile("main.lua"); err != nil {
- panic(err)
- }
- }
-
-main.lua
-
-.. code-block:: lua
-
- local m = require("mymodule")
- m.myfunc()
- print(m.name)
-
-
-+++++++++++++++++++++++++++++++++++++++++
-Calling Lua from Go
-+++++++++++++++++++++++++++++++++++++++++
-
-.. code-block:: go
-
- L := lua.NewState()
- defer L.Close()
- if err := L.DoFile("double.lua"); err != nil {
- panic(err)
- }
- if err := L.CallByParam(lua.P{
- Fn: L.GetGlobal("double"),
- NRet: 1,
- Protect: true,
- }, lua.LNumber(10)); err != nil {
- panic(err)
- }
- ret := L.Get(-1) // returned value
- L.Pop(1) // remove received value
-
-If ``Protect`` is false, GopherLua will panic instead of returning an ``error`` value.
-
-+++++++++++++++++++++++++++++++++++++++++
-User-Defined types
-+++++++++++++++++++++++++++++++++++++++++
-You can extend GopherLua with new types written in Go.
-``LUserData`` is provided for this purpose.
-
-.. code-block:: go
-
- type Person struct {
- Name string
- }
-
- const luaPersonTypeName = "person"
-
- // Registers my person type to given L.
- func registerPersonType(L *lua.LState) {
- mt := L.NewTypeMetatable(luaPersonTypeName)
- L.SetGlobal("person", mt)
- // static attributes
- L.SetField(mt, "new", L.NewFunction(newPerson))
- // methods
- L.SetField(mt, "__index", L.SetFuncs(L.NewTable(), personMethods))
- }
-
- // Constructor
- func newPerson(L *lua.LState) int {
- person := &Person{L.CheckString(1)}
- ud := L.NewUserData()
- ud.Value = person
- L.SetMetatable(ud, L.GetTypeMetatable(luaPersonTypeName))
- L.Push(ud)
- return 1
- }
-
- // Checks whether the first lua argument is a *LUserData with *Person and returns this *Person.
- func checkPerson(L *lua.LState) *Person {
- ud := L.CheckUserData(1)
- if v, ok := ud.Value.(*Person); ok {
- return v
- }
- L.ArgError(1, "person expected")
- return nil
- }
-
- var personMethods = map[string]lua.LGFunction{
- "name": personGetSetName,
- }
-
- // Getter and setter for the Person#Name
- func personGetSetName(L *lua.LState) int {
- p := checkPerson(L)
- if L.GetTop() == 2 {
- p.Name = L.CheckString(2)
- return 0
- }
- L.Push(lua.LString(p.Name))
- return 1
- }
-
- func main() {
- L := lua.NewState()
- defer L.Close()
- registerPersonType(L)
- if err := L.DoString(`
- p = person.new("Steeve")
- print(p:name()) -- "Steeve"
- p:name("Alice")
- print(p:name()) -- "Alice"
- `); err != nil {
- panic(err)
- }
- }
-
-+++++++++++++++++++++++++++++++++++++++++
-Terminating a running LState
-+++++++++++++++++++++++++++++++++++++++++
-GopherLua supports the `Go Concurrency Patterns: Context `_ .
-
-
-.. code-block:: go
-
- L := lua.NewState()
- defer L.Close()
- ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
- defer cancel()
- // set the context to our LState
- L.SetContext(ctx)
- err := L.DoString(`
- local clock = os.clock
- function sleep(n) -- seconds
- local t0 = clock()
- while clock() - t0 <= n do end
- end
- sleep(3)
- `)
- // err.Error() contains "context deadline exceeded"
-
-With coroutines
-
-.. code-block:: go
-
- L := lua.NewState()
- defer L.Close()
- ctx, cancel := context.WithCancel(context.Background())
- L.SetContext(ctx)
- defer cancel()
- L.DoString(`
- function coro()
- local i = 0
- while true do
- coroutine.yield(i)
- i = i+1
- end
- return i
- end
- `)
- co, cocancel := L.NewThread()
- defer cocancel()
- fn := L.GetGlobal("coro").(*LFunction)
-
- _, err, values := L.Resume(co, fn) // err is nil
-
- cancel() // cancel the parent context
-
- _, err, values = L.Resume(co, fn) // err is NOT nil : child context was canceled
-
-**Note that using a context causes performance degradation.**
-
-.. code-block::
-
- time ./glua-with-context.exe fib.lua
- 9227465
- 0.01s user 0.11s system 1% cpu 7.505 total
-
- time ./glua-without-context.exe fib.lua
- 9227465
- 0.01s user 0.01s system 0% cpu 5.306 total
-
-+++++++++++++++++++++++++++++++++++++++++
-Sharing Lua byte code between LStates
-+++++++++++++++++++++++++++++++++++++++++
-Calling ``DoFile`` will load a Lua script, compile it to byte code and run the byte code in a ``LState``.
-
-If you have multiple ``LStates`` which are all required to run the same script, you can share the byte code between them,
-which will save on memory.
-Sharing byte code is safe as it is read only and cannot be altered by lua scripts.
-
-.. code-block:: go
-
- // CompileLua reads the passed lua file from disk and compiles it.
- func CompileLua(filePath string) (*lua.FunctionProto, error) {
- file, err := os.Open(filePath)
- defer file.Close()
- if err != nil {
- return nil, err
- }
- reader := bufio.NewReader(file)
- chunk, err := parse.Parse(reader, filePath)
- if err != nil {
- return nil, err
- }
- proto, err := lua.Compile(chunk, filePath)
- if err != nil {
- return nil, err
- }
- return proto, nil
- }
-
- // DoCompiledFile takes a FunctionProto, as returned by CompileLua, and runs it in the LState. It is equivalent
- // to calling DoFile on the LState with the original source file.
- func DoCompiledFile(L *lua.LState, proto *lua.FunctionProto) error {
- lfunc := L.NewFunctionFromProto(proto)
- L.Push(lfunc)
- return L.PCall(0, lua.MultRet, nil)
- }
-
- // Example shows how to share the compiled byte code from a lua script between multiple VMs.
- func Example() {
- codeToShare := CompileLua("mylua.lua")
- a := lua.NewState()
- b := lua.NewState()
- c := lua.NewState()
- DoCompiledFile(a, codeToShare)
- DoCompiledFile(b, codeToShare)
- DoCompiledFile(c, codeToShare)
- }
-
-+++++++++++++++++++++++++++++++++++++++++
-Goroutines
-+++++++++++++++++++++++++++++++++++++++++
-The ``LState`` is not goroutine-safe. It is recommended to use one LState per goroutine and communicate between goroutines by using channels.
-
-Channels are represented by ``channel`` objects in GopherLua. And a ``channel`` table provides functions for performing channel operations.
-
-Some objects can not be sent over channels due to having non-goroutine-safe objects inside itself.
-
-- a thread(state)
-- a function
-- an userdata
-- a table with a metatable
-
-You **must not** send these objects from Go APIs to channels.
-
-
-
-.. code-block:: go
-
- func receiver(ch, quit chan lua.LValue) {
- L := lua.NewState()
- defer L.Close()
- L.SetGlobal("ch", lua.LChannel(ch))
- L.SetGlobal("quit", lua.LChannel(quit))
- if err := L.DoString(`
- local exit = false
- while not exit do
- channel.select(
- {"|<-", ch, function(ok, v)
- if not ok then
- print("channel closed")
- exit = true
- else
- print("received:", v)
- end
- end},
- {"|<-", quit, function(ok, v)
- print("quit")
- exit = true
- end}
- )
- end
- `); err != nil {
- panic(err)
- }
- }
-
- func sender(ch, quit chan lua.LValue) {
- L := lua.NewState()
- defer L.Close()
- L.SetGlobal("ch", lua.LChannel(ch))
- L.SetGlobal("quit", lua.LChannel(quit))
- if err := L.DoString(`
- ch:send("1")
- ch:send("2")
- `); err != nil {
- panic(err)
- }
- ch <- lua.LString("3")
- quit <- lua.LTrue
- }
-
- func main() {
- ch := make(chan lua.LValue)
- quit := make(chan lua.LValue)
- go receiver(ch, quit)
- go sender(ch, quit)
- time.Sleep(3 * time.Second)
- }
-
-'''''''''''''''
-Go API
-'''''''''''''''
-
-``ToChannel``, ``CheckChannel``, ``OptChannel`` are available.
-
-Refer to `Go doc(LState methods) `_ for further information.
-
-'''''''''''''''
-Lua API
-'''''''''''''''
-
-- **channel.make([buf:int]) -> ch:channel**
- - Create new channel that has a buffer size of ``buf``. By default, ``buf`` is 0.
-
-- **channel.select(case:table [, case:table, case:table ...]) -> {index:int, recv:any, ok}**
- - Same as the ``select`` statement in Go. It returns the index of the chosen case and, if that
- case was a receive operation, the value received and a boolean indicating whether the channel has been closed.
- - ``case`` is a table that outlined below.
- - receiving: `{"|<-", ch:channel [, handler:func(ok, data:any)]}`
- - sending: `{"<-|", ch:channel, data:any [, handler:func(data:any)]}`
- - default: `{"default" [, handler:func()]}`
-
-``channel.select`` examples:
-
-.. code-block:: lua
-
- local idx, recv, ok = channel.select(
- {"|<-", ch1},
- {"|<-", ch2}
- )
- if not ok then
- print("closed")
- elseif idx == 1 then -- received from ch1
- print(recv)
- elseif idx == 2 then -- received from ch2
- print(recv)
- end
-
-.. code-block:: lua
-
- channel.select(
- {"|<-", ch1, function(ok, data)
- print(ok, data)
- end},
- {"<-|", ch2, "value", function(data)
- print(data)
- end},
- {"default", function()
- print("default action")
- end}
- )
-
-- **channel:send(data:any)**
- - Send ``data`` over the channel.
-- **channel:receive() -> ok:bool, data:any**
- - Receive some data over the channel.
-- **channel:close()**
- - Close the channel.
-
-''''''''''''''''''''''''''''''
-The LState pool pattern
-''''''''''''''''''''''''''''''
-To create per-thread LState instances, You can use the ``sync.Pool`` like mechanism.
-
-.. code-block:: go
-
- type lStatePool struct {
- m sync.Mutex
- saved []*lua.LState
- }
-
- func (pl *lStatePool) Get() *lua.LState {
- pl.m.Lock()
- defer pl.m.Unlock()
- n := len(pl.saved)
- if n == 0 {
- return pl.New()
- }
- x := pl.saved[n-1]
- pl.saved = pl.saved[0 : n-1]
- return x
- }
-
- func (pl *lStatePool) New() *lua.LState {
- L := lua.NewState()
- // setting the L up here.
- // load scripts, set global variables, share channels, etc...
- return L
- }
-
- func (pl *lStatePool) Put(L *lua.LState) {
- pl.m.Lock()
- defer pl.m.Unlock()
- pl.saved = append(pl.saved, L)
- }
-
- func (pl *lStatePool) Shutdown() {
- for _, L := range pl.saved {
- L.Close()
- }
- }
-
- // Global LState pool
- var luaPool = &lStatePool{
- saved: make([]*lua.LState, 0, 4),
- }
-
-Now, you can get per-thread LState objects from the ``luaPool`` .
-
-.. code-block:: go
-
- func MyWorker() {
- L := luaPool.Get()
- defer luaPool.Put(L)
- /* your code here */
- }
-
- func main() {
- defer luaPool.Shutdown()
- go MyWorker()
- go MyWorker()
- /* etc... */
- }
-
-
-----------------------------------------------------------------
-Differences between Lua and GopherLua
-----------------------------------------------------------------
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Goroutines
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-- GopherLua supports channel operations.
- - GopherLua has a type named ``channel``.
- - The ``channel`` table provides functions for performing channel operations.
-
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Unsupported functions
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-- ``string.dump``
-- ``os.setlocale``
-- ``lua_Debug.namewhat``
-- ``package.loadlib``
-- debug hooks
-
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Miscellaneous notes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-- ``collectgarbage`` does not take any arguments and runs the garbage collector for the entire Go program.
-- ``file:setvbuf`` does not support a line buffering.
-- Daylight saving time is not supported.
-- GopherLua has a function to set an environment variable : ``os.setenv(name, value)``
-
-----------------------------------------------------------------
-Standalone interpreter
-----------------------------------------------------------------
-Lua has an interpreter called ``lua`` . GopherLua has an interpreter called ``glua`` .
-
-.. code-block:: bash
-
- go get github.com/yuin/gopher-lua/cmd/glua
-
-``glua`` has same options as ``lua`` .
-
-----------------------------------------------------------------
-How to Contribute
-----------------------------------------------------------------
-See `Guidlines for contributors `_ .
-
-----------------------------------------------------------------
-Libraries for GopherLua
-----------------------------------------------------------------
-
-- `gopher-luar `_ : Simplifies data passing to and from gopher-lua
-- `gluamapper `_ : Mapping a Lua table to a Go struct
-- `gluare `_ : Regular expressions for gopher-lua
-- `gluahttp `_ : HTTP request module for gopher-lua
-- `gopher-json `_ : A simple JSON encoder/decoder for gopher-lua
-- `gluayaml `_ : Yaml parser for gopher-lua
-- `glua-lfs `_ : Partially implements the luafilesystem module for gopher-lua
-- `gluaurl `_ : A url parser/builder module for gopher-lua
-- `gluahttpscrape `_ : A simple HTML scraper module for gopher-lua
-- `gluaxmlpath `_ : An xmlpath module for gopher-lua
-- `gmoonscript `_ : Moonscript Compiler for the Gopher Lua VM
-- `loguago `_ : Zerolog wrapper for Gopher-Lua
-- `gluacrypto `_ : A native Go implementation of crypto library for the GopherLua VM.
-- `gluasql `_ : A native Go implementation of SQL client for the GopherLua VM.
-- `purr `_ : A http mock testing tool.
-- `vadv/gopher-lua-libs `_ : Some usefull libraries for GopherLua VM.
-- `gluaperiphery `_ : A periphery library for the GopherLua VM (GPIO, SPI, I2C, MMIO, and Serial peripheral I/O for Linux).
-- `glua-async `_ : An async/await implement for gopher-lua.
-- `gopherlua-debugger `_ : A debugger for gopher-lua
-----------------------------------------------------------------
-Donation
-----------------------------------------------------------------
-
-BTC: 1NEDSyUmo4SMTDP83JJQSWi1MvQUGGNMZB
-
-----------------------------------------------------------------
-License
-----------------------------------------------------------------
-MIT
-
-----------------------------------------------------------------
-Author
-----------------------------------------------------------------
-Yusuke Inuzuka
diff --git a/vendor/github.com/yuin/gopher-lua/_state.go b/vendor/github.com/yuin/gopher-lua/_state.go
deleted file mode 100644
index 960e8810..00000000
--- a/vendor/github.com/yuin/gopher-lua/_state.go
+++ /dev/null
@@ -1,2085 +0,0 @@
-package lua
-
-import (
- "context"
- "fmt"
- "io"
- "math"
- "os"
- "runtime"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/yuin/gopher-lua/parse"
-)
-
-const MultRet = -1
-const RegistryIndex = -10000
-const EnvironIndex = -10001
-const GlobalsIndex = -10002
-
-/* ApiError {{{ */
-
-type ApiError struct {
- Type ApiErrorType
- Object LValue
- StackTrace string
- // Underlying error. This attribute is set only if the Type is ApiErrorFile or ApiErrorSyntax
- Cause error
-}
-
-func newApiError(code ApiErrorType, object LValue) *ApiError {
- return &ApiError{code, object, "", nil}
-}
-
-func newApiErrorS(code ApiErrorType, message string) *ApiError {
- return newApiError(code, LString(message))
-}
-
-func newApiErrorE(code ApiErrorType, err error) *ApiError {
- return &ApiError{code, LString(err.Error()), "", err}
-}
-
-func (e *ApiError) Error() string {
- if len(e.StackTrace) > 0 {
- return fmt.Sprintf("%s\n%s", e.Object.String(), e.StackTrace)
- }
- return e.Object.String()
-}
-
-type ApiErrorType int
-
-const (
- ApiErrorSyntax ApiErrorType = iota
- ApiErrorFile
- ApiErrorRun
- ApiErrorError
- ApiErrorPanic
-)
-
-/* }}} */
-
-/* ResumeState {{{ */
-
-type ResumeState int
-
-const (
- ResumeOK ResumeState = iota
- ResumeYield
- ResumeError
-)
-
-/* }}} */
-
-/* P {{{ */
-
-type P struct {
- Fn LValue
- NRet int
- Protect bool
- Handler *LFunction
-}
-
-/* }}} */
-
-/* Options {{{ */
-
-// Options is a configuration that is used to create a new LState.
-type Options struct {
- // Call stack size. This defaults to `lua.CallStackSize`.
- CallStackSize int
- // Data stack size. This defaults to `lua.RegistrySize`.
- RegistrySize int
- // Allow the registry to grow from the registry size specified up to a value of RegistryMaxSize. A value of 0
- // indicates no growth is permitted. The registry will not shrink again after any growth.
- RegistryMaxSize int
- // If growth is enabled, step up by an additional `RegistryGrowStep` each time to avoid having to resize too often.
- // This defaults to `lua.RegistryGrowStep`
- RegistryGrowStep int
- // Controls whether or not libraries are opened by default
- SkipOpenLibs bool
- // Tells whether a Go stacktrace should be included in a Lua stacktrace when panics occur.
- IncludeGoStackTrace bool
- // If `MinimizeStackMemory` is set, the call stack will be automatically grown or shrank up to a limit of
- // `CallStackSize` in order to minimize memory usage. This does incur a slight performance penalty.
- MinimizeStackMemory bool
-}
-
-/* }}} */
-
-/* Debug {{{ */
-
-type Debug struct {
- frame *callFrame
- Name string
- What string
- Source string
- CurrentLine int
- NUpvalues int
- LineDefined int
- LastLineDefined int
-}
-
-/* }}} */
-
-/* callFrame {{{ */
-
-type callFrame struct {
- Idx int
- Fn *LFunction
- Parent *callFrame
- Pc int
- Base int
- LocalBase int
- ReturnBase int
- NArgs int
- NRet int
- TailCall int
-}
-
-type callFrameStack interface {
- Push(v callFrame)
- Pop() *callFrame
- Last() *callFrame
-
- SetSp(sp int)
- Sp() int
- At(sp int) *callFrame
-
- IsFull() bool
- IsEmpty() bool
-
- FreeAll()
-}
-
-type fixedCallFrameStack struct {
- array []callFrame
- sp int
-}
-
-func newFixedCallFrameStack(size int) callFrameStack {
- return &fixedCallFrameStack{
- array: make([]callFrame, size),
- sp: 0,
- }
-}
-
-func (cs *fixedCallFrameStack) IsEmpty() bool { return cs.sp == 0 }
-
-func (cs *fixedCallFrameStack) IsFull() bool { return cs.sp == len(cs.array) }
-
-func (cs *fixedCallFrameStack) Clear() {
- cs.sp = 0
-}
-
-func (cs *fixedCallFrameStack) Push(v callFrame) {
- cs.array[cs.sp] = v
- cs.array[cs.sp].Idx = cs.sp
- cs.sp++
-}
-
-func (cs *fixedCallFrameStack) Sp() int {
- return cs.sp
-}
-
-func (cs *fixedCallFrameStack) SetSp(sp int) {
- cs.sp = sp
-}
-
-func (cs *fixedCallFrameStack) Last() *callFrame {
- if cs.sp == 0 {
- return nil
- }
- return &cs.array[cs.sp-1]
-}
-
-func (cs *fixedCallFrameStack) At(sp int) *callFrame {
- return &cs.array[sp]
-}
-
-func (cs *fixedCallFrameStack) Pop() *callFrame {
- cs.sp--
- return &cs.array[cs.sp]
-}
-
-func (cs *fixedCallFrameStack) FreeAll() {
- // nothing to do for fixed callframestack
-}
-
-// FramesPerSegment should be a power of 2 constant for performance reasons. It will allow the go compiler to change
-// the divs and mods into bitshifts. Max is 256 due to current use of uint8 to count how many frames in a segment are
-// used.
-const FramesPerSegment = 8
-
-type callFrameStackSegment struct {
- array [FramesPerSegment]callFrame
-}
-type segIdx uint16
-type autoGrowingCallFrameStack struct {
- segments []*callFrameStackSegment
- segIdx segIdx
- // segSp is the number of frames in the current segment which are used. Full 'sp' value is segIdx * FramesPerSegment + segSp.
- // It points to the next stack slot to use, so 0 means to use the 0th element in the segment, and a value of
- // FramesPerSegment indicates that the segment is full and cannot accommodate another frame.
- segSp uint8
-}
-
-var segmentPool sync.Pool
-
-func newCallFrameStackSegment() *callFrameStackSegment {
- seg := segmentPool.Get()
- if seg == nil {
- return &callFrameStackSegment{}
- }
- return seg.(*callFrameStackSegment)
-}
-
-func freeCallFrameStackSegment(seg *callFrameStackSegment) {
- segmentPool.Put(seg)
-}
-
-// newCallFrameStack allocates a new stack for a lua state, which will auto grow up to a max size of at least maxSize.
-// it will actually grow up to the next segment size multiple after maxSize, where the segment size is dictated by
-// FramesPerSegment.
-func newAutoGrowingCallFrameStack(maxSize int) callFrameStack {
- cs := &autoGrowingCallFrameStack{
- segments: make([]*callFrameStackSegment, (maxSize+(FramesPerSegment-1))/FramesPerSegment),
- segIdx: 0,
- }
- cs.segments[0] = newCallFrameStackSegment()
- return cs
-}
-
-func (cs *autoGrowingCallFrameStack) IsEmpty() bool {
- return cs.segIdx == 0 && cs.segSp == 0
-}
-
-// IsFull returns true if the stack cannot receive any more stack pushes without overflowing
-func (cs *autoGrowingCallFrameStack) IsFull() bool {
- return int(cs.segIdx) == len(cs.segments) && cs.segSp >= FramesPerSegment
-}
-
-func (cs *autoGrowingCallFrameStack) Clear() {
- for i := segIdx(1); i <= cs.segIdx; i++ {
- freeCallFrameStackSegment(cs.segments[i])
- cs.segments[i] = nil
- }
- cs.segIdx = 0
- cs.segSp = 0
-}
-
-func (cs *autoGrowingCallFrameStack) FreeAll() {
- for i := segIdx(0); i <= cs.segIdx; i++ {
- freeCallFrameStackSegment(cs.segments[i])
- cs.segments[i] = nil
- }
-}
-
-// Push pushes the passed callFrame onto the stack. it panics if the stack is full, caller should call IsFull() before
-// invoking this to avoid this.
-func (cs *autoGrowingCallFrameStack) Push(v callFrame) {
- curSeg := cs.segments[cs.segIdx]
- if cs.segSp >= FramesPerSegment {
- // segment full, push new segment if allowed
- if cs.segIdx < segIdx(len(cs.segments)-1) {
- curSeg = newCallFrameStackSegment()
- cs.segIdx++
- cs.segments[cs.segIdx] = curSeg
- cs.segSp = 0
- } else {
- panic("lua callstack overflow")
- }
- }
- curSeg.array[cs.segSp] = v
- curSeg.array[cs.segSp].Idx = int(cs.segSp) + FramesPerSegment*int(cs.segIdx)
- cs.segSp++
-}
-
-// Sp retrieves the current stack depth, which is the number of frames currently pushed on the stack.
-func (cs *autoGrowingCallFrameStack) Sp() int {
- return int(cs.segSp) + int(cs.segIdx)*FramesPerSegment
-}
-
-// SetSp can be used to rapidly unwind the stack, freeing all stack frames on the way. It should not be used to
-// allocate new stack space, use Push() for that.
-func (cs *autoGrowingCallFrameStack) SetSp(sp int) {
- desiredSegIdx := segIdx(sp / FramesPerSegment)
- desiredFramesInLastSeg := uint8(sp % FramesPerSegment)
- for {
- if cs.segIdx <= desiredSegIdx {
- break
- }
- freeCallFrameStackSegment(cs.segments[cs.segIdx])
- cs.segments[cs.segIdx] = nil
- cs.segIdx--
- }
- cs.segSp = desiredFramesInLastSeg
-}
-
-func (cs *autoGrowingCallFrameStack) Last() *callFrame {
- curSeg := cs.segments[cs.segIdx]
- segSp := cs.segSp
- if segSp == 0 {
- if cs.segIdx == 0 {
- return nil
- }
- curSeg = cs.segments[cs.segIdx-1]
- segSp = FramesPerSegment
- }
- return &curSeg.array[segSp-1]
-}
-
-func (cs *autoGrowingCallFrameStack) At(sp int) *callFrame {
- segIdx := segIdx(sp / FramesPerSegment)
- frameIdx := uint8(sp % FramesPerSegment)
- return &cs.segments[segIdx].array[frameIdx]
-}
-
-// Pop pops off the most recent stack frame and returns it
-func (cs *autoGrowingCallFrameStack) Pop() *callFrame {
- curSeg := cs.segments[cs.segIdx]
- if cs.segSp == 0 {
- if cs.segIdx == 0 {
- // stack empty
- return nil
- }
- freeCallFrameStackSegment(curSeg)
- cs.segments[cs.segIdx] = nil
- cs.segIdx--
- cs.segSp = FramesPerSegment
- curSeg = cs.segments[cs.segIdx]
- }
- cs.segSp--
- return &curSeg.array[cs.segSp]
-}
-
-/* }}} */
-
-/* registry {{{ */
-
-type registryHandler interface {
- registryOverflow()
-}
-type registry struct {
- array []LValue
- top int
- growBy int
- maxSize int
- alloc *allocator
- handler registryHandler
-}
-
-func newRegistry(handler registryHandler, initialSize int, growBy int, maxSize int, alloc *allocator) *registry {
- return ®istry{make([]LValue, initialSize), 0, growBy, maxSize, alloc, handler}
-}
-
-func (rg *registry) checkSize(requiredSize int) { // +inline-start
- if requiredSize > cap(rg.array) {
- rg.resize(requiredSize)
- }
-} // +inline-end
-
-func (rg *registry) resize(requiredSize int) { // +inline-start
- newSize := requiredSize + rg.growBy // give some padding
- if newSize > rg.maxSize {
- newSize = rg.maxSize
- }
- if newSize < requiredSize {
- rg.handler.registryOverflow()
- return
- }
- rg.forceResize(newSize)
-} // +inline-end
-
-func (rg *registry) forceResize(newSize int) {
- newSlice := make([]LValue, newSize)
- copy(newSlice, rg.array[:rg.top]) // should we copy the area beyond top? there shouldn't be any valid values there so it shouldn't be necessary.
- rg.array = newSlice
-}
-func (rg *registry) SetTop(top int) {
- // +inline-call rg.checkSize top
- oldtop := rg.top
- rg.top = top
- for i := oldtop; i < rg.top; i++ {
- rg.array[i] = LNil
- }
- // values beyond top don't need to be valid LValues, so setting them to nil is fine
- // setting them to nil rather than LNil lets us invoke the golang memclr opto
- if rg.top < oldtop {
- nilRange := rg.array[rg.top:oldtop]
- for i := range nilRange {
- nilRange[i] = nil
- }
- }
- //for i := rg.top; i < oldtop; i++ {
- // rg.array[i] = LNil
- //}
-}
-
-func (rg *registry) Top() int {
- return rg.top
-}
-
-func (rg *registry) Push(v LValue) {
- newSize := rg.top + 1
- // +inline-call rg.checkSize newSize
- rg.array[rg.top] = v
- rg.top++
-}
-
-func (rg *registry) Pop() LValue {
- v := rg.array[rg.top-1]
- rg.array[rg.top-1] = LNil
- rg.top--
- return v
-}
-
-func (rg *registry) Get(reg int) LValue {
- return rg.array[reg]
-}
-
-// CopyRange will move a section of values from index `start` to index `regv`
-// It will move `n` values.
-// `limit` specifies the maximum end range that can be copied from. If it's set to -1, then it defaults to stopping at
-// the top of the registry (values beyond the top are not initialized, so if specifying an alternative `limit` you should
-// pass a value <= rg.top.
-// If start+n is beyond the limit, then nil values will be copied to the destination slots.
-// After the copy, the registry is truncated to be at the end of the copied range, ie the original of the copied values
-// are nilled out. (So top will be regv+n)
-// CopyRange should ideally be renamed to MoveRange.
-func (rg *registry) CopyRange(regv, start, limit, n int) { // +inline-start
- newSize := regv + n
- // +inline-call rg.checkSize newSize
- if limit == -1 || limit > rg.top {
- limit = rg.top
- }
- for i := 0; i < n; i++ {
- srcIdx := start + i
- if srcIdx >= limit || srcIdx < 0 {
- rg.array[regv+i] = LNil
- } else {
- rg.array[regv+i] = rg.array[srcIdx]
- }
- }
-
- // values beyond top don't need to be valid LValues, so setting them to nil is fine
- // setting them to nil rather than LNil lets us invoke the golang memclr opto
- oldtop := rg.top
- rg.top = regv + n
- if rg.top < oldtop {
- nilRange := rg.array[rg.top:oldtop]
- for i := range nilRange {
- nilRange[i] = nil
- }
- }
-} // +inline-end
-
-// FillNil fills the registry with nil values from regm to regm+n and then sets the registry top to regm+n
-func (rg *registry) FillNil(regm, n int) { // +inline-start
- newSize := regm + n
- // +inline-call rg.checkSize newSize
- for i := 0; i < n; i++ {
- rg.array[regm+i] = LNil
- }
- // values beyond top don't need to be valid LValues, so setting them to nil is fine
- // setting them to nil rather than LNil lets us invoke the golang memclr opto
- oldtop := rg.top
- rg.top = regm + n
- if rg.top < oldtop {
- nilRange := rg.array[rg.top:oldtop]
- for i := range nilRange {
- nilRange[i] = nil
- }
- }
-} // +inline-end
-
-func (rg *registry) Insert(value LValue, reg int) {
- top := rg.Top()
- if reg >= top {
- rg.Set(reg, value)
- return
- }
- top--
- for ; top >= reg; top-- {
- // FIXME consider using copy() here if Insert() is called enough
- rg.Set(top+1, rg.Get(top))
- }
- rg.Set(reg, value)
-}
-
-func (rg *registry) Set(reg int, val LValue) {
- newSize := reg + 1
- // +inline-call rg.checkSize newSize
- rg.array[reg] = val
- if reg >= rg.top {
- rg.top = reg + 1
- }
-}
-
-func (rg *registry) SetNumber(reg int, val LNumber) {
- newSize := reg + 1
- // +inline-call rg.checkSize newSize
- rg.array[reg] = rg.alloc.LNumber2I(val)
- if reg >= rg.top {
- rg.top = reg + 1
- }
-}
-
-func (rg *registry) IsFull() bool {
- return rg.top >= cap(rg.array)
-}
-
-/* }}} */
-
-/* Global {{{ */
-
-func newGlobal() *Global {
- return &Global{
- MainThread: nil,
- Registry: newLTable(0, 32),
- Global: newLTable(0, 64),
- builtinMts: make(map[int]LValue),
- tempFiles: make([]*os.File, 0, 10),
- }
-}
-
-/* }}} */
-
-/* package local methods {{{ */
-
-func panicWithTraceback(L *LState) {
- err := newApiError(ApiErrorRun, L.Get(-1))
- err.StackTrace = L.stackTrace(0)
- panic(err)
-}
-
-func panicWithoutTraceback(L *LState) {
- err := newApiError(ApiErrorRun, L.Get(-1))
- panic(err)
-}
-
-func newLState(options Options) *LState {
- al := newAllocator(32)
- ls := &LState{
- G: newGlobal(),
- Parent: nil,
- Panic: panicWithTraceback,
- Dead: false,
- Options: options,
-
- stop: 0,
- alloc: al,
- currentFrame: nil,
- wrapped: false,
- uvcache: nil,
- hasErrorFunc: false,
- mainLoop: mainLoop,
- ctx: nil,
- }
- if options.MinimizeStackMemory {
- ls.stack = newAutoGrowingCallFrameStack(options.CallStackSize)
- } else {
- ls.stack = newFixedCallFrameStack(options.CallStackSize)
- }
- ls.reg = newRegistry(ls, options.RegistrySize, options.RegistryGrowStep, options.RegistryMaxSize, al)
- ls.Env = ls.G.Global
- return ls
-}
-
-func (ls *LState) printReg() {
- println("-------------------------")
- println("thread:", ls)
- println("top:", ls.reg.Top())
- if ls.currentFrame != nil {
- println("function base:", ls.currentFrame.Base)
- println("return base:", ls.currentFrame.ReturnBase)
- } else {
- println("(vm not started)")
- }
- println("local base:", ls.currentLocalBase())
- for i := 0; i < ls.reg.Top(); i++ {
- println(i, ls.reg.Get(i).String())
- }
- println("-------------------------")
-}
-
-func (ls *LState) printCallStack() {
- println("-------------------------")
- for i := 0; i < ls.stack.Sp(); i++ {
- print(i)
- print(" ")
- frame := ls.stack.At(i)
- if frame == nil {
- break
- }
- if frame.Fn.IsG {
- println("IsG:", true, "Frame:", frame, "Fn:", frame.Fn)
- } else {
- println("IsG:", false, "Frame:", frame, "Fn:", frame.Fn, "pc:", frame.Pc)
- }
- }
- println("-------------------------")
-}
-
-func (ls *LState) closeAllUpvalues() { // +inline-start
- for cf := ls.currentFrame; cf != nil; cf = cf.Parent {
- if !cf.Fn.IsG {
- ls.closeUpvalues(cf.LocalBase)
- }
- }
-} // +inline-end
-
-func (ls *LState) raiseError(level int, format string, args ...interface{}) {
- if !ls.hasErrorFunc {
- ls.closeAllUpvalues()
- }
- message := format
- if len(args) > 0 {
- message = fmt.Sprintf(format, args...)
- }
- if level > 0 {
- message = fmt.Sprintf("%v %v", ls.where(level-1, true), message)
- }
- if ls.reg.IsFull() {
- // if the registry is full then it won't be possible to push a value, in this case, force a larger size
- ls.reg.forceResize(ls.reg.Top() + 1)
- }
- ls.reg.Push(LString(message))
- ls.Panic(ls)
-}
-
-func (ls *LState) findLocal(frame *callFrame, no int) string {
- fn := frame.Fn
- if !fn.IsG {
- if name, ok := fn.LocalName(no, frame.Pc-1); ok {
- return name
- }
- }
- var top int
- if ls.currentFrame == frame {
- top = ls.reg.Top()
- } else if frame.Idx+1 < ls.stack.Sp() {
- top = ls.stack.At(frame.Idx + 1).Base
- } else {
- return ""
- }
- if top-frame.LocalBase >= no {
- return "(*temporary)"
- }
- return ""
-}
-
-func (ls *LState) where(level int, skipg bool) string {
- dbg, ok := ls.GetStack(level)
- if !ok {
- return ""
- }
- cf := dbg.frame
- proto := cf.Fn.Proto
- sourcename := "[G]"
- if proto != nil {
- sourcename = proto.SourceName
- } else if skipg {
- return ls.where(level+1, skipg)
- }
- line := ""
- if proto != nil {
- line = fmt.Sprintf("%v:", proto.DbgSourcePositions[cf.Pc-1])
- }
- return fmt.Sprintf("%v:%v", sourcename, line)
-}
-
-func (ls *LState) stackTrace(level int) string {
- buf := []string{}
- header := "stack traceback:"
- if ls.currentFrame != nil {
- i := 0
- for dbg, ok := ls.GetStack(i); ok; dbg, ok = ls.GetStack(i) {
- cf := dbg.frame
- buf = append(buf, fmt.Sprintf("\t%v in %v", ls.Where(i), ls.formattedFrameFuncName(cf)))
- if !cf.Fn.IsG && cf.TailCall > 0 {
- for tc := cf.TailCall; tc > 0; tc-- {
- buf = append(buf, "\t(tailcall): ?")
- i++
- }
- }
- i++
- }
- }
- buf = append(buf, fmt.Sprintf("\t%v: %v", "[G]", "?"))
- buf = buf[intMax(0, intMin(level, len(buf))):len(buf)]
- if len(buf) > 20 {
- newbuf := make([]string, 0, 20)
- newbuf = append(newbuf, buf[0:7]...)
- newbuf = append(newbuf, "\t...")
- newbuf = append(newbuf, buf[len(buf)-7:len(buf)]...)
- buf = newbuf
- }
- return fmt.Sprintf("%s\n%s", header, strings.Join(buf, "\n"))
-}
-
-func (ls *LState) formattedFrameFuncName(fr *callFrame) string {
- name, ischunk := ls.frameFuncName(fr)
- if ischunk {
- return name
- }
- if name[0] != '(' && name[0] != '<' {
- return fmt.Sprintf("function '%s'", name)
- }
- return fmt.Sprintf("function %s", name)
-}
-
-func (ls *LState) rawFrameFuncName(fr *callFrame) string {
- name, _ := ls.frameFuncName(fr)
- return name
-}
-
-func (ls *LState) frameFuncName(fr *callFrame) (string, bool) {
- frame := fr.Parent
- if frame == nil {
- if ls.Parent == nil {
- return "main chunk", true
- } else {
- return "corountine", true
- }
- }
- if !frame.Fn.IsG {
- pc := frame.Pc - 1
- for _, call := range frame.Fn.Proto.DbgCalls {
- if call.Pc == pc {
- name := call.Name
- if (name == "?" || fr.TailCall > 0) && !fr.Fn.IsG {
- name = fmt.Sprintf("<%v:%v>", fr.Fn.Proto.SourceName, fr.Fn.Proto.LineDefined)
- }
- return name, false
- }
- }
- }
- if !fr.Fn.IsG {
- return fmt.Sprintf("<%v:%v>", fr.Fn.Proto.SourceName, fr.Fn.Proto.LineDefined), false
- }
- return "(anonymous)", false
-}
-
-func (ls *LState) isStarted() bool {
- return ls.currentFrame != nil
-}
-
-func (ls *LState) kill() {
- ls.Dead = true
-}
-
-func (ls *LState) indexToReg(idx int) int {
- base := ls.currentLocalBase()
- if idx > 0 {
- return base + idx - 1
- } else if idx == 0 {
- return -1
- } else {
- tidx := ls.reg.Top() + idx
- if tidx < base {
- return -1
- }
- return tidx
- }
-}
-
-func (ls *LState) currentLocalBase() int {
- base := 0
- if ls.currentFrame != nil {
- base = ls.currentFrame.LocalBase
- }
- return base
-}
-
-func (ls *LState) currentEnv() *LTable {
- return ls.Env
- /*
- if ls.currentFrame == nil {
- return ls.Env
- }
- return ls.currentFrame.Fn.Env
- */
-}
-
-func (ls *LState) rkValue(idx int) LValue {
- /*
- if OpIsK(idx) {
- return ls.currentFrame.Fn.Proto.Constants[opIndexK(idx)]
- }
- return ls.reg.Get(ls.currentFrame.LocalBase + idx)
- */
- if (idx & opBitRk) != 0 {
- return ls.currentFrame.Fn.Proto.Constants[idx & ^opBitRk]
- }
- return ls.reg.array[ls.currentFrame.LocalBase+idx]
-}
-
-func (ls *LState) rkString(idx int) string {
- if (idx & opBitRk) != 0 {
- return ls.currentFrame.Fn.Proto.stringConstants[idx & ^opBitRk]
- }
- return string(ls.reg.array[ls.currentFrame.LocalBase+idx].(LString))
-}
-
-func (ls *LState) closeUpvalues(idx int) { // +inline-start
- if ls.uvcache != nil {
- var prev *Upvalue
- for uv := ls.uvcache; uv != nil; uv = uv.next {
- if uv.index >= idx {
- if prev != nil {
- prev.next = nil
- } else {
- ls.uvcache = nil
- }
- uv.Close()
- }
- prev = uv
- }
- }
-} // +inline-end
-
-func (ls *LState) findUpvalue(idx int) *Upvalue {
- var prev *Upvalue
- var next *Upvalue
- if ls.uvcache != nil {
- for uv := ls.uvcache; uv != nil; uv = uv.next {
- if uv.index == idx {
- return uv
- }
- if uv.index > idx {
- next = uv
- break
- }
- prev = uv
- }
- }
- uv := &Upvalue{reg: ls.reg, index: idx, closed: false}
- if prev != nil {
- prev.next = uv
- } else {
- ls.uvcache = uv
- }
- if next != nil {
- uv.next = next
- }
- return uv
-}
-
-func (ls *LState) metatable(lvalue LValue, rawget bool) LValue {
- var metatable LValue = LNil
- switch obj := lvalue.(type) {
- case *LTable:
- metatable = obj.Metatable
- case *LUserData:
- metatable = obj.Metatable
- default:
- if table, ok := ls.G.builtinMts[int(obj.Type())]; ok {
- metatable = table
- }
- }
-
- if !rawget && metatable != LNil {
- oldmt := metatable
- if tb, ok := metatable.(*LTable); ok {
- metatable = tb.RawGetString("__metatable")
- if metatable == LNil {
- metatable = oldmt
- }
- }
- }
-
- return metatable
-}
-
-func (ls *LState) metaOp1(lvalue LValue, event string) LValue {
- if mt := ls.metatable(lvalue, true); mt != LNil {
- if tb, ok := mt.(*LTable); ok {
- return tb.RawGetString(event)
- }
- }
- return LNil
-}
-
-func (ls *LState) metaOp2(value1, value2 LValue, event string) LValue {
- if mt := ls.metatable(value1, true); mt != LNil {
- if tb, ok := mt.(*LTable); ok {
- if ret := tb.RawGetString(event); ret != LNil {
- return ret
- }
- }
- }
- if mt := ls.metatable(value2, true); mt != LNil {
- if tb, ok := mt.(*LTable); ok {
- return tb.RawGetString(event)
- }
- }
- return LNil
-}
-
-func (ls *LState) metaCall(lvalue LValue) (*LFunction, bool) {
- if fn, ok := lvalue.(*LFunction); ok {
- return fn, false
- }
- if fn, ok := ls.metaOp1(lvalue, "__call").(*LFunction); ok {
- return fn, true
- }
- return nil, false
-}
-
-func (ls *LState) initCallFrame(cf *callFrame) { // +inline-start
- if cf.Fn.IsG {
- ls.reg.SetTop(cf.LocalBase + cf.NArgs)
- } else {
- proto := cf.Fn.Proto
- nargs := cf.NArgs
- np := int(proto.NumParameters)
- if nargs < np {
- // default any missing arguments to nil
- newSize := cf.LocalBase + np
- // +inline-call ls.reg.checkSize newSize
- for i := nargs; i < np; i++ {
- ls.reg.array[cf.LocalBase+i] = LNil
- }
- nargs = np
- ls.reg.top = newSize
- }
-
- if (proto.IsVarArg & VarArgIsVarArg) == 0 {
- if nargs < int(proto.NumUsedRegisters) {
- nargs = int(proto.NumUsedRegisters)
- }
- newSize := cf.LocalBase + nargs
- // +inline-call ls.reg.checkSize newSize
- for i := np; i < nargs; i++ {
- ls.reg.array[cf.LocalBase+i] = LNil
- }
- ls.reg.top = cf.LocalBase + int(proto.NumUsedRegisters)
- } else {
- /* swap vararg positions:
- closure
- namedparam1 <- lbase
- namedparam2
- vararg1
- vararg2
-
- TO
-
- closure
- nil
- nil
- vararg1
- vararg2
- namedparam1 <- lbase
- namedparam2
- */
- nvarargs := nargs - np
- if nvarargs < 0 {
- nvarargs = 0
- }
-
- ls.reg.SetTop(cf.LocalBase + nargs + np)
- for i := 0; i < np; i++ {
- //ls.reg.Set(cf.LocalBase+nargs+i, ls.reg.Get(cf.LocalBase+i))
- ls.reg.array[cf.LocalBase+nargs+i] = ls.reg.array[cf.LocalBase+i]
- //ls.reg.Set(cf.LocalBase+i, LNil)
- ls.reg.array[cf.LocalBase+i] = LNil
- }
-
- if CompatVarArg {
- ls.reg.SetTop(cf.LocalBase + nargs + np + 1)
- if (proto.IsVarArg & VarArgNeedsArg) != 0 {
- argtb := newLTable(nvarargs, 0)
- for i := 0; i < nvarargs; i++ {
- argtb.RawSetInt(i+1, ls.reg.Get(cf.LocalBase+np+i))
- }
- argtb.RawSetString("n", LNumber(nvarargs))
- //ls.reg.Set(cf.LocalBase+nargs+np, argtb)
- ls.reg.array[cf.LocalBase+nargs+np] = argtb
- } else {
- ls.reg.array[cf.LocalBase+nargs+np] = LNil
- }
- }
- cf.LocalBase += nargs
- maxreg := cf.LocalBase + int(proto.NumUsedRegisters)
- ls.reg.SetTop(maxreg)
- }
- }
-} // +inline-end
-
-func (ls *LState) pushCallFrame(cf callFrame, fn LValue, meta bool) { // +inline-start
- if meta {
- cf.NArgs++
- ls.reg.Insert(fn, cf.LocalBase)
- }
- if cf.Fn == nil {
- ls.RaiseError("attempt to call a non-function object")
- }
- if ls.stack.IsFull() {
- ls.RaiseError("stack overflow")
- }
- ls.stack.Push(cf)
- newcf := ls.stack.Last()
- // +inline-call ls.initCallFrame newcf
- ls.currentFrame = newcf
-} // +inline-end
-
-func (ls *LState) callR(nargs, nret, rbase int) {
- base := ls.reg.Top() - nargs - 1
- if rbase < 0 {
- rbase = base
- }
- lv := ls.reg.Get(base)
- fn, meta := ls.metaCall(lv)
- ls.pushCallFrame(callFrame{
- Fn: fn,
- Pc: 0,
- Base: base,
- LocalBase: base + 1,
- ReturnBase: rbase,
- NArgs: nargs,
- NRet: nret,
- Parent: ls.currentFrame,
- TailCall: 0,
- }, lv, meta)
- if ls.G.MainThread == nil {
- ls.G.MainThread = ls
- ls.G.CurrentThread = ls
- ls.mainLoop(ls, nil)
- } else {
- ls.mainLoop(ls, ls.currentFrame)
- }
- if nret != MultRet {
- ls.reg.SetTop(rbase + nret)
- }
-}
-
-func (ls *LState) getField(obj LValue, key LValue) LValue {
- curobj := obj
- for i := 0; i < MaxTableGetLoop; i++ {
- tb, istable := curobj.(*LTable)
- if istable {
- ret := tb.RawGet(key)
- if ret != LNil {
- return ret
- }
- }
- metaindex := ls.metaOp1(curobj, "__index")
- if metaindex == LNil {
- if !istable {
- ls.RaiseError("attempt to index a non-table object(%v) with key '%s'", curobj.Type().String(), key.String())
- }
- return LNil
- }
- if metaindex.Type() == LTFunction {
- ls.reg.Push(metaindex)
- ls.reg.Push(curobj)
- ls.reg.Push(key)
- ls.Call(2, 1)
- return ls.reg.Pop()
- } else {
- curobj = metaindex
- }
- }
- ls.RaiseError("too many recursions in gettable")
- return nil
-}
-
-func (ls *LState) getFieldString(obj LValue, key string) LValue {
- curobj := obj
- for i := 0; i < MaxTableGetLoop; i++ {
- tb, istable := curobj.(*LTable)
- if istable {
- ret := tb.RawGetString(key)
- if ret != LNil {
- return ret
- }
- }
- metaindex := ls.metaOp1(curobj, "__index")
- if metaindex == LNil {
- if !istable {
- ls.RaiseError("attempt to index a non-table object(%v) with key '%s'", curobj.Type().String(), key)
- }
- return LNil
- }
- if metaindex.Type() == LTFunction {
- ls.reg.Push(metaindex)
- ls.reg.Push(curobj)
- ls.reg.Push(LString(key))
- ls.Call(2, 1)
- return ls.reg.Pop()
- } else {
- curobj = metaindex
- }
- }
- ls.RaiseError("too many recursions in gettable")
- return nil
-}
-
-func (ls *LState) setField(obj LValue, key LValue, value LValue) {
- curobj := obj
- for i := 0; i < MaxTableGetLoop; i++ {
- tb, istable := curobj.(*LTable)
- if istable {
- if tb.RawGet(key) != LNil {
- ls.RawSet(tb, key, value)
- return
- }
- }
- metaindex := ls.metaOp1(curobj, "__newindex")
- if metaindex == LNil {
- if !istable {
- ls.RaiseError("attempt to index a non-table object(%v) with key '%s'", curobj.Type().String(), key.String())
- }
- ls.RawSet(tb, key, value)
- return
- }
- if metaindex.Type() == LTFunction {
- ls.reg.Push(metaindex)
- ls.reg.Push(curobj)
- ls.reg.Push(key)
- ls.reg.Push(value)
- ls.Call(3, 0)
- return
- } else {
- curobj = metaindex
- }
- }
- ls.RaiseError("too many recursions in settable")
-}
-
-func (ls *LState) setFieldString(obj LValue, key string, value LValue) {
- curobj := obj
- for i := 0; i < MaxTableGetLoop; i++ {
- tb, istable := curobj.(*LTable)
- if istable {
- if tb.RawGetString(key) != LNil {
- tb.RawSetString(key, value)
- return
- }
- }
- metaindex := ls.metaOp1(curobj, "__newindex")
- if metaindex == LNil {
- if !istable {
- ls.RaiseError("attempt to index a non-table object(%v) with key '%s'", curobj.Type().String(), key)
- }
- tb.RawSetString(key, value)
- return
- }
- if metaindex.Type() == LTFunction {
- ls.reg.Push(metaindex)
- ls.reg.Push(curobj)
- ls.reg.Push(LString(key))
- ls.reg.Push(value)
- ls.Call(3, 0)
- return
- } else {
- curobj = metaindex
- }
- }
- ls.RaiseError("too many recursions in settable")
-}
-
-/* }}} */
-
-/* api methods {{{ */
-
-func NewState(opts ...Options) *LState {
- var ls *LState
- if len(opts) == 0 {
- ls = newLState(Options{
- CallStackSize: CallStackSize,
- RegistrySize: RegistrySize,
- })
- ls.OpenLibs()
- } else {
- if opts[0].CallStackSize < 1 {
- opts[0].CallStackSize = CallStackSize
- }
- if opts[0].RegistrySize < 128 {
- opts[0].RegistrySize = RegistrySize
- }
- if opts[0].RegistryMaxSize < opts[0].RegistrySize {
- opts[0].RegistryMaxSize = 0 // disable growth if max size is smaller than initial size
- } else {
- // if growth enabled, grow step is set
- if opts[0].RegistryGrowStep < 1 {
- opts[0].RegistryGrowStep = RegistryGrowStep
- }
- }
- ls = newLState(opts[0])
- if !opts[0].SkipOpenLibs {
- ls.OpenLibs()
- }
- }
- return ls
-}
-
-func (ls *LState) IsClosed() bool {
- return ls.stack == nil
-}
-
-func (ls *LState) Close() {
- atomic.AddInt32(&ls.stop, 1)
- for _, file := range ls.G.tempFiles {
- // ignore errors in these operations
- file.Close()
- os.Remove(file.Name())
- }
- ls.stack.FreeAll()
- ls.stack = nil
-}
-
-/* registry operations {{{ */
-
-func (ls *LState) GetTop() int {
- return ls.reg.Top() - ls.currentLocalBase()
-}
-
-func (ls *LState) SetTop(idx int) {
- base := ls.currentLocalBase()
- newtop := ls.indexToReg(idx) + 1
- if newtop < base {
- ls.reg.SetTop(base)
- } else {
- ls.reg.SetTop(newtop)
- }
-}
-
-func (ls *LState) Replace(idx int, value LValue) {
- base := ls.currentLocalBase()
- if idx > 0 {
- reg := base + idx - 1
- if reg < ls.reg.Top() {
- ls.reg.Set(reg, value)
- }
- } else if idx == 0 {
- } else if idx > RegistryIndex {
- if tidx := ls.reg.Top() + idx; tidx >= base {
- ls.reg.Set(tidx, value)
- }
- } else {
- switch idx {
- case RegistryIndex:
- if tb, ok := value.(*LTable); ok {
- ls.G.Registry = tb
- } else {
- ls.RaiseError("registry must be a table(%v)", value.Type().String())
- }
- case EnvironIndex:
- if ls.currentFrame == nil {
- ls.RaiseError("no calling environment")
- }
- if tb, ok := value.(*LTable); ok {
- ls.currentFrame.Fn.Env = tb
- } else {
- ls.RaiseError("environment must be a table(%v)", value.Type().String())
- }
- case GlobalsIndex:
- if tb, ok := value.(*LTable); ok {
- ls.G.Global = tb
- } else {
- ls.RaiseError("_G must be a table(%v)", value.Type().String())
- }
- default:
- fn := ls.currentFrame.Fn
- index := GlobalsIndex - idx - 1
- if index < len(fn.Upvalues) {
- fn.Upvalues[index].SetValue(value)
- }
- }
- }
-}
-
-func (ls *LState) Get(idx int) LValue {
- base := ls.currentLocalBase()
- if idx > 0 {
- reg := base + idx - 1
- if reg < ls.reg.Top() {
- return ls.reg.Get(reg)
- }
- return LNil
- } else if idx == 0 {
- return LNil
- } else if idx > RegistryIndex {
- tidx := ls.reg.Top() + idx
- if tidx < base {
- return LNil
- }
- return ls.reg.Get(tidx)
- } else {
- switch idx {
- case RegistryIndex:
- return ls.G.Registry
- case EnvironIndex:
- if ls.currentFrame == nil {
- return ls.Env
- }
- return ls.currentFrame.Fn.Env
- case GlobalsIndex:
- return ls.G.Global
- default:
- fn := ls.currentFrame.Fn
- index := GlobalsIndex - idx - 1
- if index < len(fn.Upvalues) {
- return fn.Upvalues[index].Value()
- }
- return LNil
- }
- }
- return LNil
-}
-
-func (ls *LState) Push(value LValue) {
- ls.reg.Push(value)
-}
-
-func (ls *LState) Pop(n int) {
- for i := 0; i < n; i++ {
- if ls.GetTop() == 0 {
- ls.RaiseError("register underflow")
- }
- ls.reg.Pop()
- }
-}
-
-func (ls *LState) Insert(value LValue, index int) {
- reg := ls.indexToReg(index)
- top := ls.reg.Top()
- if reg >= top {
- ls.reg.Set(reg, value)
- return
- }
- if reg <= ls.currentLocalBase() {
- reg = ls.currentLocalBase()
- }
- top--
- for ; top >= reg; top-- {
- ls.reg.Set(top+1, ls.reg.Get(top))
- }
- ls.reg.Set(reg, value)
-}
-
-func (ls *LState) Remove(index int) {
- reg := ls.indexToReg(index)
- top := ls.reg.Top()
- switch {
- case reg >= top:
- return
- case reg < ls.currentLocalBase():
- return
- case reg == top-1:
- ls.Pop(1)
- return
- }
- for i := reg; i < top-1; i++ {
- ls.reg.Set(i, ls.reg.Get(i+1))
- }
- ls.reg.SetTop(top - 1)
-}
-
-/* }}} */
-
-/* object allocation {{{ */
-
-func (ls *LState) NewTable() *LTable {
- return newLTable(defaultArrayCap, defaultHashCap)
-}
-
-func (ls *LState) CreateTable(acap, hcap int) *LTable {
- return newLTable(acap, hcap)
-}
-
-// NewThread returns a new LState that shares with the original state all global objects.
-// If the original state has context.Context, the new state has a new child context of the original state and this function returns its cancel function.
-func (ls *LState) NewThread() (*LState, context.CancelFunc) {
- thread := newLState(ls.Options)
- thread.G = ls.G
- thread.Env = ls.Env
- var f context.CancelFunc = nil
- if ls.ctx != nil {
- thread.mainLoop = mainLoopWithContext
- thread.ctx, f = context.WithCancel(ls.ctx)
- }
- return thread, f
-}
-
-func (ls *LState) NewFunctionFromProto(proto *FunctionProto) *LFunction {
- return newLFunctionL(proto, ls.Env, int(proto.NumUpvalues))
-}
-
-func (ls *LState) NewUserData() *LUserData {
- return &LUserData{
- Env: ls.currentEnv(),
- Metatable: LNil,
- }
-}
-
-func (ls *LState) NewFunction(fn LGFunction) *LFunction {
- return newLFunctionG(fn, ls.currentEnv(), 0)
-}
-
-func (ls *LState) NewClosure(fn LGFunction, upvalues ...LValue) *LFunction {
- cl := newLFunctionG(fn, ls.currentEnv(), len(upvalues))
- for i, lv := range upvalues {
- cl.Upvalues[i] = &Upvalue{}
- cl.Upvalues[i].Close()
- cl.Upvalues[i].SetValue(lv)
- }
- return cl
-}
-
-/* }}} */
-
-/* toType {{{ */
-
-func (ls *LState) ToBool(n int) bool {
- return LVAsBool(ls.Get(n))
-}
-
-func (ls *LState) ToInt(n int) int {
- if lv, ok := ls.Get(n).(LNumber); ok {
- return int(lv)
- }
- if lv, ok := ls.Get(n).(LString); ok {
- if num, err := parseNumber(string(lv)); err == nil {
- return int(num)
- }
- }
- return 0
-}
-
-func (ls *LState) ToInt64(n int) int64 {
- if lv, ok := ls.Get(n).(LNumber); ok {
- return int64(lv)
- }
- if lv, ok := ls.Get(n).(LString); ok {
- if num, err := parseNumber(string(lv)); err == nil {
- return int64(num)
- }
- }
- return 0
-}
-
-func (ls *LState) ToNumber(n int) LNumber {
- return LVAsNumber(ls.Get(n))
-}
-
-func (ls *LState) ToString(n int) string {
- return LVAsString(ls.Get(n))
-}
-
-func (ls *LState) ToTable(n int) *LTable {
- if lv, ok := ls.Get(n).(*LTable); ok {
- return lv
- }
- return nil
-}
-
-func (ls *LState) ToFunction(n int) *LFunction {
- if lv, ok := ls.Get(n).(*LFunction); ok {
- return lv
- }
- return nil
-}
-
-func (ls *LState) ToUserData(n int) *LUserData {
- if lv, ok := ls.Get(n).(*LUserData); ok {
- return lv
- }
- return nil
-}
-
-func (ls *LState) ToThread(n int) *LState {
- if lv, ok := ls.Get(n).(*LState); ok {
- return lv
- }
- return nil
-}
-
-/* }}} */
-
-/* error & debug operations {{{ */
-
-func (ls *LState) registryOverflow() {
- ls.RaiseError("registry overflow")
-}
-
-// This function is equivalent to luaL_error( http://www.lua.org/manual/5.1/manual.html#luaL_error ).
-func (ls *LState) RaiseError(format string, args ...interface{}) {
- ls.raiseError(1, format, args...)
-}
-
-// This function is equivalent to lua_error( http://www.lua.org/manual/5.1/manual.html#lua_error ).
-func (ls *LState) Error(lv LValue, level int) {
- if str, ok := lv.(LString); ok {
- ls.raiseError(level, string(str))
- } else {
- if !ls.hasErrorFunc {
- ls.closeAllUpvalues()
- }
- ls.Push(lv)
- ls.Panic(ls)
- }
-}
-
-func (ls *LState) GetInfo(what string, dbg *Debug, fn LValue) (LValue, error) {
- if !strings.HasPrefix(what, ">") {
- fn = dbg.frame.Fn
- } else {
- what = what[1:]
- }
- f, ok := fn.(*LFunction)
- if !ok {
- return LNil, newApiErrorS(ApiErrorRun, "can not get debug info(an object in not a function)")
- }
-
- retfn := false
- for _, c := range what {
- switch c {
- case 'f':
- retfn = true
- case 'S':
- if dbg.frame != nil && dbg.frame.Parent == nil {
- dbg.What = "main"
- } else if f.IsG {
- dbg.What = "G"
- } else if dbg.frame != nil && dbg.frame.TailCall > 0 {
- dbg.What = "tail"
- } else {
- dbg.What = "Lua"
- }
- if !f.IsG {
- dbg.Source = f.Proto.SourceName
- dbg.LineDefined = f.Proto.LineDefined
- dbg.LastLineDefined = f.Proto.LastLineDefined
- }
- case 'l':
- if !f.IsG && dbg.frame != nil {
- if dbg.frame.Pc > 0 {
- dbg.CurrentLine = f.Proto.DbgSourcePositions[dbg.frame.Pc-1]
- }
- } else {
- dbg.CurrentLine = -1
- }
- case 'u':
- dbg.NUpvalues = len(f.Upvalues)
- case 'n':
- if dbg.frame != nil {
- dbg.Name = ls.rawFrameFuncName(dbg.frame)
- }
- default:
- return LNil, newApiErrorS(ApiErrorRun, "invalid what: "+string(c))
- }
- }
-
- if retfn {
- return f, nil
- }
- return LNil, nil
-
-}
-
-func (ls *LState) GetStack(level int) (*Debug, bool) {
- frame := ls.currentFrame
- for ; level > 0 && frame != nil; frame = frame.Parent {
- level--
- if !frame.Fn.IsG {
- level -= frame.TailCall
- }
- }
-
- if level == 0 && frame != nil {
- return &Debug{frame: frame}, true
- } else if level < 0 && ls.stack.Sp() > 0 {
- return &Debug{frame: ls.stack.At(0)}, true
- }
- return &Debug{}, false
-}
-
-func (ls *LState) GetLocal(dbg *Debug, no int) (string, LValue) {
- frame := dbg.frame
- if name := ls.findLocal(frame, no); len(name) > 0 {
- return name, ls.reg.Get(frame.LocalBase + no - 1)
- }
- return "", LNil
-}
-
-func (ls *LState) SetLocal(dbg *Debug, no int, lv LValue) string {
- frame := dbg.frame
- if name := ls.findLocal(frame, no); len(name) > 0 {
- ls.reg.Set(frame.LocalBase+no-1, lv)
- return name
- }
- return ""
-}
-
-func (ls *LState) GetUpvalue(fn *LFunction, no int) (string, LValue) {
- if fn.IsG {
- return "", LNil
- }
-
- no--
- if no >= 0 && no < len(fn.Upvalues) {
- return fn.Proto.DbgUpvalues[no], fn.Upvalues[no].Value()
- }
- return "", LNil
-}
-
-func (ls *LState) SetUpvalue(fn *LFunction, no int, lv LValue) string {
- if fn.IsG {
- return ""
- }
-
- no--
- if no >= 0 && no < len(fn.Upvalues) {
- fn.Upvalues[no].SetValue(lv)
- return fn.Proto.DbgUpvalues[no]
- }
- return ""
-}
-
-/* }}} */
-
-/* env operations {{{ */
-
-func (ls *LState) GetFEnv(obj LValue) LValue {
- switch lv := obj.(type) {
- case *LFunction:
- return lv.Env
- case *LUserData:
- return lv.Env
- case *LState:
- return lv.Env
- }
- return LNil
-}
-
-func (ls *LState) SetFEnv(obj LValue, env LValue) {
- tb, ok := env.(*LTable)
- if !ok {
- ls.RaiseError("cannot use %v as an environment", env.Type().String())
- }
-
- switch lv := obj.(type) {
- case *LFunction:
- lv.Env = tb
- case *LUserData:
- lv.Env = tb
- case *LState:
- lv.Env = tb
- }
- /* do nothing */
-}
-
-/* }}} */
-
-/* table operations {{{ */
-
-func (ls *LState) RawGet(tb *LTable, key LValue) LValue {
- return tb.RawGet(key)
-}
-
-func (ls *LState) RawGetInt(tb *LTable, key int) LValue {
- return tb.RawGetInt(key)
-}
-
-func (ls *LState) GetField(obj LValue, skey string) LValue {
- return ls.getFieldString(obj, skey)
-}
-
-func (ls *LState) GetTable(obj LValue, key LValue) LValue {
- return ls.getField(obj, key)
-}
-
-func (ls *LState) RawSet(tb *LTable, key LValue, value LValue) {
- if n, ok := key.(LNumber); ok && math.IsNaN(float64(n)) {
- ls.RaiseError("table index is NaN")
- } else if key == LNil {
- ls.RaiseError("table index is nil")
- }
- tb.RawSet(key, value)
-}
-
-func (ls *LState) RawSetInt(tb *LTable, key int, value LValue) {
- tb.RawSetInt(key, value)
-}
-
-func (ls *LState) SetField(obj LValue, key string, value LValue) {
- ls.setFieldString(obj, key, value)
-}
-
-func (ls *LState) SetTable(obj LValue, key LValue, value LValue) {
- ls.setField(obj, key, value)
-}
-
-func (ls *LState) ForEach(tb *LTable, cb func(LValue, LValue)) {
- tb.ForEach(cb)
-}
-
-func (ls *LState) GetGlobal(name string) LValue {
- return ls.GetField(ls.Get(GlobalsIndex), name)
-}
-
-func (ls *LState) SetGlobal(name string, value LValue) {
- ls.SetField(ls.Get(GlobalsIndex), name, value)
-}
-
-func (ls *LState) Next(tb *LTable, key LValue) (LValue, LValue) {
- return tb.Next(key)
-}
-
-/* }}} */
-
-/* unary operations {{{ */
-
-func (ls *LState) ObjLen(v1 LValue) int {
- if v1.Type() == LTString {
- return len(string(v1.(LString)))
- }
- op := ls.metaOp1(v1, "__len")
- if op.Type() == LTFunction {
- ls.Push(op)
- ls.Push(v1)
- ls.Call(1, 1)
- ret := ls.reg.Pop()
- if ret.Type() == LTNumber {
- return int(ret.(LNumber))
- }
- } else if v1.Type() == LTTable {
- return v1.(*LTable).Len()
- }
- return 0
-}
-
-/* }}} */
-
-/* binary operations {{{ */
-
-func (ls *LState) Concat(values ...LValue) string {
- top := ls.reg.Top()
- for _, value := range values {
- ls.reg.Push(value)
- }
- ret := stringConcat(ls, len(values), ls.reg.Top()-1)
- ls.reg.SetTop(top)
- return LVAsString(ret)
-}
-
-func (ls *LState) LessThan(lhs, rhs LValue) bool {
- return lessThan(ls, lhs, rhs)
-}
-
-func (ls *LState) Equal(lhs, rhs LValue) bool {
- return equals(ls, lhs, rhs, false)
-}
-
-func (ls *LState) RawEqual(lhs, rhs LValue) bool {
- return equals(ls, lhs, rhs, true)
-}
-
-/* }}} */
-
-/* register operations {{{ */
-
-func (ls *LState) Register(name string, fn LGFunction) {
- ls.SetGlobal(name, ls.NewFunction(fn))
-}
-
-/* }}} */
-
-/* load and function call operations {{{ */
-
-func (ls *LState) Load(reader io.Reader, name string) (*LFunction, error) {
- chunk, err := parse.Parse(reader, name)
- if err != nil {
- return nil, newApiErrorE(ApiErrorSyntax, err)
- }
- proto, err := Compile(chunk, name)
- if err != nil {
- return nil, newApiErrorE(ApiErrorSyntax, err)
- }
- return newLFunctionL(proto, ls.currentEnv(), 0), nil
-}
-
-func (ls *LState) Call(nargs, nret int) {
- ls.callR(nargs, nret, -1)
-}
-
-func (ls *LState) PCall(nargs, nret int, errfunc *LFunction) (err error) {
- err = nil
- sp := ls.stack.Sp()
- base := ls.reg.Top() - nargs - 1
- oldpanic := ls.Panic
- ls.Panic = panicWithoutTraceback
- if errfunc != nil {
- ls.hasErrorFunc = true
- }
- defer func() {
- ls.Panic = oldpanic
- ls.hasErrorFunc = false
- rcv := recover()
- if rcv != nil {
- if _, ok := rcv.(*ApiError); !ok {
- err = newApiErrorS(ApiErrorPanic, fmt.Sprint(rcv))
- if ls.Options.IncludeGoStackTrace {
- buf := make([]byte, 4096)
- runtime.Stack(buf, false)
- err.(*ApiError).StackTrace = strings.Trim(string(buf), "\000") + "\n" + ls.stackTrace(0)
- }
- } else {
- err = rcv.(*ApiError)
- }
- if errfunc != nil {
- ls.Push(errfunc)
- ls.Push(err.(*ApiError).Object)
- ls.Panic = panicWithoutTraceback
- defer func() {
- ls.Panic = oldpanic
- rcv := recover()
- if rcv != nil {
- if _, ok := rcv.(*ApiError); !ok {
- err = newApiErrorS(ApiErrorPanic, fmt.Sprint(rcv))
- if ls.Options.IncludeGoStackTrace {
- buf := make([]byte, 4096)
- runtime.Stack(buf, false)
- err.(*ApiError).StackTrace = strings.Trim(string(buf), "\000") + ls.stackTrace(0)
- }
- } else {
- err = rcv.(*ApiError)
- err.(*ApiError).StackTrace = ls.stackTrace(0)
- }
- }
- }()
- ls.Call(1, 1)
- err = newApiError(ApiErrorError, ls.Get(-1))
- } else if len(err.(*ApiError).StackTrace) == 0 {
- err.(*ApiError).StackTrace = ls.stackTrace(0)
- }
- ls.stack.SetSp(sp)
- ls.currentFrame = ls.stack.Last()
- ls.reg.SetTop(base)
- }
- ls.stack.SetSp(sp)
- if sp == 0 {
- ls.currentFrame = nil
- }
- }()
-
- ls.Call(nargs, nret)
-
- return
-}
-
-func (ls *LState) GPCall(fn LGFunction, data LValue) error {
- ls.Push(newLFunctionG(fn, ls.currentEnv(), 0))
- ls.Push(data)
- return ls.PCall(1, MultRet, nil)
-}
-
-func (ls *LState) CallByParam(cp P, args ...LValue) error {
- ls.Push(cp.Fn)
- for _, arg := range args {
- ls.Push(arg)
- }
-
- if cp.Protect {
- return ls.PCall(len(args), cp.NRet, cp.Handler)
- }
- ls.Call(len(args), cp.NRet)
- return nil
-}
-
-/* }}} */
-
-/* metatable operations {{{ */
-
-func (ls *LState) GetMetatable(obj LValue) LValue {
- return ls.metatable(obj, false)
-}
-
-func (ls *LState) SetMetatable(obj LValue, mt LValue) {
- switch mt.(type) {
- case *LNilType, *LTable:
- default:
- ls.RaiseError("metatable must be a table or nil, but got %v", mt.Type().String())
- }
-
- switch v := obj.(type) {
- case *LTable:
- v.Metatable = mt
- case *LUserData:
- v.Metatable = mt
- default:
- ls.G.builtinMts[int(obj.Type())] = mt
- }
-}
-
-/* }}} */
-
-/* coroutine operations {{{ */
-
-func (ls *LState) Status(th *LState) string {
- status := "suspended"
- if th.Dead {
- status = "dead"
- } else if ls.G.CurrentThread == th {
- status = "running"
- } else if ls.Parent == th {
- status = "normal"
- }
- return status
-}
-
-func (ls *LState) Resume(th *LState, fn *LFunction, args ...LValue) (ResumeState, error, []LValue) {
- isstarted := th.isStarted()
- if !isstarted {
- base := 0
- th.stack.Push(callFrame{
- Fn: fn,
- Pc: 0,
- Base: base,
- LocalBase: base + 1,
- ReturnBase: base,
- NArgs: 0,
- NRet: MultRet,
- Parent: nil,
- TailCall: 0,
- })
- }
-
- if ls.G.CurrentThread == th {
- return ResumeError, newApiErrorS(ApiErrorRun, "can not resume a running thread"), nil
- }
- if th.Dead {
- return ResumeError, newApiErrorS(ApiErrorRun, "can not resume a dead thread"), nil
- }
- th.Parent = ls
- ls.G.CurrentThread = th
- if !isstarted {
- cf := th.stack.Last()
- th.currentFrame = cf
- th.SetTop(0)
- for _, arg := range args {
- th.Push(arg)
- }
- cf.NArgs = len(args)
- th.initCallFrame(cf)
- th.Panic = panicWithoutTraceback
- } else {
- for _, arg := range args {
- th.Push(arg)
- }
- }
- top := ls.GetTop()
- threadRun(th)
- haserror := LVIsFalse(ls.Get(top + 1))
- ret := make([]LValue, 0, ls.GetTop())
- for idx := top + 2; idx <= ls.GetTop(); idx++ {
- ret = append(ret, ls.Get(idx))
- }
- if len(ret) == 0 {
- ret = append(ret, LNil)
- }
- ls.SetTop(top)
-
- if haserror {
- return ResumeError, newApiError(ApiErrorRun, ret[0]), nil
- } else if th.stack.IsEmpty() {
- return ResumeOK, nil, ret
- }
- return ResumeYield, nil, ret
-}
-
-func (ls *LState) Yield(values ...LValue) int {
- ls.SetTop(0)
- for _, lv := range values {
- ls.Push(lv)
- }
- return -1
-}
-
-func (ls *LState) XMoveTo(other *LState, n int) {
- if ls == other {
- return
- }
- top := ls.GetTop()
- n = intMin(n, top)
- for i := n; i > 0; i-- {
- other.Push(ls.Get(top - i + 1))
- }
- ls.SetTop(top - n)
-}
-
-/* }}} */
-
-/* GopherLua original APIs {{{ */
-
-// Set maximum memory size. This function can only be called from the main thread.
-func (ls *LState) SetMx(mx int) {
- if ls.Parent != nil {
- ls.RaiseError("sub threads are not allowed to set a memory limit")
- }
- go func() {
- limit := uint64(mx * 1024 * 1024) //MB
- var s runtime.MemStats
- for atomic.LoadInt32(&ls.stop) == 0 {
- runtime.ReadMemStats(&s)
- if s.Alloc >= limit {
- fmt.Println("out of memory")
- os.Exit(3)
- }
- time.Sleep(100 * time.Millisecond)
- }
- }()
-}
-
-// SetContext set a context ctx to this LState. The provided ctx must be non-nil.
-func (ls *LState) SetContext(ctx context.Context) {
- ls.mainLoop = mainLoopWithContext
- ls.ctx = ctx
-}
-
-// Context returns the LState's context. To change the context, use WithContext.
-func (ls *LState) Context() context.Context {
- return ls.ctx
-}
-
-// RemoveContext removes the context associated with this LState and returns this context.
-func (ls *LState) RemoveContext() context.Context {
- oldctx := ls.ctx
- ls.mainLoop = mainLoop
- ls.ctx = nil
- return oldctx
-}
-
-// Converts the Lua value at the given acceptable index to the chan LValue.
-func (ls *LState) ToChannel(n int) chan LValue {
- if lv, ok := ls.Get(n).(LChannel); ok {
- return (chan LValue)(lv)
- }
- return nil
-}
-
-// RemoveCallerFrame removes the stack frame above the current stack frame. This is useful in tail calls. It returns
-// the new current frame.
-func (ls *LState) RemoveCallerFrame() *callFrame {
- cs := ls.stack
- sp := cs.Sp()
- parentFrame := cs.At(sp - 2)
- currentFrame := cs.At(sp - 1)
- parentsParentFrame := parentFrame.Parent
- *parentFrame = *currentFrame
- parentFrame.Parent = parentsParentFrame
- parentFrame.Idx = sp - 2
- cs.Pop()
- return parentFrame
-}
-
-/* }}} */
-
-/* }}} */
-
-//
diff --git a/vendor/github.com/yuin/gopher-lua/_vm.go b/vendor/github.com/yuin/gopher-lua/_vm.go
deleted file mode 100644
index 874ed9aa..00000000
--- a/vendor/github.com/yuin/gopher-lua/_vm.go
+++ /dev/null
@@ -1,1033 +0,0 @@
-package lua
-
-import (
- "fmt"
- "math"
- "strings"
-)
-
-func mainLoop(L *LState, baseframe *callFrame) {
- var inst uint32
- var cf *callFrame
-
- if L.stack.IsEmpty() {
- return
- }
-
- L.currentFrame = L.stack.Last()
- if L.currentFrame.Fn.IsG {
- callGFunction(L, false)
- return
- }
-
- for {
- cf = L.currentFrame
- inst = cf.Fn.Proto.Code[cf.Pc]
- cf.Pc++
- if jumpTable[int(inst>>26)](L, inst, baseframe) == 1 {
- return
- }
- }
-}
-
-func mainLoopWithContext(L *LState, baseframe *callFrame) {
- var inst uint32
- var cf *callFrame
-
- if L.stack.IsEmpty() {
- return
- }
-
- L.currentFrame = L.stack.Last()
- if L.currentFrame.Fn.IsG {
- callGFunction(L, false)
- return
- }
-
- for {
- cf = L.currentFrame
- inst = cf.Fn.Proto.Code[cf.Pc]
- cf.Pc++
- select {
- case <-L.ctx.Done():
- L.RaiseError(L.ctx.Err().Error())
- return
- default:
- if jumpTable[int(inst>>26)](L, inst, baseframe) == 1 {
- return
- }
- }
- }
-}
-
-// regv is the first target register to copy the return values to.
-// It can be reg.top, indicating that the copied values are going into new registers, or it can be below reg.top
-// Indicating that the values should be within the existing registers.
-// b is the available number of return values + 1.
-// n is the desired number of return values.
-// If n more than the available return values then the extra values are set to nil.
-// When this function returns the top of the registry will be set to regv+n.
-func copyReturnValues(L *LState, regv, start, n, b int) { // +inline-start
- if b == 1 {
- // +inline-call L.reg.FillNil regv n
- } else {
- // +inline-call L.reg.CopyRange regv start -1 n
- if b > 1 && n > (b-1) {
- // +inline-call L.reg.FillNil regv+b-1 n-(b-1)
- }
- }
-} // +inline-end
-
-func switchToParentThread(L *LState, nargs int, haserror bool, kill bool) {
- parent := L.Parent
- if parent == nil {
- L.RaiseError("can not yield from outside of a coroutine")
- }
- L.G.CurrentThread = parent
- L.Parent = nil
- if !L.wrapped {
- if haserror {
- parent.Push(LFalse)
- } else {
- parent.Push(LTrue)
- }
- }
- L.XMoveTo(parent, nargs)
- L.stack.Pop()
- offset := L.currentFrame.LocalBase - L.currentFrame.ReturnBase
- L.currentFrame = L.stack.Last()
- L.reg.SetTop(L.reg.Top() - offset) // remove 'yield' function(including tailcalled functions)
- if kill {
- L.kill()
- }
-}
-
-func callGFunction(L *LState, tailcall bool) bool {
- frame := L.currentFrame
- gfnret := frame.Fn.GFunction(L)
- if tailcall {
- L.currentFrame = L.RemoveCallerFrame()
- }
-
- if gfnret < 0 {
- switchToParentThread(L, L.GetTop(), false, false)
- return true
- }
-
- wantret := frame.NRet
- if wantret == MultRet {
- wantret = gfnret
- }
-
- if tailcall && L.Parent != nil && L.stack.Sp() == 1 {
- switchToParentThread(L, wantret, false, true)
- return true
- }
-
- // +inline-call L.reg.CopyRange frame.ReturnBase L.reg.Top()-gfnret -1 wantret
- L.stack.Pop()
- L.currentFrame = L.stack.Last()
- return false
-}
-
-func threadRun(L *LState) {
- if L.stack.IsEmpty() {
- return
- }
-
- defer func() {
- if rcv := recover(); rcv != nil {
- var lv LValue
- if v, ok := rcv.(*ApiError); ok {
- lv = v.Object
- } else {
- lv = LString(fmt.Sprint(rcv))
- }
- if parent := L.Parent; parent != nil {
- if L.wrapped {
- L.Push(lv)
- parent.Panic(L)
- } else {
- L.SetTop(0)
- L.Push(lv)
- switchToParentThread(L, 1, true, true)
- }
- } else {
- panic(rcv)
- }
- }
- }()
- L.mainLoop(L, nil)
-}
-
-type instFunc func(*LState, uint32, *callFrame) int
-
-var jumpTable [opCodeMax + 1]instFunc
-
-func init() {
- jumpTable = [opCodeMax + 1]instFunc{
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_MOVE
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- reg.Set(RA, reg.Get(lbase+B))
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_MOVEN
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- B := int(inst & 0x1ff) //GETB
- C := int(inst>>9) & 0x1ff //GETC
- reg.Set(lbase+A, reg.Get(lbase+B))
- code := cf.Fn.Proto.Code
- pc := cf.Pc
- for i := 0; i < C; i++ {
- inst = code[pc]
- pc++
- A = int(inst>>18) & 0xff //GETA
- B = int(inst & 0x1ff) //GETB
- reg.Set(lbase+A, reg.Get(lbase+B))
- }
- cf.Pc = pc
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_LOADK
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- Bx := int(inst & 0x3ffff) //GETBX
- reg.Set(RA, cf.Fn.Proto.Constants[Bx])
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_LOADBOOL
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- C := int(inst>>9) & 0x1ff //GETC
- if B != 0 {
- reg.Set(RA, LTrue)
- } else {
- reg.Set(RA, LFalse)
- }
- if C != 0 {
- cf.Pc++
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_LOADNIL
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- for i := RA; i <= lbase+B; i++ {
- reg.Set(i, LNil)
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_GETUPVAL
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- reg.Set(RA, cf.Fn.Upvalues[B].Value())
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_GETGLOBAL
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- Bx := int(inst & 0x3ffff) //GETBX
- //reg.Set(RA, L.getField(cf.Fn.Env, cf.Fn.Proto.Constants[Bx]))
- reg.Set(RA, L.getFieldString(cf.Fn.Env, cf.Fn.Proto.stringConstants[Bx]))
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_GETTABLE
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- C := int(inst>>9) & 0x1ff //GETC
- reg.Set(RA, L.getField(reg.Get(lbase+B), L.rkValue(C)))
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_GETTABLEKS
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- C := int(inst>>9) & 0x1ff //GETC
- reg.Set(RA, L.getFieldString(reg.Get(lbase+B), L.rkString(C)))
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_SETGLOBAL
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- Bx := int(inst & 0x3ffff) //GETBX
- //L.setField(cf.Fn.Env, cf.Fn.Proto.Constants[Bx], reg.Get(RA))
- L.setFieldString(cf.Fn.Env, cf.Fn.Proto.stringConstants[Bx], reg.Get(RA))
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_SETUPVAL
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- cf.Fn.Upvalues[B].SetValue(reg.Get(RA))
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_SETTABLE
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- C := int(inst>>9) & 0x1ff //GETC
- L.setField(reg.Get(RA), L.rkValue(B), L.rkValue(C))
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_SETTABLEKS
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- C := int(inst>>9) & 0x1ff //GETC
- L.setFieldString(reg.Get(RA), L.rkString(B), L.rkValue(C))
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_NEWTABLE
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- C := int(inst>>9) & 0x1ff //GETC
- reg.Set(RA, newLTable(B, C))
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_SELF
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- C := int(inst>>9) & 0x1ff //GETC
- selfobj := reg.Get(lbase + B)
- reg.Set(RA, L.getFieldString(selfobj, L.rkString(C)))
- reg.Set(RA+1, selfobj)
- return 0
- },
- opArith, // OP_ADD
- opArith, // OP_SUB
- opArith, // OP_MUL
- opArith, // OP_DIV
- opArith, // OP_MOD
- opArith, // OP_POW
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_UNM
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- unaryv := L.rkValue(B)
- if nm, ok := unaryv.(LNumber); ok {
- reg.SetNumber(RA, -nm)
- } else {
- op := L.metaOp1(unaryv, "__unm")
- if op.Type() == LTFunction {
- reg.Push(op)
- reg.Push(unaryv)
- L.Call(1, 1)
- reg.Set(RA, reg.Pop())
- } else if str, ok1 := unaryv.(LString); ok1 {
- if num, err := parseNumber(string(str)); err == nil {
- reg.Set(RA, -num)
- } else {
- L.RaiseError("__unm undefined")
- }
- } else {
- L.RaiseError("__unm undefined")
- }
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_NOT
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- if LVIsFalse(reg.Get(lbase + B)) {
- reg.Set(RA, LTrue)
- } else {
- reg.Set(RA, LFalse)
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_LEN
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- switch lv := L.rkValue(B).(type) {
- case LString:
- reg.SetNumber(RA, LNumber(len(lv)))
- default:
- op := L.metaOp1(lv, "__len")
- if op.Type() == LTFunction {
- reg.Push(op)
- reg.Push(lv)
- L.Call(1, 1)
- ret := reg.Pop()
- if ret.Type() == LTNumber {
- reg.SetNumber(RA, ret.(LNumber))
- } else {
- reg.SetNumber(RA, LNumber(0))
- }
- } else if lv.Type() == LTTable {
- reg.SetNumber(RA, LNumber(lv.(*LTable).Len()))
- } else {
- L.RaiseError("__len undefined")
- }
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_CONCAT
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- C := int(inst>>9) & 0x1ff //GETC
- RC := lbase + C
- RB := lbase + B
- reg.Set(RA, stringConcat(L, RC-RB+1, RC))
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_JMP
- cf := L.currentFrame
- Sbx := int(inst&0x3ffff) - opMaxArgSbx //GETSBX
- cf.Pc += Sbx
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_EQ
- cf := L.currentFrame
- A := int(inst>>18) & 0xff //GETA
- B := int(inst & 0x1ff) //GETB
- C := int(inst>>9) & 0x1ff //GETC
- ret := equals(L, L.rkValue(B), L.rkValue(C), false)
- v := 1
- if ret {
- v = 0
- }
- if v == A {
- cf.Pc++
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_LT
- cf := L.currentFrame
- A := int(inst>>18) & 0xff //GETA
- B := int(inst & 0x1ff) //GETB
- C := int(inst>>9) & 0x1ff //GETC
- ret := lessThan(L, L.rkValue(B), L.rkValue(C))
- v := 1
- if ret {
- v = 0
- }
- if v == A {
- cf.Pc++
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_LE
- cf := L.currentFrame
- A := int(inst>>18) & 0xff //GETA
- B := int(inst & 0x1ff) //GETB
- C := int(inst>>9) & 0x1ff //GETC
- lhs := L.rkValue(B)
- rhs := L.rkValue(C)
- ret := false
-
- if v1, ok1 := lhs.assertFloat64(); ok1 {
- if v2, ok2 := rhs.assertFloat64(); ok2 {
- ret = v1 <= v2
- } else {
- L.RaiseError("attempt to compare %v with %v", lhs.Type().String(), rhs.Type().String())
- }
- } else {
- if lhs.Type() != rhs.Type() {
- L.RaiseError("attempt to compare %v with %v", lhs.Type().String(), rhs.Type().String())
- }
- switch lhs.Type() {
- case LTString:
- ret = strCmp(string(lhs.(LString)), string(rhs.(LString))) <= 0
- default:
- switch objectRational(L, lhs, rhs, "__le") {
- case 1:
- ret = true
- case 0:
- ret = false
- default:
- ret = !objectRationalWithError(L, rhs, lhs, "__lt")
- }
- }
- }
-
- v := 1
- if ret {
- v = 0
- }
- if v == A {
- cf.Pc++
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_TEST
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- C := int(inst>>9) & 0x1ff //GETC
- if LVAsBool(reg.Get(RA)) == (C == 0) {
- cf.Pc++
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_TESTSET
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- C := int(inst>>9) & 0x1ff //GETC
- if value := reg.Get(lbase + B); LVAsBool(value) != (C == 0) {
- reg.Set(RA, value)
- } else {
- cf.Pc++
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_CALL
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- C := int(inst>>9) & 0x1ff //GETC
- nargs := B - 1
- if B == 0 {
- nargs = reg.Top() - (RA + 1)
- }
- lv := reg.Get(RA)
- nret := C - 1
- var callable *LFunction
- var meta bool
- if fn, ok := lv.assertFunction(); ok {
- callable = fn
- meta = false
- } else {
- callable, meta = L.metaCall(lv)
- }
- // +inline-call L.pushCallFrame callFrame{Fn:callable,Pc:0,Base:RA,LocalBase:RA+1,ReturnBase:RA,NArgs:nargs,NRet:nret,Parent:cf,TailCall:0} lv meta
- if callable.IsG && callGFunction(L, false) {
- return 1
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_TAILCALL
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- nargs := B - 1
- if B == 0 {
- nargs = reg.Top() - (RA + 1)
- }
- lv := reg.Get(RA)
- var callable *LFunction
- var meta bool
- if fn, ok := lv.assertFunction(); ok {
- callable = fn
- meta = false
- } else {
- callable, meta = L.metaCall(lv)
- }
- if callable == nil {
- L.RaiseError("attempt to call a non-function object")
- }
- // +inline-call L.closeUpvalues lbase
- if callable.IsG {
- luaframe := cf
- L.pushCallFrame(callFrame{
- Fn: callable,
- Pc: 0,
- Base: RA,
- LocalBase: RA + 1,
- ReturnBase: cf.ReturnBase,
- NArgs: nargs,
- NRet: cf.NRet,
- Parent: cf,
- TailCall: 0,
- }, lv, meta)
- if callGFunction(L, true) {
- return 1
- }
- if L.currentFrame == nil || L.currentFrame.Fn.IsG || luaframe == baseframe {
- return 1
- }
- } else {
- base := cf.Base
- cf.Fn = callable
- cf.Pc = 0
- cf.Base = RA
- cf.LocalBase = RA + 1
- cf.ReturnBase = cf.ReturnBase
- cf.NArgs = nargs
- cf.NRet = cf.NRet
- cf.TailCall++
- lbase := cf.LocalBase
- if meta {
- cf.NArgs++
- L.reg.Insert(lv, cf.LocalBase)
- }
- // +inline-call L.initCallFrame cf
- // +inline-call L.reg.CopyRange base RA -1 reg.Top()-RA-1
- cf.Base = base
- cf.LocalBase = base + (cf.LocalBase - lbase + 1)
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_RETURN
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- // +inline-call L.closeUpvalues lbase
- nret := B - 1
- if B == 0 {
- nret = reg.Top() - RA
- }
- n := cf.NRet
- if cf.NRet == MultRet {
- n = nret
- }
-
- if L.Parent != nil && L.stack.Sp() == 1 {
- // +inline-call copyReturnValues L reg.Top() RA n B
- switchToParentThread(L, n, false, true)
- return 1
- }
- islast := baseframe == L.stack.Pop() || L.stack.IsEmpty()
- // +inline-call copyReturnValues L cf.ReturnBase RA n B
- L.currentFrame = L.stack.Last()
- if islast || L.currentFrame == nil || L.currentFrame.Fn.IsG {
- return 1
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_FORLOOP
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- if init, ok1 := reg.Get(RA).assertFloat64(); ok1 {
- if limit, ok2 := reg.Get(RA + 1).assertFloat64(); ok2 {
- if step, ok3 := reg.Get(RA + 2).assertFloat64(); ok3 {
- init += step
- reg.SetNumber(RA, LNumber(init))
- if (step > 0 && init <= limit) || (step <= 0 && init >= limit) {
- Sbx := int(inst&0x3ffff) - opMaxArgSbx //GETSBX
- cf.Pc += Sbx
- reg.SetNumber(RA+3, LNumber(init))
- } else {
- reg.SetTop(RA + 1)
- }
- } else {
- L.RaiseError("for statement step must be a number")
- }
- } else {
- L.RaiseError("for statement limit must be a number")
- }
- } else {
- L.RaiseError("for statement init must be a number")
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_FORPREP
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- Sbx := int(inst&0x3ffff) - opMaxArgSbx //GETSBX
- if init, ok1 := reg.Get(RA).assertFloat64(); ok1 {
- if step, ok2 := reg.Get(RA + 2).assertFloat64(); ok2 {
- reg.SetNumber(RA, LNumber(init-step))
- } else {
- L.RaiseError("for statement step must be a number")
- }
- } else {
- L.RaiseError("for statement init must be a number")
- }
- cf.Pc += Sbx
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_TFORLOOP
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- C := int(inst>>9) & 0x1ff //GETC
- nret := C
- reg.SetTop(RA + 3 + 2)
- reg.Set(RA+3+2, reg.Get(RA+2))
- reg.Set(RA+3+1, reg.Get(RA+1))
- reg.Set(RA+3, reg.Get(RA))
- L.callR(2, nret, RA+3)
- if value := reg.Get(RA + 3); value != LNil {
- reg.Set(RA+2, value)
- pc := cf.Fn.Proto.Code[cf.Pc]
- cf.Pc += int(pc&0x3ffff) - opMaxArgSbx
- }
- cf.Pc++
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_SETLIST
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- C := int(inst>>9) & 0x1ff //GETC
- if C == 0 {
- C = int(cf.Fn.Proto.Code[cf.Pc])
- cf.Pc++
- }
- offset := (C - 1) * FieldsPerFlush
- table := reg.Get(RA).(*LTable)
- nelem := B
- if B == 0 {
- nelem = reg.Top() - RA - 1
- }
- for i := 1; i <= nelem; i++ {
- table.RawSetInt(offset+i, reg.Get(RA+i))
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_CLOSE
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- // +inline-call L.closeUpvalues RA
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_CLOSURE
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- Bx := int(inst & 0x3ffff) //GETBX
- proto := cf.Fn.Proto.FunctionPrototypes[Bx]
- closure := newLFunctionL(proto, cf.Fn.Env, int(proto.NumUpvalues))
- reg.Set(RA, closure)
- for i := 0; i < int(proto.NumUpvalues); i++ {
- inst = cf.Fn.Proto.Code[cf.Pc]
- cf.Pc++
- B := opGetArgB(inst)
- switch opGetOpCode(inst) {
- case OP_MOVE:
- closure.Upvalues[i] = L.findUpvalue(lbase + B)
- case OP_GETUPVAL:
- closure.Upvalues[i] = cf.Fn.Upvalues[B]
- }
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_VARARG
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- nparams := int(cf.Fn.Proto.NumParameters)
- nvarargs := cf.NArgs - nparams
- if nvarargs < 0 {
- nvarargs = 0
- }
- nwant := B - 1
- if B == 0 {
- nwant = nvarargs
- }
- // +inline-call reg.CopyRange RA cf.Base+nparams+1 cf.LocalBase nwant
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_NOP
- return 0
- },
- }
-}
-
-func opArith(L *LState, inst uint32, baseframe *callFrame) int { //OP_ADD, OP_SUB, OP_MUL, OP_DIV, OP_MOD, OP_POW
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- opcode := int(inst >> 26) //GETOPCODE
- B := int(inst & 0x1ff) //GETB
- C := int(inst>>9) & 0x1ff //GETC
- lhs := L.rkValue(B)
- rhs := L.rkValue(C)
- v1, ok1 := lhs.assertFloat64()
- v2, ok2 := rhs.assertFloat64()
- if ok1 && ok2 {
- reg.SetNumber(RA, numberArith(L, opcode, LNumber(v1), LNumber(v2)))
- } else {
- reg.Set(RA, objectArith(L, opcode, lhs, rhs))
- }
- return 0
-}
-
-func luaModulo(lhs, rhs LNumber) LNumber {
- flhs := float64(lhs)
- frhs := float64(rhs)
- v := math.Mod(flhs, frhs)
- if flhs < 0 || frhs < 0 && !(flhs < 0 && frhs < 0) {
- v += frhs
- }
- return LNumber(v)
-}
-
-func numberArith(L *LState, opcode int, lhs, rhs LNumber) LNumber {
- switch opcode {
- case OP_ADD:
- return lhs + rhs
- case OP_SUB:
- return lhs - rhs
- case OP_MUL:
- return lhs * rhs
- case OP_DIV:
- return lhs / rhs
- case OP_MOD:
- return luaModulo(lhs, rhs)
- case OP_POW:
- flhs := float64(lhs)
- frhs := float64(rhs)
- return LNumber(math.Pow(flhs, frhs))
- }
- panic("should not reach here")
- return LNumber(0)
-}
-
-func objectArith(L *LState, opcode int, lhs, rhs LValue) LValue {
- event := ""
- switch opcode {
- case OP_ADD:
- event = "__add"
- case OP_SUB:
- event = "__sub"
- case OP_MUL:
- event = "__mul"
- case OP_DIV:
- event = "__div"
- case OP_MOD:
- event = "__mod"
- case OP_POW:
- event = "__pow"
- }
- op := L.metaOp2(lhs, rhs, event)
- if op.Type() == LTFunction {
- L.reg.Push(op)
- L.reg.Push(lhs)
- L.reg.Push(rhs)
- L.Call(2, 1)
- return L.reg.Pop()
- }
- if str, ok := lhs.(LString); ok {
- if lnum, err := parseNumber(string(str)); err == nil {
- lhs = lnum
- }
- }
- if str, ok := rhs.(LString); ok {
- if rnum, err := parseNumber(string(str)); err == nil {
- rhs = rnum
- }
- }
- if v1, ok1 := lhs.assertFloat64(); ok1 {
- if v2, ok2 := rhs.assertFloat64(); ok2 {
- return numberArith(L, opcode, LNumber(v1), LNumber(v2))
- }
- }
- L.RaiseError(fmt.Sprintf("cannot perform %v operation between %v and %v",
- strings.TrimLeft(event, "_"), lhs.Type().String(), rhs.Type().String()))
-
- return LNil
-}
-
-func stringConcat(L *LState, total, last int) LValue {
- rhs := L.reg.Get(last)
- total--
- for i := last - 1; total > 0; {
- lhs := L.reg.Get(i)
- if !(LVCanConvToString(lhs) && LVCanConvToString(rhs)) {
- op := L.metaOp2(lhs, rhs, "__concat")
- if op.Type() == LTFunction {
- L.reg.Push(op)
- L.reg.Push(lhs)
- L.reg.Push(rhs)
- L.Call(2, 1)
- rhs = L.reg.Pop()
- total--
- i--
- } else {
- L.RaiseError("cannot perform concat operation between %v and %v", lhs.Type().String(), rhs.Type().String())
- return LNil
- }
- } else {
- buf := make([]string, total+1)
- buf[total] = LVAsString(rhs)
- for total > 0 {
- lhs = L.reg.Get(i)
- if !LVCanConvToString(lhs) {
- break
- }
- buf[total-1] = LVAsString(lhs)
- i--
- total--
- }
- rhs = LString(strings.Join(buf, ""))
- }
- }
- return rhs
-}
-
-func lessThan(L *LState, lhs, rhs LValue) bool {
- // optimization for numbers
- if v1, ok1 := lhs.assertFloat64(); ok1 {
- if v2, ok2 := rhs.assertFloat64(); ok2 {
- return v1 < v2
- }
- L.RaiseError("attempt to compare %v with %v", lhs.Type().String(), rhs.Type().String())
- }
- if lhs.Type() != rhs.Type() {
- L.RaiseError("attempt to compare %v with %v", lhs.Type().String(), rhs.Type().String())
- return false
- }
- ret := false
- switch lhs.Type() {
- case LTString:
- ret = strCmp(string(lhs.(LString)), string(rhs.(LString))) < 0
- default:
- ret = objectRationalWithError(L, lhs, rhs, "__lt")
- }
- return ret
-}
-
-func equals(L *LState, lhs, rhs LValue, raw bool) bool {
- if lhs.Type() != rhs.Type() {
- return false
- }
-
- ret := false
- switch lhs.Type() {
- case LTNil:
- ret = true
- case LTNumber:
- v1, _ := lhs.assertFloat64()
- v2, _ := rhs.assertFloat64()
- ret = v1 == v2
- case LTBool:
- ret = bool(lhs.(LBool)) == bool(rhs.(LBool))
- case LTString:
- ret = string(lhs.(LString)) == string(rhs.(LString))
- case LTUserData, LTTable:
- if lhs == rhs {
- ret = true
- } else if !raw {
- switch objectRational(L, lhs, rhs, "__eq") {
- case 1:
- ret = true
- default:
- ret = false
- }
- }
- default:
- ret = lhs == rhs
- }
- return ret
-}
-
-func objectRationalWithError(L *LState, lhs, rhs LValue, event string) bool {
- switch objectRational(L, lhs, rhs, event) {
- case 1:
- return true
- case 0:
- return false
- }
- L.RaiseError("attempt to compare %v with %v", lhs.Type().String(), rhs.Type().String())
- return false
-}
-
-func objectRational(L *LState, lhs, rhs LValue, event string) int {
- m1 := L.metaOp1(lhs, event)
- m2 := L.metaOp1(rhs, event)
- if m1.Type() == LTFunction && m1 == m2 {
- L.reg.Push(m1)
- L.reg.Push(lhs)
- L.reg.Push(rhs)
- L.Call(2, 1)
- if LVAsBool(L.reg.Pop()) {
- return 1
- }
- return 0
- }
- return -1
-}
diff --git a/vendor/github.com/yuin/gopher-lua/alloc.go b/vendor/github.com/yuin/gopher-lua/alloc.go
deleted file mode 100644
index 7a8cd63a..00000000
--- a/vendor/github.com/yuin/gopher-lua/alloc.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package lua
-
-import (
- "reflect"
- "unsafe"
-)
-
-// iface is an internal representation of the go-interface.
-type iface struct {
- itab unsafe.Pointer
- word unsafe.Pointer
-}
-
-const preloadLimit LNumber = 128
-
-var _fv float64
-var _uv uintptr
-
-var preloads [int(preloadLimit)]LValue
-
-func init() {
- for i := 0; i < int(preloadLimit); i++ {
- preloads[i] = LNumber(i)
- }
-}
-
-// allocator is a fast bulk memory allocator for the LValue.
-type allocator struct {
- size int
- fptrs []float64
- fheader *reflect.SliceHeader
-
- scratchValue LValue
- scratchValueP *iface
-}
-
-func newAllocator(size int) *allocator {
- al := &allocator{
- size: size,
- fptrs: make([]float64, 0, size),
- fheader: nil,
- }
- al.fheader = (*reflect.SliceHeader)(unsafe.Pointer(&al.fptrs))
- al.scratchValue = LNumber(0)
- al.scratchValueP = (*iface)(unsafe.Pointer(&al.scratchValue))
-
- return al
-}
-
-// LNumber2I takes a number value and returns an interface LValue representing the same number.
-// Converting an LNumber to a LValue naively, by doing:
-// `var val LValue = myLNumber`
-// will result in an individual heap alloc of 8 bytes for the float value. LNumber2I amortizes the cost and memory
-// overhead of these allocs by allocating blocks of floats instead.
-// The downside of this is that all of the floats on a given block have to become eligible for gc before the block
-// as a whole can be gc-ed.
-func (al *allocator) LNumber2I(v LNumber) LValue {
- // first check for shared preloaded numbers
- if v >= 0 && v < preloadLimit && float64(v) == float64(int64(v)) {
- return preloads[int(v)]
- }
-
- // check if we need a new alloc page
- if cap(al.fptrs) == len(al.fptrs) {
- al.fptrs = make([]float64, 0, al.size)
- al.fheader = (*reflect.SliceHeader)(unsafe.Pointer(&al.fptrs))
- }
-
- // alloc a new float, and store our value into it
- al.fptrs = append(al.fptrs, float64(v))
- fptr := &al.fptrs[len(al.fptrs)-1]
-
- // hack our scratch LValue to point to our allocated value
- // this scratch lvalue is copied when this function returns meaning the scratch value can be reused
- // on the next call
- al.scratchValueP.word = unsafe.Pointer(fptr)
-
- return al.scratchValue
-}
diff --git a/vendor/github.com/yuin/gopher-lua/ast/ast.go b/vendor/github.com/yuin/gopher-lua/ast/ast.go
deleted file mode 100644
index f337a294..00000000
--- a/vendor/github.com/yuin/gopher-lua/ast/ast.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package ast
-
-type PositionHolder interface {
- Line() int
- SetLine(int)
- LastLine() int
- SetLastLine(int)
-}
-
-type Node struct {
- line int
- lastline int
-}
-
-func (self *Node) Line() int {
- return self.line
-}
-
-func (self *Node) SetLine(line int) {
- self.line = line
-}
-
-func (self *Node) LastLine() int {
- return self.lastline
-}
-
-func (self *Node) SetLastLine(line int) {
- self.lastline = line
-}
diff --git a/vendor/github.com/yuin/gopher-lua/ast/expr.go b/vendor/github.com/yuin/gopher-lua/ast/expr.go
deleted file mode 100644
index ccda3279..00000000
--- a/vendor/github.com/yuin/gopher-lua/ast/expr.go
+++ /dev/null
@@ -1,137 +0,0 @@
-package ast
-
-type Expr interface {
- PositionHolder
- exprMarker()
-}
-
-type ExprBase struct {
- Node
-}
-
-func (expr *ExprBase) exprMarker() {}
-
-/* ConstExprs {{{ */
-
-type ConstExpr interface {
- Expr
- constExprMarker()
-}
-
-type ConstExprBase struct {
- ExprBase
-}
-
-func (expr *ConstExprBase) constExprMarker() {}
-
-type TrueExpr struct {
- ConstExprBase
-}
-
-type FalseExpr struct {
- ConstExprBase
-}
-
-type NilExpr struct {
- ConstExprBase
-}
-
-type NumberExpr struct {
- ConstExprBase
-
- Value string
-}
-
-type StringExpr struct {
- ConstExprBase
-
- Value string
-}
-
-/* ConstExprs }}} */
-
-type Comma3Expr struct {
- ExprBase
-}
-
-type IdentExpr struct {
- ExprBase
-
- Value string
-}
-
-type AttrGetExpr struct {
- ExprBase
-
- Object Expr
- Key Expr
-}
-
-type TableExpr struct {
- ExprBase
-
- Fields []*Field
-}
-
-type FuncCallExpr struct {
- ExprBase
-
- Func Expr
- Receiver Expr
- Method string
- Args []Expr
- AdjustRet bool
-}
-
-type LogicalOpExpr struct {
- ExprBase
-
- Operator string
- Lhs Expr
- Rhs Expr
-}
-
-type RelationalOpExpr struct {
- ExprBase
-
- Operator string
- Lhs Expr
- Rhs Expr
-}
-
-type StringConcatOpExpr struct {
- ExprBase
-
- Lhs Expr
- Rhs Expr
-}
-
-type ArithmeticOpExpr struct {
- ExprBase
-
- Operator string
- Lhs Expr
- Rhs Expr
-}
-
-type UnaryMinusOpExpr struct {
- ExprBase
- Expr Expr
-}
-
-type UnaryNotOpExpr struct {
- ExprBase
- Expr Expr
-}
-
-type UnaryLenOpExpr struct {
- ExprBase
- Expr Expr
-}
-
-type FunctionExpr struct {
- ExprBase
-
- ParList *ParList
- Stmts []Stmt
-}
diff --git a/vendor/github.com/yuin/gopher-lua/ast/misc.go b/vendor/github.com/yuin/gopher-lua/ast/misc.go
deleted file mode 100644
index d811c042..00000000
--- a/vendor/github.com/yuin/gopher-lua/ast/misc.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package ast
-
-type Field struct {
- Key Expr
- Value Expr
-}
-
-type ParList struct {
- HasVargs bool
- Names []string
-}
-
-type FuncName struct {
- Func Expr
- Receiver Expr
- Method string
-}
diff --git a/vendor/github.com/yuin/gopher-lua/ast/stmt.go b/vendor/github.com/yuin/gopher-lua/ast/stmt.go
deleted file mode 100644
index 56ea6d1a..00000000
--- a/vendor/github.com/yuin/gopher-lua/ast/stmt.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package ast
-
-type Stmt interface {
- PositionHolder
- stmtMarker()
-}
-
-type StmtBase struct {
- Node
-}
-
-func (stmt *StmtBase) stmtMarker() {}
-
-type AssignStmt struct {
- StmtBase
-
- Lhs []Expr
- Rhs []Expr
-}
-
-type LocalAssignStmt struct {
- StmtBase
-
- Names []string
- Exprs []Expr
-}
-
-type FuncCallStmt struct {
- StmtBase
-
- Expr Expr
-}
-
-type DoBlockStmt struct {
- StmtBase
-
- Stmts []Stmt
-}
-
-type WhileStmt struct {
- StmtBase
-
- Condition Expr
- Stmts []Stmt
-}
-
-type RepeatStmt struct {
- StmtBase
-
- Condition Expr
- Stmts []Stmt
-}
-
-type IfStmt struct {
- StmtBase
-
- Condition Expr
- Then []Stmt
- Else []Stmt
-}
-
-type NumberForStmt struct {
- StmtBase
-
- Name string
- Init Expr
- Limit Expr
- Step Expr
- Stmts []Stmt
-}
-
-type GenericForStmt struct {
- StmtBase
-
- Names []string
- Exprs []Expr
- Stmts []Stmt
-}
-
-type FuncDefStmt struct {
- StmtBase
-
- Name *FuncName
- Func *FunctionExpr
-}
-
-type ReturnStmt struct {
- StmtBase
-
- Exprs []Expr
-}
-
-type BreakStmt struct {
- StmtBase
-}
diff --git a/vendor/github.com/yuin/gopher-lua/ast/token.go b/vendor/github.com/yuin/gopher-lua/ast/token.go
deleted file mode 100644
index 820467c9..00000000
--- a/vendor/github.com/yuin/gopher-lua/ast/token.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package ast
-
-import (
- "fmt"
-)
-
-type Position struct {
- Source string
- Line int
- Column int
-}
-
-type Token struct {
- Type int
- Name string
- Str string
- Pos Position
-}
-
-func (self *Token) String() string {
- return fmt.Sprintf("", self.Name, self.Str)
-}
diff --git a/vendor/github.com/yuin/gopher-lua/auxlib.go b/vendor/github.com/yuin/gopher-lua/auxlib.go
deleted file mode 100644
index 61a3b8b6..00000000
--- a/vendor/github.com/yuin/gopher-lua/auxlib.go
+++ /dev/null
@@ -1,460 +0,0 @@
-package lua
-
-import (
- "bufio"
- "fmt"
- "io"
- "os"
- "strings"
-)
-
-/* checkType {{{ */
-
-func (ls *LState) CheckAny(n int) LValue {
- if n > ls.GetTop() {
- ls.ArgError(n, "value expected")
- }
- return ls.Get(n)
-}
-
-func (ls *LState) CheckInt(n int) int {
- v := ls.Get(n)
- if intv, ok := v.(LNumber); ok {
- return int(intv)
- }
- ls.TypeError(n, LTNumber)
- return 0
-}
-
-func (ls *LState) CheckInt64(n int) int64 {
- v := ls.Get(n)
- if intv, ok := v.(LNumber); ok {
- return int64(intv)
- }
- ls.TypeError(n, LTNumber)
- return 0
-}
-
-func (ls *LState) CheckNumber(n int) LNumber {
- v := ls.Get(n)
- if lv, ok := v.(LNumber); ok {
- return lv
- }
- ls.TypeError(n, LTNumber)
- return 0
-}
-
-func (ls *LState) CheckString(n int) string {
- v := ls.Get(n)
- if lv, ok := v.(LString); ok {
- return string(lv)
- } else if LVCanConvToString(v) {
- return ls.ToString(n)
- }
- ls.TypeError(n, LTString)
- return ""
-}
-
-func (ls *LState) CheckBool(n int) bool {
- v := ls.Get(n)
- if lv, ok := v.(LBool); ok {
- return bool(lv)
- }
- ls.TypeError(n, LTBool)
- return false
-}
-
-func (ls *LState) CheckTable(n int) *LTable {
- v := ls.Get(n)
- if lv, ok := v.(*LTable); ok {
- return lv
- }
- ls.TypeError(n, LTTable)
- return nil
-}
-
-func (ls *LState) CheckFunction(n int) *LFunction {
- v := ls.Get(n)
- if lv, ok := v.(*LFunction); ok {
- return lv
- }
- ls.TypeError(n, LTFunction)
- return nil
-}
-
-func (ls *LState) CheckUserData(n int) *LUserData {
- v := ls.Get(n)
- if lv, ok := v.(*LUserData); ok {
- return lv
- }
- ls.TypeError(n, LTUserData)
- return nil
-}
-
-func (ls *LState) CheckThread(n int) *LState {
- v := ls.Get(n)
- if lv, ok := v.(*LState); ok {
- return lv
- }
- ls.TypeError(n, LTThread)
- return nil
-}
-
-func (ls *LState) CheckType(n int, typ LValueType) {
- v := ls.Get(n)
- if v.Type() != typ {
- ls.TypeError(n, typ)
- }
-}
-
-func (ls *LState) CheckTypes(n int, typs ...LValueType) {
- vt := ls.Get(n).Type()
- for _, typ := range typs {
- if vt == typ {
- return
- }
- }
- buf := []string{}
- for _, typ := range typs {
- buf = append(buf, typ.String())
- }
- ls.ArgError(n, strings.Join(buf, " or ")+" expected, got "+ls.Get(n).Type().String())
-}
-
-func (ls *LState) CheckOption(n int, options []string) int {
- str := ls.CheckString(n)
- for i, v := range options {
- if v == str {
- return i
- }
- }
- ls.ArgError(n, fmt.Sprintf("invalid option: %s (must be one of %s)", str, strings.Join(options, ",")))
- return 0
-}
-
-/* }}} */
-
-/* optType {{{ */
-
-func (ls *LState) OptInt(n int, d int) int {
- v := ls.Get(n)
- if v == LNil {
- return d
- }
- if intv, ok := v.(LNumber); ok {
- return int(intv)
- }
- ls.TypeError(n, LTNumber)
- return 0
-}
-
-func (ls *LState) OptInt64(n int, d int64) int64 {
- v := ls.Get(n)
- if v == LNil {
- return d
- }
- if intv, ok := v.(LNumber); ok {
- return int64(intv)
- }
- ls.TypeError(n, LTNumber)
- return 0
-}
-
-func (ls *LState) OptNumber(n int, d LNumber) LNumber {
- v := ls.Get(n)
- if v == LNil {
- return d
- }
- if lv, ok := v.(LNumber); ok {
- return lv
- }
- ls.TypeError(n, LTNumber)
- return 0
-}
-
-func (ls *LState) OptString(n int, d string) string {
- v := ls.Get(n)
- if v == LNil {
- return d
- }
- if lv, ok := v.(LString); ok {
- return string(lv)
- }
- ls.TypeError(n, LTString)
- return ""
-}
-
-func (ls *LState) OptBool(n int, d bool) bool {
- v := ls.Get(n)
- if v == LNil {
- return d
- }
- if lv, ok := v.(LBool); ok {
- return bool(lv)
- }
- ls.TypeError(n, LTBool)
- return false
-}
-
-func (ls *LState) OptTable(n int, d *LTable) *LTable {
- v := ls.Get(n)
- if v == LNil {
- return d
- }
- if lv, ok := v.(*LTable); ok {
- return lv
- }
- ls.TypeError(n, LTTable)
- return nil
-}
-
-func (ls *LState) OptFunction(n int, d *LFunction) *LFunction {
- v := ls.Get(n)
- if v == LNil {
- return d
- }
- if lv, ok := v.(*LFunction); ok {
- return lv
- }
- ls.TypeError(n, LTFunction)
- return nil
-}
-
-func (ls *LState) OptUserData(n int, d *LUserData) *LUserData {
- v := ls.Get(n)
- if v == LNil {
- return d
- }
- if lv, ok := v.(*LUserData); ok {
- return lv
- }
- ls.TypeError(n, LTUserData)
- return nil
-}
-
-/* }}} */
-
-/* error operations {{{ */
-
-func (ls *LState) ArgError(n int, message string) {
- ls.RaiseError("bad argument #%v to %v (%v)", n, ls.rawFrameFuncName(ls.currentFrame), message)
-}
-
-func (ls *LState) TypeError(n int, typ LValueType) {
- ls.RaiseError("bad argument #%v to %v (%v expected, got %v)", n, ls.rawFrameFuncName(ls.currentFrame), typ.String(), ls.Get(n).Type().String())
-}
-
-/* }}} */
-
-/* debug operations {{{ */
-
-func (ls *LState) Where(level int) string {
- return ls.where(level, false)
-}
-
-/* }}} */
-
-/* table operations {{{ */
-
-func (ls *LState) FindTable(obj *LTable, n string, size int) LValue {
- names := strings.Split(n, ".")
- curobj := obj
- for _, name := range names {
- if curobj.Type() != LTTable {
- return LNil
- }
- nextobj := ls.RawGet(curobj, LString(name))
- if nextobj == LNil {
- tb := ls.CreateTable(0, size)
- ls.RawSet(curobj, LString(name), tb)
- curobj = tb
- } else if nextobj.Type() != LTTable {
- return LNil
- } else {
- curobj = nextobj.(*LTable)
- }
- }
- return curobj
-}
-
-/* }}} */
-
-/* register operations {{{ */
-
-func (ls *LState) RegisterModule(name string, funcs map[string]LGFunction) LValue {
- tb := ls.FindTable(ls.Get(RegistryIndex).(*LTable), "_LOADED", 1)
- mod := ls.GetField(tb, name)
- if mod.Type() != LTTable {
- newmod := ls.FindTable(ls.Get(GlobalsIndex).(*LTable), name, len(funcs))
- if newmodtb, ok := newmod.(*LTable); !ok {
- ls.RaiseError("name conflict for module(%v)", name)
- } else {
- for fname, fn := range funcs {
- newmodtb.RawSetString(fname, ls.NewFunction(fn))
- }
- ls.SetField(tb, name, newmodtb)
- return newmodtb
- }
- }
- return mod
-}
-
-func (ls *LState) SetFuncs(tb *LTable, funcs map[string]LGFunction, upvalues ...LValue) *LTable {
- for fname, fn := range funcs {
- tb.RawSetString(fname, ls.NewClosure(fn, upvalues...))
- }
- return tb
-}
-
-/* }}} */
-
-/* metatable operations {{{ */
-
-func (ls *LState) NewTypeMetatable(typ string) *LTable {
- regtable := ls.Get(RegistryIndex)
- mt := ls.GetField(regtable, typ)
- if tb, ok := mt.(*LTable); ok {
- return tb
- }
- mtnew := ls.NewTable()
- ls.SetField(regtable, typ, mtnew)
- return mtnew
-}
-
-func (ls *LState) GetMetaField(obj LValue, event string) LValue {
- return ls.metaOp1(obj, event)
-}
-
-func (ls *LState) GetTypeMetatable(typ string) LValue {
- return ls.GetField(ls.Get(RegistryIndex), typ)
-}
-
-func (ls *LState) CallMeta(obj LValue, event string) LValue {
- op := ls.metaOp1(obj, event)
- if op.Type() == LTFunction {
- ls.reg.Push(op)
- ls.reg.Push(obj)
- ls.Call(1, 1)
- return ls.reg.Pop()
- }
- return LNil
-}
-
-/* }}} */
-
-/* load and function call operations {{{ */
-
-func (ls *LState) LoadFile(path string) (*LFunction, error) {
- var file *os.File
- var err error
- if len(path) == 0 {
- file = os.Stdin
- } else {
- file, err = os.Open(path)
- defer file.Close()
- if err != nil {
- return nil, newApiErrorE(ApiErrorFile, err)
- }
- }
-
- reader := bufio.NewReader(file)
- // get the first character.
- c, err := reader.ReadByte()
- if err != nil && err != io.EOF {
- return nil, newApiErrorE(ApiErrorFile, err)
- }
- if c == byte('#') {
- // Unix exec. file?
- // skip first line
- _, err, _ = readBufioLine(reader)
- if err != nil {
- return nil, newApiErrorE(ApiErrorFile, err)
- }
- }
-
- if err != io.EOF {
- // if the file is not empty,
- // unread the first character of the file or newline character(readBufioLine's last byte).
- err = reader.UnreadByte()
- if err != nil {
- return nil, newApiErrorE(ApiErrorFile, err)
- }
- }
-
- return ls.Load(reader, path)
-}
-
-func (ls *LState) LoadString(source string) (*LFunction, error) {
- return ls.Load(strings.NewReader(source), "")
-}
-
-func (ls *LState) DoFile(path string) error {
- if fn, err := ls.LoadFile(path); err != nil {
- return err
- } else {
- ls.Push(fn)
- return ls.PCall(0, MultRet, nil)
- }
-}
-
-func (ls *LState) DoString(source string) error {
- if fn, err := ls.LoadString(source); err != nil {
- return err
- } else {
- ls.Push(fn)
- return ls.PCall(0, MultRet, nil)
- }
-}
-
-/* }}} */
-
-/* GopherLua original APIs {{{ */
-
-// ToStringMeta returns string representation of given LValue.
-// This method calls the `__tostring` meta method if defined.
-func (ls *LState) ToStringMeta(lv LValue) LValue {
- if fn, ok := ls.metaOp1(lv, "__tostring").assertFunction(); ok {
- ls.Push(fn)
- ls.Push(lv)
- ls.Call(1, 1)
- return ls.reg.Pop()
- } else {
- return LString(lv.String())
- }
-}
-
-// Set a module loader to the package.preload table.
-func (ls *LState) PreloadModule(name string, loader LGFunction) {
- preload := ls.GetField(ls.GetField(ls.Get(EnvironIndex), "package"), "preload")
- if _, ok := preload.(*LTable); !ok {
- ls.RaiseError("package.preload must be a table")
- }
- ls.SetField(preload, name, ls.NewFunction(loader))
-}
-
-// Checks whether the given index is an LChannel and returns this channel.
-func (ls *LState) CheckChannel(n int) chan LValue {
- v := ls.Get(n)
- if ch, ok := v.(LChannel); ok {
- return (chan LValue)(ch)
- }
- ls.TypeError(n, LTChannel)
- return nil
-}
-
-// If the given index is a LChannel, returns this channel. If this argument is absent or is nil, returns ch. Otherwise, raises an error.
-func (ls *LState) OptChannel(n int, ch chan LValue) chan LValue {
- v := ls.Get(n)
- if v == LNil {
- return ch
- }
- if ch, ok := v.(LChannel); ok {
- return (chan LValue)(ch)
- }
- ls.TypeError(n, LTChannel)
- return nil
-}
-
-/* }}} */
-
-//
diff --git a/vendor/github.com/yuin/gopher-lua/baselib.go b/vendor/github.com/yuin/gopher-lua/baselib.go
deleted file mode 100644
index 06c90619..00000000
--- a/vendor/github.com/yuin/gopher-lua/baselib.go
+++ /dev/null
@@ -1,592 +0,0 @@
-package lua
-
-import (
- "fmt"
- "io"
- "os"
- "runtime"
- "strconv"
- "strings"
-)
-
-/* basic functions {{{ */
-
-func OpenBase(L *LState) int {
- global := L.Get(GlobalsIndex).(*LTable)
- L.SetGlobal("_G", global)
- L.SetGlobal("_VERSION", LString(LuaVersion))
- L.SetGlobal("_GOPHER_LUA_VERSION", LString(PackageName+" "+PackageVersion))
- basemod := L.RegisterModule("_G", baseFuncs)
- global.RawSetString("ipairs", L.NewClosure(baseIpairs, L.NewFunction(ipairsaux)))
- global.RawSetString("pairs", L.NewClosure(basePairs, L.NewFunction(pairsaux)))
- L.Push(basemod)
- return 1
-}
-
-var baseFuncs = map[string]LGFunction{
- "assert": baseAssert,
- "collectgarbage": baseCollectGarbage,
- "dofile": baseDoFile,
- "error": baseError,
- "getfenv": baseGetFEnv,
- "getmetatable": baseGetMetatable,
- "load": baseLoad,
- "loadfile": baseLoadFile,
- "loadstring": baseLoadString,
- "next": baseNext,
- "pcall": basePCall,
- "print": basePrint,
- "rawequal": baseRawEqual,
- "rawget": baseRawGet,
- "rawset": baseRawSet,
- "select": baseSelect,
- "_printregs": base_PrintRegs,
- "setfenv": baseSetFEnv,
- "setmetatable": baseSetMetatable,
- "tonumber": baseToNumber,
- "tostring": baseToString,
- "type": baseType,
- "unpack": baseUnpack,
- "xpcall": baseXPCall,
- // loadlib
- "module": loModule,
- "require": loRequire,
- // hidden features
- "newproxy": baseNewProxy,
-}
-
-func baseAssert(L *LState) int {
- if !L.ToBool(1) {
- L.RaiseError(L.OptString(2, "assertion failed!"))
- return 0
- }
- return L.GetTop()
-}
-
-func baseCollectGarbage(L *LState) int {
- runtime.GC()
- return 0
-}
-
-func baseDoFile(L *LState) int {
- src := L.ToString(1)
- top := L.GetTop()
- fn, err := L.LoadFile(src)
- if err != nil {
- L.Push(LString(err.Error()))
- L.Panic(L)
- }
- L.Push(fn)
- L.Call(0, MultRet)
- return L.GetTop() - top
-}
-
-func baseError(L *LState) int {
- obj := L.CheckAny(1)
- level := L.OptInt(2, 1)
- L.Error(obj, level)
- return 0
-}
-
-func baseGetFEnv(L *LState) int {
- var value LValue
- if L.GetTop() == 0 {
- value = LNumber(1)
- } else {
- value = L.Get(1)
- }
-
- if fn, ok := value.(*LFunction); ok {
- if !fn.IsG {
- L.Push(fn.Env)
- } else {
- L.Push(L.G.Global)
- }
- return 1
- }
-
- if number, ok := value.(LNumber); ok {
- level := int(float64(number))
- if level <= 0 {
- L.Push(L.Env)
- } else {
- cf := L.currentFrame
- for i := 0; i < level && cf != nil; i++ {
- cf = cf.Parent
- }
- if cf == nil || cf.Fn.IsG {
- L.Push(L.G.Global)
- } else {
- L.Push(cf.Fn.Env)
- }
- }
- return 1
- }
-
- L.Push(L.G.Global)
- return 1
-}
-
-func baseGetMetatable(L *LState) int {
- L.Push(L.GetMetatable(L.CheckAny(1)))
- return 1
-}
-
-func ipairsaux(L *LState) int {
- tb := L.CheckTable(1)
- i := L.CheckInt(2)
- i++
- v := tb.RawGetInt(i)
- if v == LNil {
- return 0
- } else {
- L.Pop(1)
- L.Push(LNumber(i))
- L.Push(LNumber(i))
- L.Push(v)
- return 2
- }
-}
-
-func baseIpairs(L *LState) int {
- tb := L.CheckTable(1)
- L.Push(L.Get(UpvalueIndex(1)))
- L.Push(tb)
- L.Push(LNumber(0))
- return 3
-}
-
-func loadaux(L *LState, reader io.Reader, chunkname string) int {
- if fn, err := L.Load(reader, chunkname); err != nil {
- L.Push(LNil)
- L.Push(LString(err.Error()))
- return 2
- } else {
- L.Push(fn)
- return 1
- }
-}
-
-func baseLoad(L *LState) int {
- fn := L.CheckFunction(1)
- chunkname := L.OptString(2, "?")
- top := L.GetTop()
- buf := []string{}
- for {
- L.SetTop(top)
- L.Push(fn)
- L.Call(0, 1)
- ret := L.reg.Pop()
- if ret == LNil {
- break
- } else if LVCanConvToString(ret) {
- str := ret.String()
- if len(str) > 0 {
- buf = append(buf, string(str))
- } else {
- break
- }
- } else {
- L.Push(LNil)
- L.Push(LString("reader function must return a string"))
- return 2
- }
- }
- return loadaux(L, strings.NewReader(strings.Join(buf, "")), chunkname)
-}
-
-func baseLoadFile(L *LState) int {
- var reader io.Reader
- var chunkname string
- var err error
- if L.GetTop() < 1 {
- reader = os.Stdin
- chunkname = ""
- } else {
- chunkname = L.CheckString(1)
- reader, err = os.Open(chunkname)
- if err != nil {
- L.Push(LNil)
- L.Push(LString(fmt.Sprintf("can not open file: %v", chunkname)))
- return 2
- }
- defer reader.(*os.File).Close()
- }
- return loadaux(L, reader, chunkname)
-}
-
-func baseLoadString(L *LState) int {
- return loadaux(L, strings.NewReader(L.CheckString(1)), L.OptString(2, ""))
-}
-
-func baseNext(L *LState) int {
- tb := L.CheckTable(1)
- index := LNil
- if L.GetTop() >= 2 {
- index = L.Get(2)
- }
- key, value := tb.Next(index)
- if key == LNil {
- L.Push(LNil)
- return 1
- }
- L.Push(key)
- L.Push(value)
- return 2
-}
-
-func pairsaux(L *LState) int {
- tb := L.CheckTable(1)
- key, value := tb.Next(L.Get(2))
- if key == LNil {
- return 0
- } else {
- L.Pop(1)
- L.Push(key)
- L.Push(key)
- L.Push(value)
- return 2
- }
-}
-
-func basePairs(L *LState) int {
- tb := L.CheckTable(1)
- L.Push(L.Get(UpvalueIndex(1)))
- L.Push(tb)
- L.Push(LNil)
- return 3
-}
-
-func basePCall(L *LState) int {
- L.CheckAny(1)
- v := L.Get(1)
- if v.Type() != LTFunction {
- L.Push(LFalse)
- L.Push(LString("attempt to call a " + v.Type().String() + " value"))
- return 2
- }
- nargs := L.GetTop() - 1
- if err := L.PCall(nargs, MultRet, nil); err != nil {
- L.Push(LFalse)
- if aerr, ok := err.(*ApiError); ok {
- L.Push(aerr.Object)
- } else {
- L.Push(LString(err.Error()))
- }
- return 2
- } else {
- L.Insert(LTrue, 1)
- return L.GetTop()
- }
-}
-
-func basePrint(L *LState) int {
- top := L.GetTop()
- for i := 1; i <= top; i++ {
- fmt.Print(L.ToStringMeta(L.Get(i)).String())
- if i != top {
- fmt.Print("\t")
- }
- }
- fmt.Println("")
- return 0
-}
-
-func base_PrintRegs(L *LState) int {
- L.printReg()
- return 0
-}
-
-func baseRawEqual(L *LState) int {
- if L.CheckAny(1) == L.CheckAny(2) {
- L.Push(LTrue)
- } else {
- L.Push(LFalse)
- }
- return 1
-}
-
-func baseRawGet(L *LState) int {
- L.Push(L.RawGet(L.CheckTable(1), L.CheckAny(2)))
- return 1
-}
-
-func baseRawSet(L *LState) int {
- L.RawSet(L.CheckTable(1), L.CheckAny(2), L.CheckAny(3))
- return 0
-}
-
-func baseSelect(L *LState) int {
- L.CheckTypes(1, LTNumber, LTString)
- switch lv := L.Get(1).(type) {
- case LNumber:
- idx := int(lv)
- num := L.reg.Top() - L.indexToReg(int(lv)) - 1
- if idx < 0 {
- num++
- }
- return num
- case LString:
- if string(lv) != "#" {
- L.ArgError(1, "invalid string '"+string(lv)+"'")
- }
- L.Push(LNumber(L.GetTop() - 1))
- return 1
- }
- return 0
-}
-
-func baseSetFEnv(L *LState) int {
- var value LValue
- if L.GetTop() == 0 {
- value = LNumber(1)
- } else {
- value = L.Get(1)
- }
- env := L.CheckTable(2)
-
- if fn, ok := value.(*LFunction); ok {
- if fn.IsG {
- L.RaiseError("cannot change the environment of given object")
- } else {
- fn.Env = env
- L.Push(fn)
- return 1
- }
- }
-
- if number, ok := value.(LNumber); ok {
- level := int(float64(number))
- if level <= 0 {
- L.Env = env
- return 0
- }
-
- cf := L.currentFrame
- for i := 0; i < level && cf != nil; i++ {
- cf = cf.Parent
- }
- if cf == nil || cf.Fn.IsG {
- L.RaiseError("cannot change the environment of given object")
- } else {
- cf.Fn.Env = env
- L.Push(cf.Fn)
- return 1
- }
- }
-
- L.RaiseError("cannot change the environment of given object")
- return 0
-}
-
-func baseSetMetatable(L *LState) int {
- L.CheckTypes(2, LTNil, LTTable)
- obj := L.Get(1)
- if obj == LNil {
- L.RaiseError("cannot set metatable to a nil object.")
- }
- mt := L.Get(2)
- if m := L.metatable(obj, true); m != LNil {
- if tb, ok := m.(*LTable); ok && tb.RawGetString("__metatable") != LNil {
- L.RaiseError("cannot change a protected metatable")
- }
- }
- L.SetMetatable(obj, mt)
- L.SetTop(1)
- return 1
-}
-
-func baseToNumber(L *LState) int {
- base := L.OptInt(2, 10)
- noBase := L.Get(2) == LNil
-
- switch lv := L.CheckAny(1).(type) {
- case LNumber:
- L.Push(lv)
- case LString:
- str := strings.Trim(string(lv), " \n\t")
- if strings.Index(str, ".") > -1 {
- if v, err := strconv.ParseFloat(str, LNumberBit); err != nil {
- L.Push(LNil)
- } else {
- L.Push(LNumber(v))
- }
- } else {
- if noBase && strings.HasPrefix(strings.ToLower(str), "0x") {
- base, str = 16, str[2:] // Hex number
- }
- if v, err := strconv.ParseInt(str, base, LNumberBit); err != nil {
- L.Push(LNil)
- } else {
- L.Push(LNumber(v))
- }
- }
- default:
- L.Push(LNil)
- }
- return 1
-}
-
-func baseToString(L *LState) int {
- v1 := L.CheckAny(1)
- L.Push(L.ToStringMeta(v1))
- return 1
-}
-
-func baseType(L *LState) int {
- L.Push(LString(L.CheckAny(1).Type().String()))
- return 1
-}
-
-func baseUnpack(L *LState) int {
- tb := L.CheckTable(1)
- start := L.OptInt(2, 1)
- end := L.OptInt(3, tb.Len())
- for i := start; i <= end; i++ {
- L.Push(tb.RawGetInt(i))
- }
- ret := end - start + 1
- if ret < 0 {
- return 0
- }
- return ret
-}
-
-func baseXPCall(L *LState) int {
- fn := L.CheckFunction(1)
- errfunc := L.CheckFunction(2)
-
- top := L.GetTop()
- L.Push(fn)
- if err := L.PCall(0, MultRet, errfunc); err != nil {
- L.Push(LFalse)
- if aerr, ok := err.(*ApiError); ok {
- L.Push(aerr.Object)
- } else {
- L.Push(LString(err.Error()))
- }
- return 2
- } else {
- L.Insert(LTrue, top+1)
- return L.GetTop() - top
- }
-}
-
-/* }}} */
-
-/* load lib {{{ */
-
-func loModule(L *LState) int {
- name := L.CheckString(1)
- loaded := L.GetField(L.Get(RegistryIndex), "_LOADED")
- tb := L.GetField(loaded, name)
- if _, ok := tb.(*LTable); !ok {
- tb = L.FindTable(L.Get(GlobalsIndex).(*LTable), name, 1)
- if tb == LNil {
- L.RaiseError("name conflict for module: %v", name)
- }
- L.SetField(loaded, name, tb)
- }
- if L.GetField(tb, "_NAME") == LNil {
- L.SetField(tb, "_M", tb)
- L.SetField(tb, "_NAME", LString(name))
- names := strings.Split(name, ".")
- pname := ""
- if len(names) > 1 {
- pname = strings.Join(names[:len(names)-1], ".") + "."
- }
- L.SetField(tb, "_PACKAGE", LString(pname))
- }
-
- caller := L.currentFrame.Parent
- if caller == nil {
- L.RaiseError("no calling stack.")
- } else if caller.Fn.IsG {
- L.RaiseError("module() can not be called from GFunctions.")
- }
- L.SetFEnv(caller.Fn, tb)
-
- top := L.GetTop()
- for i := 2; i <= top; i++ {
- L.Push(L.Get(i))
- L.Push(tb)
- L.Call(1, 0)
- }
- L.Push(tb)
- return 1
-}
-
-var loopdetection = &LUserData{}
-
-func loRequire(L *LState) int {
- name := L.CheckString(1)
- loaded := L.GetField(L.Get(RegistryIndex), "_LOADED")
- lv := L.GetField(loaded, name)
- if LVAsBool(lv) {
- if lv == loopdetection {
- L.RaiseError("loop or previous error loading module: %s", name)
- }
- L.Push(lv)
- return 1
- }
- loaders, ok := L.GetField(L.Get(RegistryIndex), "_LOADERS").(*LTable)
- if !ok {
- L.RaiseError("package.loaders must be a table")
- }
- messages := []string{}
- var modasfunc LValue
- for i := 1; ; i++ {
- loader := L.RawGetInt(loaders, i)
- if loader == LNil {
- L.RaiseError("module %s not found:\n\t%s, ", name, strings.Join(messages, "\n\t"))
- }
- L.Push(loader)
- L.Push(LString(name))
- L.Call(1, 1)
- ret := L.reg.Pop()
- switch retv := ret.(type) {
- case *LFunction:
- modasfunc = retv
- goto loopbreak
- case LString:
- messages = append(messages, string(retv))
- }
- }
-loopbreak:
- L.SetField(loaded, name, loopdetection)
- L.Push(modasfunc)
- L.Push(LString(name))
- L.Call(1, 1)
- ret := L.reg.Pop()
- modv := L.GetField(loaded, name)
- if ret != LNil && modv == loopdetection {
- L.SetField(loaded, name, ret)
- L.Push(ret)
- } else if modv == loopdetection {
- L.SetField(loaded, name, LTrue)
- L.Push(LTrue)
- } else {
- L.Push(modv)
- }
- return 1
-}
-
-/* }}} */
-
-/* hidden features {{{ */
-
-func baseNewProxy(L *LState) int {
- ud := L.NewUserData()
- L.SetTop(1)
- if L.Get(1) == LTrue {
- L.SetMetatable(ud, L.NewTable())
- } else if d, ok := L.Get(1).(*LUserData); ok {
- L.SetMetatable(ud, L.GetMetatable(d))
- }
- L.Push(ud)
- return 1
-}
-
-/* }}} */
-
-//
diff --git a/vendor/github.com/yuin/gopher-lua/channellib.go b/vendor/github.com/yuin/gopher-lua/channellib.go
deleted file mode 100644
index a92bf72c..00000000
--- a/vendor/github.com/yuin/gopher-lua/channellib.go
+++ /dev/null
@@ -1,184 +0,0 @@
-package lua
-
-import (
- "reflect"
-)
-
-func checkChannel(L *LState, idx int) reflect.Value {
- ch := L.CheckChannel(idx)
- return reflect.ValueOf(ch)
-}
-
-func checkGoroutineSafe(L *LState, idx int) LValue {
- v := L.CheckAny(2)
- if !isGoroutineSafe(v) {
- L.ArgError(2, "can not send a function, userdata, thread or table that has a metatable")
- }
- return v
-}
-
-func OpenChannel(L *LState) int {
- var mod LValue
- //_, ok := L.G.builtinMts[int(LTChannel)]
- // if !ok {
- mod = L.RegisterModule(ChannelLibName, channelFuncs)
- mt := L.SetFuncs(L.NewTable(), channelMethods)
- mt.RawSetString("__index", mt)
- L.G.builtinMts[int(LTChannel)] = mt
- // }
- L.Push(mod)
- return 1
-}
-
-var channelFuncs = map[string]LGFunction{
- "make": channelMake,
- "select": channelSelect,
-}
-
-func channelMake(L *LState) int {
- buffer := L.OptInt(1, 0)
- L.Push(LChannel(make(chan LValue, buffer)))
- return 1
-}
-
-func channelSelect(L *LState) int {
- //TODO check case table size
- cases := make([]reflect.SelectCase, L.GetTop())
- top := L.GetTop()
- for i := 0; i < top; i++ {
- cas := reflect.SelectCase{
- Dir: reflect.SelectSend,
- Chan: reflect.ValueOf(nil),
- Send: reflect.ValueOf(nil),
- }
- tbl := L.CheckTable(i + 1)
- dir, ok1 := tbl.RawGetInt(1).(LString)
- if !ok1 {
- L.ArgError(i+1, "invalid select case")
- }
- switch string(dir) {
- case "<-|":
- ch, ok := tbl.RawGetInt(2).(LChannel)
- if !ok {
- L.ArgError(i+1, "invalid select case")
- }
- cas.Chan = reflect.ValueOf((chan LValue)(ch))
- v := tbl.RawGetInt(3)
- if !isGoroutineSafe(v) {
- L.ArgError(i+1, "can not send a function, userdata, thread or table that has a metatable")
- }
- cas.Send = reflect.ValueOf(v)
- case "|<-":
- ch, ok := tbl.RawGetInt(2).(LChannel)
- if !ok {
- L.ArgError(i+1, "invalid select case")
- }
- cas.Chan = reflect.ValueOf((chan LValue)(ch))
- cas.Dir = reflect.SelectRecv
- case "default":
- cas.Dir = reflect.SelectDefault
- default:
- L.ArgError(i+1, "invalid channel direction:"+string(dir))
- }
- cases[i] = cas
- }
-
- if L.ctx != nil {
- cases = append(cases, reflect.SelectCase{
- Dir: reflect.SelectRecv,
- Chan: reflect.ValueOf(L.ctx.Done()),
- Send: reflect.ValueOf(nil),
- })
- }
-
- pos, recv, rok := reflect.Select(cases)
-
- if L.ctx != nil && pos == L.GetTop() {
- return 0
- }
-
- lv := LNil
- if recv.Kind() != 0 {
- lv, _ = recv.Interface().(LValue)
- if lv == nil {
- lv = LNil
- }
- }
- tbl := L.Get(pos + 1).(*LTable)
- last := tbl.RawGetInt(tbl.Len())
- if last.Type() == LTFunction {
- L.Push(last)
- switch cases[pos].Dir {
- case reflect.SelectRecv:
- if rok {
- L.Push(LTrue)
- } else {
- L.Push(LFalse)
- }
- L.Push(lv)
- L.Call(2, 0)
- case reflect.SelectSend:
- L.Push(tbl.RawGetInt(3))
- L.Call(1, 0)
- case reflect.SelectDefault:
- L.Call(0, 0)
- }
- }
- L.Push(LNumber(pos + 1))
- L.Push(lv)
- if rok {
- L.Push(LTrue)
- } else {
- L.Push(LFalse)
- }
- return 3
-}
-
-var channelMethods = map[string]LGFunction{
- "receive": channelReceive,
- "send": channelSend,
- "close": channelClose,
-}
-
-func channelReceive(L *LState) int {
- rch := checkChannel(L, 1)
- var v reflect.Value
- var ok bool
- if L.ctx != nil {
- cases := []reflect.SelectCase{{
- Dir: reflect.SelectRecv,
- Chan: reflect.ValueOf(L.ctx.Done()),
- Send: reflect.ValueOf(nil),
- }, {
- Dir: reflect.SelectRecv,
- Chan: rch,
- Send: reflect.ValueOf(nil),
- }}
- _, v, ok = reflect.Select(cases)
- } else {
- v, ok = rch.Recv()
- }
- if ok {
- L.Push(LTrue)
- L.Push(v.Interface().(LValue))
- } else {
- L.Push(LFalse)
- L.Push(LNil)
- }
- return 2
-}
-
-func channelSend(L *LState) int {
- rch := checkChannel(L, 1)
- v := checkGoroutineSafe(L, 2)
- rch.Send(reflect.ValueOf(v))
- return 0
-}
-
-func channelClose(L *LState) int {
- rch := checkChannel(L, 1)
- rch.Close()
- return 0
-}
-
-//
diff --git a/vendor/github.com/yuin/gopher-lua/compile.go b/vendor/github.com/yuin/gopher-lua/compile.go
deleted file mode 100644
index d3c665ae..00000000
--- a/vendor/github.com/yuin/gopher-lua/compile.go
+++ /dev/null
@@ -1,1672 +0,0 @@
-package lua
-
-import (
- "fmt"
- "github.com/yuin/gopher-lua/ast"
- "math"
- "reflect"
-)
-
-/* internal constants & structs {{{ */
-
-const maxRegisters = 200
-
-type expContextType int
-
-const (
- ecGlobal expContextType = iota
- ecUpvalue
- ecLocal
- ecTable
- ecVararg
- ecMethod
- ecNone
-)
-
-const regNotDefined = opMaxArgsA + 1
-const labelNoJump = 0
-
-type expcontext struct {
- ctype expContextType
- reg int
- // varargopt >= 0: wants varargopt+1 results, i.e a = func()
- // varargopt = -1: ignore results i.e func()
- // varargopt = -2: receive all results i.e a = {func()}
- varargopt int
-}
-
-type assigncontext struct {
- ec *expcontext
- keyrk int
- valuerk int
- keyks bool
- needmove bool
-}
-
-type lblabels struct {
- t int
- f int
- e int
- b bool
-}
-
-type constLValueExpr struct {
- ast.ExprBase
-
- Value LValue
-}
-
-// }}}
-
-/* utilities {{{ */
-var _ecnone0 = &expcontext{ecNone, regNotDefined, 0}
-var _ecnonem1 = &expcontext{ecNone, regNotDefined, -1}
-var _ecnonem2 = &expcontext{ecNone, regNotDefined, -2}
-var ecfuncdef = &expcontext{ecMethod, regNotDefined, 0}
-
-func ecupdate(ec *expcontext, ctype expContextType, reg, varargopt int) {
- if ec == _ecnone0 || ec == _ecnonem1 || ec == _ecnonem2 {
- panic("can not update ec cache")
- }
- ec.ctype = ctype
- ec.reg = reg
- ec.varargopt = varargopt
-}
-
-func ecnone(varargopt int) *expcontext {
- switch varargopt {
- case 0:
- return _ecnone0
- case -1:
- return _ecnonem1
- case -2:
- return _ecnonem2
- }
- return &expcontext{ecNone, regNotDefined, varargopt}
-}
-
-func shouldmove(ec *expcontext, reg int) bool {
- return ec.ctype == ecLocal && ec.reg != regNotDefined && ec.reg != reg
-}
-
-func sline(pos ast.PositionHolder) int {
- return pos.Line()
-}
-
-func eline(pos ast.PositionHolder) int {
- return pos.LastLine()
-}
-
-func savereg(ec *expcontext, reg int) int {
- if ec.ctype != ecLocal || ec.reg == regNotDefined {
- return reg
- }
- return ec.reg
-}
-
-func raiseCompileError(context *funcContext, line int, format string, args ...interface{}) {
- msg := fmt.Sprintf(format, args...)
- panic(&CompileError{context: context, Line: line, Message: msg})
-}
-
-func isVarArgReturnExpr(expr ast.Expr) bool {
- switch ex := expr.(type) {
- case *ast.FuncCallExpr:
- return !ex.AdjustRet
- case *ast.Comma3Expr:
- return true
- }
- return false
-}
-
-func lnumberValue(expr ast.Expr) (LNumber, bool) {
- if ex, ok := expr.(*ast.NumberExpr); ok {
- lv, err := parseNumber(ex.Value)
- if err != nil {
- lv = LNumber(math.NaN())
- }
- return lv, true
- } else if ex, ok := expr.(*constLValueExpr); ok {
- return ex.Value.(LNumber), true
- }
- return 0, false
-}
-
-/* utilities }}} */
-
-type CompileError struct { // {{{
- context *funcContext
- Line int
- Message string
-}
-
-func (e *CompileError) Error() string {
- return fmt.Sprintf("compile error near line(%v) %v: %v", e.Line, e.context.Proto.SourceName, e.Message)
-} // }}}
-
-type codeStore struct { // {{{
- codes []uint32
- lines []int
- pc int
-}
-
-func (cd *codeStore) Add(inst uint32, line int) {
- if l := len(cd.codes); l <= 0 || cd.pc == l {
- cd.codes = append(cd.codes, inst)
- cd.lines = append(cd.lines, line)
- } else {
- cd.codes[cd.pc] = inst
- cd.lines[cd.pc] = line
- }
- cd.pc++
-}
-
-func (cd *codeStore) AddABC(op int, a int, b int, c int, line int) {
- cd.Add(opCreateABC(op, a, b, c), line)
-}
-
-func (cd *codeStore) AddABx(op int, a int, bx int, line int) {
- cd.Add(opCreateABx(op, a, bx), line)
-}
-
-func (cd *codeStore) AddASbx(op int, a int, sbx int, line int) {
- cd.Add(opCreateASbx(op, a, sbx), line)
-}
-
-func (cd *codeStore) PropagateKMV(top int, save *int, reg *int, inc int) {
- lastinst := cd.Last()
- if opGetArgA(lastinst) >= top {
- switch opGetOpCode(lastinst) {
- case OP_LOADK:
- cindex := opGetArgBx(lastinst)
- if cindex <= opMaxIndexRk {
- cd.Pop()
- *save = opRkAsk(cindex)
- return
- }
- case OP_MOVE:
- cd.Pop()
- *save = opGetArgB(lastinst)
- return
- }
- }
- *save = *reg
- *reg = *reg + inc
-}
-
-func (cd *codeStore) PropagateMV(top int, save *int, reg *int, inc int) {
- lastinst := cd.Last()
- if opGetArgA(lastinst) >= top {
- switch opGetOpCode(lastinst) {
- case OP_MOVE:
- cd.Pop()
- *save = opGetArgB(lastinst)
- return
- }
- }
- *save = *reg
- *reg = *reg + inc
-}
-
-func (cd *codeStore) AddLoadNil(a, b, line int) {
- last := cd.Last()
- if opGetOpCode(last) == OP_LOADNIL && (opGetArgA(last)+opGetArgB(last)) == a {
- cd.SetB(cd.LastPC(), b)
- } else {
- cd.AddABC(OP_LOADNIL, a, b, 0, line)
- }
-}
-
-func (cd *codeStore) SetOpCode(pc int, v int) {
- opSetOpCode(&cd.codes[pc], v)
-}
-
-func (cd *codeStore) SetA(pc int, v int) {
- opSetArgA(&cd.codes[pc], v)
-}
-
-func (cd *codeStore) SetB(pc int, v int) {
- opSetArgB(&cd.codes[pc], v)
-}
-
-func (cd *codeStore) SetC(pc int, v int) {
- opSetArgC(&cd.codes[pc], v)
-}
-
-func (cd *codeStore) SetBx(pc int, v int) {
- opSetArgBx(&cd.codes[pc], v)
-}
-
-func (cd *codeStore) SetSbx(pc int, v int) {
- opSetArgSbx(&cd.codes[pc], v)
-}
-
-func (cd *codeStore) At(pc int) uint32 {
- return cd.codes[pc]
-}
-
-func (cd *codeStore) List() []uint32 {
- return cd.codes[:cd.pc]
-}
-
-func (cd *codeStore) PosList() []int {
- return cd.lines[:cd.pc]
-}
-
-func (cd *codeStore) LastPC() int {
- return cd.pc - 1
-}
-
-func (cd *codeStore) Last() uint32 {
- if cd.pc == 0 {
- return opInvalidInstruction
- }
- return cd.codes[cd.pc-1]
-}
-
-func (cd *codeStore) Pop() {
- cd.pc--
-} /* }}} Code */
-
-/* {{{ VarNamePool */
-
-type varNamePoolValue struct {
- Index int
- Name string
-}
-
-type varNamePool struct {
- names []string
- offset int
-}
-
-func newVarNamePool(offset int) *varNamePool {
- return &varNamePool{make([]string, 0, 16), offset}
-}
-
-func (vp *varNamePool) Names() []string {
- return vp.names
-}
-
-func (vp *varNamePool) List() []varNamePoolValue {
- result := make([]varNamePoolValue, len(vp.names), len(vp.names))
- for i, name := range vp.names {
- result[i].Index = i + vp.offset
- result[i].Name = name
- }
- return result
-}
-
-func (vp *varNamePool) LastIndex() int {
- return vp.offset + len(vp.names)
-}
-
-func (vp *varNamePool) Find(name string) int {
- for i := len(vp.names) - 1; i >= 0; i-- {
- if vp.names[i] == name {
- return i + vp.offset
- }
- }
- return -1
-}
-
-func (vp *varNamePool) RegisterUnique(name string) int {
- index := vp.Find(name)
- if index < 0 {
- return vp.Register(name)
- }
- return index
-}
-
-func (vp *varNamePool) Register(name string) int {
- vp.names = append(vp.names, name)
- return len(vp.names) - 1 + vp.offset
-}
-
-/* }}} VarNamePool */
-
-/* FuncContext {{{ */
-
-type codeBlock struct {
- LocalVars *varNamePool
- BreakLabel int
- Parent *codeBlock
- RefUpvalue bool
- LineStart int
- LastLine int
-}
-
-func newCodeBlock(localvars *varNamePool, blabel int, parent *codeBlock, pos ast.PositionHolder) *codeBlock {
- bl := &codeBlock{localvars, blabel, parent, false, 0, 0}
- if pos != nil {
- bl.LineStart = pos.Line()
- bl.LastLine = pos.LastLine()
- }
- return bl
-}
-
-type funcContext struct {
- Proto *FunctionProto
- Code *codeStore
- Parent *funcContext
- Upvalues *varNamePool
- Block *codeBlock
- Blocks []*codeBlock
- regTop int
- labelId int
- labelPc map[int]int
-}
-
-func newFuncContext(sourcename string, parent *funcContext) *funcContext {
- fc := &funcContext{
- Proto: newFunctionProto(sourcename),
- Code: &codeStore{make([]uint32, 0, 1024), make([]int, 0, 1024), 0},
- Parent: parent,
- Upvalues: newVarNamePool(0),
- Block: newCodeBlock(newVarNamePool(0), labelNoJump, nil, nil),
- regTop: 0,
- labelId: 1,
- labelPc: map[int]int{},
- }
- fc.Blocks = []*codeBlock{fc.Block}
- return fc
-}
-
-func (fc *funcContext) NewLabel() int {
- ret := fc.labelId
- fc.labelId++
- return ret
-}
-
-func (fc *funcContext) SetLabelPc(label int, pc int) {
- fc.labelPc[label] = pc
-}
-
-func (fc *funcContext) GetLabelPc(label int) int {
- return fc.labelPc[label]
-}
-
-func (fc *funcContext) ConstIndex(value LValue) int {
- ctype := value.Type()
- for i, lv := range fc.Proto.Constants {
- if lv.Type() == ctype && lv == value {
- return i
- }
- }
- fc.Proto.Constants = append(fc.Proto.Constants, value)
- v := len(fc.Proto.Constants) - 1
- if v > opMaxArgBx {
- raiseCompileError(fc, fc.Proto.LineDefined, "too many constants")
- }
- return v
-}
-
-func (fc *funcContext) RegisterLocalVar(name string) int {
- ret := fc.Block.LocalVars.Register(name)
- fc.Proto.DbgLocals = append(fc.Proto.DbgLocals, &DbgLocalInfo{Name: name, StartPc: fc.Code.LastPC() + 1})
- fc.SetRegTop(fc.RegTop() + 1)
- return ret
-}
-
-func (fc *funcContext) FindLocalVarAndBlock(name string) (int, *codeBlock) {
- for block := fc.Block; block != nil; block = block.Parent {
- if index := block.LocalVars.Find(name); index > -1 {
- return index, block
- }
- }
- return -1, nil
-}
-
-func (fc *funcContext) FindLocalVar(name string) int {
- idx, _ := fc.FindLocalVarAndBlock(name)
- return idx
-}
-
-func (fc *funcContext) LocalVars() []varNamePoolValue {
- result := make([]varNamePoolValue, 0, 32)
- for _, block := range fc.Blocks {
- result = append(result, block.LocalVars.List()...)
- }
- return result
-}
-
-func (fc *funcContext) EnterBlock(blabel int, pos ast.PositionHolder) {
- fc.Block = newCodeBlock(newVarNamePool(fc.RegTop()), blabel, fc.Block, pos)
- fc.Blocks = append(fc.Blocks, fc.Block)
-}
-
-func (fc *funcContext) CloseUpvalues() int {
- n := -1
- if fc.Block.RefUpvalue {
- n = fc.Block.Parent.LocalVars.LastIndex()
- fc.Code.AddABC(OP_CLOSE, n, 0, 0, fc.Block.LastLine)
- }
- return n
-}
-
-func (fc *funcContext) LeaveBlock() int {
- closed := fc.CloseUpvalues()
- fc.EndScope()
- fc.Block = fc.Block.Parent
- fc.SetRegTop(fc.Block.LocalVars.LastIndex())
- return closed
-}
-
-func (fc *funcContext) EndScope() {
- for _, vr := range fc.Block.LocalVars.List() {
- fc.Proto.DbgLocals[vr.Index].EndPc = fc.Code.LastPC()
- }
-}
-
-func (fc *funcContext) SetRegTop(top int) {
- if top > maxRegisters {
- raiseCompileError(fc, fc.Proto.LineDefined, "too many local variables")
- }
- fc.regTop = top
-}
-
-func (fc *funcContext) RegTop() int {
- return fc.regTop
-}
-
-/* FuncContext }}} */
-
-func compileChunk(context *funcContext, chunk []ast.Stmt) { // {{{
- for _, stmt := range chunk {
- compileStmt(context, stmt)
- }
-} // }}}
-
-func compileBlock(context *funcContext, chunk []ast.Stmt) { // {{{
- if len(chunk) == 0 {
- return
- }
- ph := &ast.Node{}
- ph.SetLine(sline(chunk[0]))
- ph.SetLastLine(eline(chunk[len(chunk)-1]))
- context.EnterBlock(labelNoJump, ph)
- for _, stmt := range chunk {
- compileStmt(context, stmt)
- }
- context.LeaveBlock()
-} // }}}
-
-func compileStmt(context *funcContext, stmt ast.Stmt) { // {{{
- switch st := stmt.(type) {
- case *ast.AssignStmt:
- compileAssignStmt(context, st)
- case *ast.LocalAssignStmt:
- compileLocalAssignStmt(context, st)
- case *ast.FuncCallStmt:
- compileFuncCallExpr(context, context.RegTop(), st.Expr.(*ast.FuncCallExpr), ecnone(-1))
- case *ast.DoBlockStmt:
- context.EnterBlock(labelNoJump, st)
- compileChunk(context, st.Stmts)
- context.LeaveBlock()
- case *ast.WhileStmt:
- compileWhileStmt(context, st)
- case *ast.RepeatStmt:
- compileRepeatStmt(context, st)
- case *ast.FuncDefStmt:
- compileFuncDefStmt(context, st)
- case *ast.ReturnStmt:
- compileReturnStmt(context, st)
- case *ast.IfStmt:
- compileIfStmt(context, st)
- case *ast.BreakStmt:
- compileBreakStmt(context, st)
- case *ast.NumberForStmt:
- compileNumberForStmt(context, st)
- case *ast.GenericForStmt:
- compileGenericForStmt(context, st)
- }
-} // }}}
-
-func compileAssignStmtLeft(context *funcContext, stmt *ast.AssignStmt) (int, []*assigncontext) { // {{{
- reg := context.RegTop()
- acs := make([]*assigncontext, 0, len(stmt.Lhs))
- for i, lhs := range stmt.Lhs {
- islast := i == len(stmt.Lhs)-1
- switch st := lhs.(type) {
- case *ast.IdentExpr:
- identtype := getIdentRefType(context, context, st)
- ec := &expcontext{identtype, regNotDefined, 0}
- switch identtype {
- case ecGlobal:
- context.ConstIndex(LString(st.Value))
- case ecUpvalue:
- context.Upvalues.RegisterUnique(st.Value)
- case ecLocal:
- if islast {
- ec.reg = context.FindLocalVar(st.Value)
- }
- }
- acs = append(acs, &assigncontext{ec, 0, 0, false, false})
- case *ast.AttrGetExpr:
- ac := &assigncontext{&expcontext{ecTable, regNotDefined, 0}, 0, 0, false, false}
- compileExprWithKMVPropagation(context, st.Object, ®, &ac.ec.reg)
- ac.keyrk = reg
- reg += compileExpr(context, reg, st.Key, ecnone(0))
- if _, ok := st.Key.(*ast.StringExpr); ok {
- ac.keyks = true
- }
- acs = append(acs, ac)
-
- default:
- panic("invalid left expression.")
- }
- }
- return reg, acs
-} // }}}
-
-func compileAssignStmtRight(context *funcContext, stmt *ast.AssignStmt, reg int, acs []*assigncontext) (int, []*assigncontext) { // {{{
- lennames := len(stmt.Lhs)
- lenexprs := len(stmt.Rhs)
- namesassigned := 0
-
- for namesassigned < lennames {
- ac := acs[namesassigned]
- ec := ac.ec
- var expr ast.Expr = nil
- if namesassigned >= lenexprs {
- expr = &ast.NilExpr{}
- expr.SetLine(sline(stmt.Lhs[namesassigned]))
- expr.SetLastLine(eline(stmt.Lhs[namesassigned]))
- } else if isVarArgReturnExpr(stmt.Rhs[namesassigned]) && (lenexprs-namesassigned-1) <= 0 {
- varargopt := lennames - namesassigned - 1
- regstart := reg
- reginc := compileExpr(context, reg, stmt.Rhs[namesassigned], ecnone(varargopt))
- reg += reginc
- for i := namesassigned; i < namesassigned+int(reginc); i++ {
- acs[i].needmove = true
- if acs[i].ec.ctype == ecTable {
- acs[i].valuerk = regstart + (i - namesassigned)
- }
- }
- namesassigned = lennames
- continue
- }
-
- if expr == nil {
- expr = stmt.Rhs[namesassigned]
- }
- idx := reg
- reginc := compileExpr(context, reg, expr, ec)
- if ec.ctype == ecTable {
- if _, ok := expr.(*ast.LogicalOpExpr); !ok {
- context.Code.PropagateKMV(context.RegTop(), &ac.valuerk, ®, reginc)
- } else {
- ac.valuerk = idx
- reg += reginc
- }
- } else {
- ac.needmove = reginc != 0
- reg += reginc
- }
- namesassigned += 1
- }
-
- rightreg := reg - 1
-
- // extra right exprs
- for i := namesassigned; i < lenexprs; i++ {
- varargopt := -1
- if i != lenexprs-1 {
- varargopt = 0
- }
- reg += compileExpr(context, reg, stmt.Rhs[i], ecnone(varargopt))
- }
- return rightreg, acs
-} // }}}
-
-func compileAssignStmt(context *funcContext, stmt *ast.AssignStmt) { // {{{
- code := context.Code
- lennames := len(stmt.Lhs)
- reg, acs := compileAssignStmtLeft(context, stmt)
- reg, acs = compileAssignStmtRight(context, stmt, reg, acs)
-
- for i := lennames - 1; i >= 0; i-- {
- ex := stmt.Lhs[i]
- switch acs[i].ec.ctype {
- case ecLocal:
- if acs[i].needmove {
- code.AddABC(OP_MOVE, context.FindLocalVar(ex.(*ast.IdentExpr).Value), reg, 0, sline(ex))
- reg -= 1
- }
- case ecGlobal:
- code.AddABx(OP_SETGLOBAL, reg, context.ConstIndex(LString(ex.(*ast.IdentExpr).Value)), sline(ex))
- reg -= 1
- case ecUpvalue:
- code.AddABC(OP_SETUPVAL, reg, context.Upvalues.RegisterUnique(ex.(*ast.IdentExpr).Value), 0, sline(ex))
- reg -= 1
- case ecTable:
- opcode := OP_SETTABLE
- if acs[i].keyks {
- opcode = OP_SETTABLEKS
- }
- code.AddABC(opcode, acs[i].ec.reg, acs[i].keyrk, acs[i].valuerk, sline(ex))
- if !opIsK(acs[i].valuerk) {
- reg -= 1
- }
- }
- }
-} // }}}
-
-func compileRegAssignment(context *funcContext, names []string, exprs []ast.Expr, reg int, nvars int, line int) { // {{{
- lennames := len(names)
- lenexprs := len(exprs)
- namesassigned := 0
- ec := &expcontext{}
-
- for namesassigned < lennames && namesassigned < lenexprs {
- if isVarArgReturnExpr(exprs[namesassigned]) && (lenexprs-namesassigned-1) <= 0 {
-
- varargopt := nvars - namesassigned
- ecupdate(ec, ecVararg, reg, varargopt-1)
- compileExpr(context, reg, exprs[namesassigned], ec)
- reg += varargopt
- namesassigned = lennames
- } else {
- ecupdate(ec, ecLocal, reg, 0)
- compileExpr(context, reg, exprs[namesassigned], ec)
- reg += 1
- namesassigned += 1
- }
- }
-
- // extra left names
- if lennames > namesassigned {
- restleft := lennames - namesassigned - 1
- context.Code.AddLoadNil(reg, reg+restleft, line)
- reg += restleft
- }
-
- // extra right exprs
- for i := namesassigned; i < lenexprs; i++ {
- varargopt := -1
- if i != lenexprs-1 {
- varargopt = 0
- }
- ecupdate(ec, ecNone, reg, varargopt)
- reg += compileExpr(context, reg, exprs[i], ec)
- }
-} // }}}
-
-func compileLocalAssignStmt(context *funcContext, stmt *ast.LocalAssignStmt) { // {{{
- reg := context.RegTop()
- if len(stmt.Names) == 1 && len(stmt.Exprs) == 1 {
- if _, ok := stmt.Exprs[0].(*ast.FunctionExpr); ok {
- context.RegisterLocalVar(stmt.Names[0])
- compileRegAssignment(context, stmt.Names, stmt.Exprs, reg, len(stmt.Names), sline(stmt))
- return
- }
- }
-
- compileRegAssignment(context, stmt.Names, stmt.Exprs, reg, len(stmt.Names), sline(stmt))
- for _, name := range stmt.Names {
- context.RegisterLocalVar(name)
- }
-} // }}}
-
-func compileReturnStmt(context *funcContext, stmt *ast.ReturnStmt) { // {{{
- lenexprs := len(stmt.Exprs)
- code := context.Code
- reg := context.RegTop()
- a := reg
- lastisvaarg := false
-
- if lenexprs == 1 {
- switch ex := stmt.Exprs[0].(type) {
- case *ast.IdentExpr:
- if idx := context.FindLocalVar(ex.Value); idx > -1 {
- code.AddABC(OP_RETURN, idx, 2, 0, sline(stmt))
- return
- }
- case *ast.FuncCallExpr:
- reg += compileExpr(context, reg, ex, ecnone(-2))
- code.SetOpCode(code.LastPC(), OP_TAILCALL)
- code.AddABC(OP_RETURN, a, 0, 0, sline(stmt))
- return
- }
- }
-
- for i, expr := range stmt.Exprs {
- if i == lenexprs-1 && isVarArgReturnExpr(expr) {
- compileExpr(context, reg, expr, ecnone(-2))
- lastisvaarg = true
- } else {
- reg += compileExpr(context, reg, expr, ecnone(0))
- }
- }
- count := reg - a + 1
- if lastisvaarg {
- count = 0
- }
- context.Code.AddABC(OP_RETURN, a, count, 0, sline(stmt))
-} // }}}
-
-func compileIfStmt(context *funcContext, stmt *ast.IfStmt) { // {{{
- thenlabel := context.NewLabel()
- elselabel := context.NewLabel()
- endlabel := context.NewLabel()
-
- compileBranchCondition(context, context.RegTop(), stmt.Condition, thenlabel, elselabel, false)
- context.SetLabelPc(thenlabel, context.Code.LastPC())
- compileBlock(context, stmt.Then)
- if len(stmt.Else) > 0 {
- context.Code.AddASbx(OP_JMP, 0, endlabel, sline(stmt))
- }
- context.SetLabelPc(elselabel, context.Code.LastPC())
- if len(stmt.Else) > 0 {
- compileBlock(context, stmt.Else)
- context.SetLabelPc(endlabel, context.Code.LastPC())
- }
-
-} // }}}
-
-func compileBranchCondition(context *funcContext, reg int, expr ast.Expr, thenlabel, elselabel int, hasnextcond bool) { // {{{
- // TODO folding constants?
- code := context.Code
- flip := 0
- jumplabel := elselabel
- if hasnextcond {
- flip = 1
- jumplabel = thenlabel
- }
-
- switch ex := expr.(type) {
- case *ast.FalseExpr, *ast.NilExpr:
- if !hasnextcond {
- code.AddASbx(OP_JMP, 0, elselabel, sline(expr))
- return
- }
- case *ast.TrueExpr, *ast.NumberExpr, *ast.StringExpr:
- if !hasnextcond {
- return
- }
- case *ast.UnaryNotOpExpr:
- compileBranchCondition(context, reg, ex.Expr, elselabel, thenlabel, !hasnextcond)
- return
- case *ast.LogicalOpExpr:
- switch ex.Operator {
- case "and":
- nextcondlabel := context.NewLabel()
- compileBranchCondition(context, reg, ex.Lhs, nextcondlabel, elselabel, false)
- context.SetLabelPc(nextcondlabel, context.Code.LastPC())
- compileBranchCondition(context, reg, ex.Rhs, thenlabel, elselabel, hasnextcond)
- case "or":
- nextcondlabel := context.NewLabel()
- compileBranchCondition(context, reg, ex.Lhs, thenlabel, nextcondlabel, true)
- context.SetLabelPc(nextcondlabel, context.Code.LastPC())
- compileBranchCondition(context, reg, ex.Rhs, thenlabel, elselabel, hasnextcond)
- }
- return
- case *ast.RelationalOpExpr:
- compileRelationalOpExprAux(context, reg, ex, flip, jumplabel)
- return
- }
-
- a := reg
- compileExprWithMVPropagation(context, expr, ®, &a)
- code.AddABC(OP_TEST, a, 0, 0^flip, sline(expr))
- code.AddASbx(OP_JMP, 0, jumplabel, sline(expr))
-} // }}}
-
-func compileWhileStmt(context *funcContext, stmt *ast.WhileStmt) { // {{{
- thenlabel := context.NewLabel()
- elselabel := context.NewLabel()
- condlabel := context.NewLabel()
-
- context.SetLabelPc(condlabel, context.Code.LastPC())
- compileBranchCondition(context, context.RegTop(), stmt.Condition, thenlabel, elselabel, false)
- context.SetLabelPc(thenlabel, context.Code.LastPC())
- context.EnterBlock(elselabel, stmt)
- compileChunk(context, stmt.Stmts)
- context.CloseUpvalues()
- context.Code.AddASbx(OP_JMP, 0, condlabel, eline(stmt))
- context.LeaveBlock()
- context.SetLabelPc(elselabel, context.Code.LastPC())
-} // }}}
-
-func compileRepeatStmt(context *funcContext, stmt *ast.RepeatStmt) { // {{{
- initlabel := context.NewLabel()
- thenlabel := context.NewLabel()
- elselabel := context.NewLabel()
-
- context.SetLabelPc(initlabel, context.Code.LastPC())
- context.SetLabelPc(elselabel, context.Code.LastPC())
- context.EnterBlock(thenlabel, stmt)
- compileChunk(context, stmt.Stmts)
- compileBranchCondition(context, context.RegTop(), stmt.Condition, thenlabel, elselabel, false)
-
- context.SetLabelPc(thenlabel, context.Code.LastPC())
- n := context.LeaveBlock()
-
- if n > -1 {
- label := context.NewLabel()
- context.Code.AddASbx(OP_JMP, 0, label, eline(stmt))
- context.SetLabelPc(elselabel, context.Code.LastPC())
- context.Code.AddABC(OP_CLOSE, n, 0, 0, eline(stmt))
- context.Code.AddASbx(OP_JMP, 0, initlabel, eline(stmt))
- context.SetLabelPc(label, context.Code.LastPC())
- }
-
-} // }}}
-
-func compileBreakStmt(context *funcContext, stmt *ast.BreakStmt) { // {{{
- for block := context.Block; block != nil; block = block.Parent {
- if label := block.BreakLabel; label != labelNoJump {
- if block.RefUpvalue {
- context.Code.AddABC(OP_CLOSE, block.Parent.LocalVars.LastIndex(), 0, 0, sline(stmt))
- }
- context.Code.AddASbx(OP_JMP, 0, label, sline(stmt))
- return
- }
- }
- raiseCompileError(context, sline(stmt), "no loop to break")
-} // }}}
-
-func compileFuncDefStmt(context *funcContext, stmt *ast.FuncDefStmt) { // {{{
- if stmt.Name.Func == nil {
- reg := context.RegTop()
- var treg, kreg int
- compileExprWithKMVPropagation(context, stmt.Name.Receiver, ®, &treg)
- kreg = loadRk(context, ®, stmt.Func, LString(stmt.Name.Method))
- compileExpr(context, reg, stmt.Func, ecfuncdef)
- context.Code.AddABC(OP_SETTABLE, treg, kreg, reg, sline(stmt.Name.Receiver))
- } else {
- astmt := &ast.AssignStmt{Lhs: []ast.Expr{stmt.Name.Func}, Rhs: []ast.Expr{stmt.Func}}
- astmt.SetLine(sline(stmt.Func))
- astmt.SetLastLine(eline(stmt.Func))
- compileAssignStmt(context, astmt)
- }
-} // }}}
-
-func compileNumberForStmt(context *funcContext, stmt *ast.NumberForStmt) { // {{{
- code := context.Code
- endlabel := context.NewLabel()
- ec := &expcontext{}
-
- context.EnterBlock(endlabel, stmt)
- reg := context.RegTop()
- rindex := context.RegisterLocalVar("(for index)")
- ecupdate(ec, ecLocal, rindex, 0)
- compileExpr(context, reg, stmt.Init, ec)
-
- reg = context.RegTop()
- rlimit := context.RegisterLocalVar("(for limit)")
- ecupdate(ec, ecLocal, rlimit, 0)
- compileExpr(context, reg, stmt.Limit, ec)
-
- reg = context.RegTop()
- rstep := context.RegisterLocalVar("(for step)")
- if stmt.Step == nil {
- stmt.Step = &ast.NumberExpr{Value: "1"}
- stmt.Step.SetLine(sline(stmt.Init))
- }
- ecupdate(ec, ecLocal, rstep, 0)
- compileExpr(context, reg, stmt.Step, ec)
-
- code.AddASbx(OP_FORPREP, rindex, 0, sline(stmt))
-
- context.RegisterLocalVar(stmt.Name)
-
- bodypc := code.LastPC()
- compileChunk(context, stmt.Stmts)
-
- context.LeaveBlock()
-
- flpc := code.LastPC()
- code.AddASbx(OP_FORLOOP, rindex, bodypc-(flpc+1), sline(stmt))
-
- context.SetLabelPc(endlabel, code.LastPC())
- code.SetSbx(bodypc, flpc-bodypc)
-
-} // }}}
-
-func compileGenericForStmt(context *funcContext, stmt *ast.GenericForStmt) { // {{{
- code := context.Code
- endlabel := context.NewLabel()
- bodylabel := context.NewLabel()
- fllabel := context.NewLabel()
- nnames := len(stmt.Names)
-
- context.EnterBlock(endlabel, stmt)
- rgen := context.RegisterLocalVar("(for generator)")
- context.RegisterLocalVar("(for state)")
- context.RegisterLocalVar("(for control)")
-
- compileRegAssignment(context, stmt.Names, stmt.Exprs, context.RegTop()-3, 3, sline(stmt))
-
- code.AddASbx(OP_JMP, 0, fllabel, sline(stmt))
-
- for _, name := range stmt.Names {
- context.RegisterLocalVar(name)
- }
-
- context.SetLabelPc(bodylabel, code.LastPC())
- compileChunk(context, stmt.Stmts)
-
- context.LeaveBlock()
-
- context.SetLabelPc(fllabel, code.LastPC())
- code.AddABC(OP_TFORLOOP, rgen, 0, nnames, sline(stmt))
- code.AddASbx(OP_JMP, 0, bodylabel, sline(stmt))
-
- context.SetLabelPc(endlabel, code.LastPC())
-} // }}}
-
-func compileExpr(context *funcContext, reg int, expr ast.Expr, ec *expcontext) int { // {{{
- code := context.Code
- sreg := savereg(ec, reg)
- sused := 1
- if sreg < reg {
- sused = 0
- }
-
- switch ex := expr.(type) {
- case *ast.StringExpr:
- code.AddABx(OP_LOADK, sreg, context.ConstIndex(LString(ex.Value)), sline(ex))
- return sused
- case *ast.NumberExpr:
- num, err := parseNumber(ex.Value)
- if err != nil {
- num = LNumber(math.NaN())
- }
- code.AddABx(OP_LOADK, sreg, context.ConstIndex(num), sline(ex))
- return sused
- case *constLValueExpr:
- code.AddABx(OP_LOADK, sreg, context.ConstIndex(ex.Value), sline(ex))
- return sused
- case *ast.NilExpr:
- code.AddLoadNil(sreg, sreg, sline(ex))
- return sused
- case *ast.FalseExpr:
- code.AddABC(OP_LOADBOOL, sreg, 0, 0, sline(ex))
- return sused
- case *ast.TrueExpr:
- code.AddABC(OP_LOADBOOL, sreg, 1, 0, sline(ex))
- return sused
- case *ast.IdentExpr:
- switch getIdentRefType(context, context, ex) {
- case ecGlobal:
- code.AddABx(OP_GETGLOBAL, sreg, context.ConstIndex(LString(ex.Value)), sline(ex))
- case ecUpvalue:
- code.AddABC(OP_GETUPVAL, sreg, context.Upvalues.RegisterUnique(ex.Value), 0, sline(ex))
- case ecLocal:
- b := context.FindLocalVar(ex.Value)
- code.AddABC(OP_MOVE, sreg, b, 0, sline(ex))
- }
- return sused
- case *ast.Comma3Expr:
- if context.Proto.IsVarArg == 0 {
- raiseCompileError(context, sline(ex), "cannot use '...' outside a vararg function")
- }
- context.Proto.IsVarArg &= ^VarArgNeedsArg
- code.AddABC(OP_VARARG, sreg, 2+ec.varargopt, 0, sline(ex))
- if context.RegTop() > (sreg+2+ec.varargopt) || ec.varargopt < -1 {
- return 0
- }
- return (sreg + 1 + ec.varargopt) - reg
- case *ast.AttrGetExpr:
- a := sreg
- b := reg
- compileExprWithMVPropagation(context, ex.Object, ®, &b)
- c := reg
- compileExprWithKMVPropagation(context, ex.Key, ®, &c)
- opcode := OP_GETTABLE
- if _, ok := ex.Key.(*ast.StringExpr); ok {
- opcode = OP_GETTABLEKS
- }
- code.AddABC(opcode, a, b, c, sline(ex))
- return sused
- case *ast.TableExpr:
- compileTableExpr(context, reg, ex, ec)
- return 1
- case *ast.ArithmeticOpExpr:
- compileArithmeticOpExpr(context, reg, ex, ec)
- return sused
- case *ast.StringConcatOpExpr:
- compileStringConcatOpExpr(context, reg, ex, ec)
- return sused
- case *ast.UnaryMinusOpExpr, *ast.UnaryNotOpExpr, *ast.UnaryLenOpExpr:
- compileUnaryOpExpr(context, reg, ex, ec)
- return sused
- case *ast.RelationalOpExpr:
- compileRelationalOpExpr(context, reg, ex, ec)
- return sused
- case *ast.LogicalOpExpr:
- compileLogicalOpExpr(context, reg, ex, ec)
- return sused
- case *ast.FuncCallExpr:
- return compileFuncCallExpr(context, reg, ex, ec)
- case *ast.FunctionExpr:
- childcontext := newFuncContext(context.Proto.SourceName, context)
- compileFunctionExpr(childcontext, ex, ec)
- protono := len(context.Proto.FunctionPrototypes)
- context.Proto.FunctionPrototypes = append(context.Proto.FunctionPrototypes, childcontext.Proto)
- code.AddABx(OP_CLOSURE, sreg, protono, sline(ex))
- for _, upvalue := range childcontext.Upvalues.List() {
- localidx, block := context.FindLocalVarAndBlock(upvalue.Name)
- if localidx > -1 {
- code.AddABC(OP_MOVE, 0, localidx, 0, sline(ex))
- block.RefUpvalue = true
- } else {
- upvalueidx := context.Upvalues.Find(upvalue.Name)
- if upvalueidx < 0 {
- upvalueidx = context.Upvalues.RegisterUnique(upvalue.Name)
- }
- code.AddABC(OP_GETUPVAL, 0, upvalueidx, 0, sline(ex))
- }
- }
- return sused
- default:
- panic(fmt.Sprintf("expr %v not implemented.", reflect.TypeOf(ex).Elem().Name()))
- }
-
-} // }}}
-
-func compileExprWithPropagation(context *funcContext, expr ast.Expr, reg *int, save *int, propergator func(int, *int, *int, int)) { // {{{
- reginc := compileExpr(context, *reg, expr, ecnone(0))
- if _, ok := expr.(*ast.LogicalOpExpr); ok {
- *save = *reg
- *reg = *reg + reginc
- } else {
- propergator(context.RegTop(), save, reg, reginc)
- }
-} // }}}
-
-func compileExprWithKMVPropagation(context *funcContext, expr ast.Expr, reg *int, save *int) { // {{{
- compileExprWithPropagation(context, expr, reg, save, context.Code.PropagateKMV)
-} // }}}
-
-func compileExprWithMVPropagation(context *funcContext, expr ast.Expr, reg *int, save *int) { // {{{
- compileExprWithPropagation(context, expr, reg, save, context.Code.PropagateMV)
-} // }}}
-
-func constFold(exp ast.Expr) ast.Expr { // {{{
- switch expr := exp.(type) {
- case *ast.ArithmeticOpExpr:
- lvalue, lisconst := lnumberValue(constFold(expr.Lhs))
- rvalue, risconst := lnumberValue(constFold(expr.Rhs))
- if lisconst && risconst {
- switch expr.Operator {
- case "+":
- return &constLValueExpr{Value: lvalue + rvalue}
- case "-":
- return &constLValueExpr{Value: lvalue - rvalue}
- case "*":
- return &constLValueExpr{Value: lvalue * rvalue}
- case "/":
- return &constLValueExpr{Value: lvalue / rvalue}
- case "%":
- return &constLValueExpr{Value: luaModulo(lvalue, rvalue)}
- case "^":
- return &constLValueExpr{Value: LNumber(math.Pow(float64(lvalue), float64(rvalue)))}
- default:
- panic(fmt.Sprintf("unknown binop: %v", expr.Operator))
- }
- } else {
- return expr
- }
- case *ast.UnaryMinusOpExpr:
- expr.Expr = constFold(expr.Expr)
- if value, ok := lnumberValue(expr.Expr); ok {
- return &constLValueExpr{Value: LNumber(-value)}
- }
- return expr
- default:
-
- return exp
- }
-} // }}}
-
-func compileFunctionExpr(context *funcContext, funcexpr *ast.FunctionExpr, ec *expcontext) { // {{{
- context.Proto.LineDefined = sline(funcexpr)
- context.Proto.LastLineDefined = eline(funcexpr)
- if len(funcexpr.ParList.Names) > maxRegisters {
- raiseCompileError(context, context.Proto.LineDefined, "register overflow")
- }
- context.Proto.NumParameters = uint8(len(funcexpr.ParList.Names))
- if ec.ctype == ecMethod {
- context.Proto.NumParameters += 1
- context.RegisterLocalVar("self")
- }
- for _, name := range funcexpr.ParList.Names {
- context.RegisterLocalVar(name)
- }
- if funcexpr.ParList.HasVargs {
- if CompatVarArg {
- context.Proto.IsVarArg = VarArgHasArg | VarArgNeedsArg
- if context.Parent != nil {
- context.RegisterLocalVar("arg")
- }
- }
- context.Proto.IsVarArg |= VarArgIsVarArg
- }
-
- compileChunk(context, funcexpr.Stmts)
-
- context.Code.AddABC(OP_RETURN, 0, 1, 0, eline(funcexpr))
- context.EndScope()
- context.Proto.Code = context.Code.List()
- context.Proto.DbgSourcePositions = context.Code.PosList()
- context.Proto.DbgUpvalues = context.Upvalues.Names()
- context.Proto.NumUpvalues = uint8(len(context.Proto.DbgUpvalues))
- for _, clv := range context.Proto.Constants {
- sv := ""
- if slv, ok := clv.(LString); ok {
- sv = string(slv)
- }
- context.Proto.stringConstants = append(context.Proto.stringConstants, sv)
- }
- patchCode(context)
-} // }}}
-
-func compileTableExpr(context *funcContext, reg int, ex *ast.TableExpr, ec *expcontext) { // {{{
- code := context.Code
- /*
- tablereg := savereg(ec, reg)
- if tablereg == reg {
- reg += 1
- }
- */
- tablereg := reg
- reg++
- code.AddABC(OP_NEWTABLE, tablereg, 0, 0, sline(ex))
- tablepc := code.LastPC()
- regbase := reg
-
- arraycount := 0
- lastvararg := false
- for i, field := range ex.Fields {
- islast := i == len(ex.Fields)-1
- if field.Key == nil {
- if islast && isVarArgReturnExpr(field.Value) {
- reg += compileExpr(context, reg, field.Value, ecnone(-2))
- lastvararg = true
- } else {
- reg += compileExpr(context, reg, field.Value, ecnone(0))
- arraycount += 1
- }
- } else {
- regorg := reg
- b := reg
- compileExprWithKMVPropagation(context, field.Key, ®, &b)
- c := reg
- compileExprWithKMVPropagation(context, field.Value, ®, &c)
- opcode := OP_SETTABLE
- if _, ok := field.Key.(*ast.StringExpr); ok {
- opcode = OP_SETTABLEKS
- }
- code.AddABC(opcode, tablereg, b, c, sline(ex))
- reg = regorg
- }
- flush := arraycount % FieldsPerFlush
- if (arraycount != 0 && (flush == 0 || islast)) || lastvararg {
- reg = regbase
- num := flush
- if num == 0 {
- num = FieldsPerFlush
- }
- c := (arraycount-1)/FieldsPerFlush + 1
- b := num
- if islast && isVarArgReturnExpr(field.Value) {
- b = 0
- }
- line := field.Value
- if field.Key != nil {
- line = field.Key
- }
- if c > 511 {
- c = 0
- }
- code.AddABC(OP_SETLIST, tablereg, b, c, sline(line))
- if c == 0 {
- code.Add(uint32(c), sline(line))
- }
- }
- }
- code.SetB(tablepc, int2Fb(arraycount))
- code.SetC(tablepc, int2Fb(len(ex.Fields)-arraycount))
- if shouldmove(ec, tablereg) {
- code.AddABC(OP_MOVE, ec.reg, tablereg, 0, sline(ex))
- }
-} // }}}
-
-func compileArithmeticOpExpr(context *funcContext, reg int, expr *ast.ArithmeticOpExpr, ec *expcontext) { // {{{
- exp := constFold(expr)
- if ex, ok := exp.(*constLValueExpr); ok {
- exp.SetLine(sline(expr))
- compileExpr(context, reg, ex, ec)
- return
- }
- expr, _ = exp.(*ast.ArithmeticOpExpr)
- a := savereg(ec, reg)
- b := reg
- compileExprWithKMVPropagation(context, expr.Lhs, ®, &b)
- c := reg
- compileExprWithKMVPropagation(context, expr.Rhs, ®, &c)
-
- op := 0
- switch expr.Operator {
- case "+":
- op = OP_ADD
- case "-":
- op = OP_SUB
- case "*":
- op = OP_MUL
- case "/":
- op = OP_DIV
- case "%":
- op = OP_MOD
- case "^":
- op = OP_POW
- }
- context.Code.AddABC(op, a, b, c, sline(expr))
-} // }}}
-
-func compileStringConcatOpExpr(context *funcContext, reg int, expr *ast.StringConcatOpExpr, ec *expcontext) { // {{{
- code := context.Code
- crange := 1
- for current := expr.Rhs; current != nil; {
- if ex, ok := current.(*ast.StringConcatOpExpr); ok {
- crange += 1
- current = ex.Rhs
- } else {
- current = nil
- }
- }
- a := savereg(ec, reg)
- basereg := reg
- reg += compileExpr(context, reg, expr.Lhs, ecnone(0))
- reg += compileExpr(context, reg, expr.Rhs, ecnone(0))
- for pc := code.LastPC(); pc != 0 && opGetOpCode(code.At(pc)) == OP_CONCAT; pc-- {
- code.Pop()
- }
- code.AddABC(OP_CONCAT, a, basereg, basereg+crange, sline(expr))
-} // }}}
-
-func compileUnaryOpExpr(context *funcContext, reg int, expr ast.Expr, ec *expcontext) { // {{{
- opcode := 0
- code := context.Code
- var operandexpr ast.Expr
- switch ex := expr.(type) {
- case *ast.UnaryMinusOpExpr:
- exp := constFold(ex)
- if lvexpr, ok := exp.(*constLValueExpr); ok {
- exp.SetLine(sline(expr))
- compileExpr(context, reg, lvexpr, ec)
- return
- }
- ex, _ = exp.(*ast.UnaryMinusOpExpr)
- operandexpr = ex.Expr
- opcode = OP_UNM
- case *ast.UnaryNotOpExpr:
- switch ex.Expr.(type) {
- case *ast.TrueExpr:
- code.AddABC(OP_LOADBOOL, savereg(ec, reg), 0, 0, sline(expr))
- return
- case *ast.FalseExpr, *ast.NilExpr:
- code.AddABC(OP_LOADBOOL, savereg(ec, reg), 1, 0, sline(expr))
- return
- default:
- opcode = OP_NOT
- operandexpr = ex.Expr
- }
- case *ast.UnaryLenOpExpr:
- opcode = OP_LEN
- operandexpr = ex.Expr
- }
-
- a := savereg(ec, reg)
- b := reg
- compileExprWithMVPropagation(context, operandexpr, ®, &b)
- code.AddABC(opcode, a, b, 0, sline(expr))
-} // }}}
-
-func compileRelationalOpExprAux(context *funcContext, reg int, expr *ast.RelationalOpExpr, flip int, label int) { // {{{
- code := context.Code
- b := reg
- compileExprWithKMVPropagation(context, expr.Lhs, ®, &b)
- c := reg
- compileExprWithKMVPropagation(context, expr.Rhs, ®, &c)
- switch expr.Operator {
- case "<":
- code.AddABC(OP_LT, 0^flip, b, c, sline(expr))
- case ">":
- code.AddABC(OP_LT, 0^flip, c, b, sline(expr))
- case "<=":
- code.AddABC(OP_LE, 0^flip, b, c, sline(expr))
- case ">=":
- code.AddABC(OP_LE, 0^flip, c, b, sline(expr))
- case "==":
- code.AddABC(OP_EQ, 0^flip, b, c, sline(expr))
- case "~=":
- code.AddABC(OP_EQ, 1^flip, b, c, sline(expr))
- }
- code.AddASbx(OP_JMP, 0, label, sline(expr))
-} // }}}
-
-func compileRelationalOpExpr(context *funcContext, reg int, expr *ast.RelationalOpExpr, ec *expcontext) { // {{{
- a := savereg(ec, reg)
- code := context.Code
- jumplabel := context.NewLabel()
- compileRelationalOpExprAux(context, reg, expr, 1, jumplabel)
- code.AddABC(OP_LOADBOOL, a, 0, 1, sline(expr))
- context.SetLabelPc(jumplabel, code.LastPC())
- code.AddABC(OP_LOADBOOL, a, 1, 0, sline(expr))
-} // }}}
-
-func compileLogicalOpExpr(context *funcContext, reg int, expr *ast.LogicalOpExpr, ec *expcontext) { // {{{
- a := savereg(ec, reg)
- code := context.Code
- endlabel := context.NewLabel()
- lb := &lblabels{context.NewLabel(), context.NewLabel(), endlabel, false}
- nextcondlabel := context.NewLabel()
- if expr.Operator == "and" {
- compileLogicalOpExprAux(context, reg, expr.Lhs, ec, nextcondlabel, endlabel, false, lb)
- context.SetLabelPc(nextcondlabel, code.LastPC())
- compileLogicalOpExprAux(context, reg, expr.Rhs, ec, endlabel, endlabel, false, lb)
- } else {
- compileLogicalOpExprAux(context, reg, expr.Lhs, ec, endlabel, nextcondlabel, true, lb)
- context.SetLabelPc(nextcondlabel, code.LastPC())
- compileLogicalOpExprAux(context, reg, expr.Rhs, ec, endlabel, endlabel, false, lb)
- }
-
- if lb.b {
- context.SetLabelPc(lb.f, code.LastPC())
- code.AddABC(OP_LOADBOOL, a, 0, 1, sline(expr))
- context.SetLabelPc(lb.t, code.LastPC())
- code.AddABC(OP_LOADBOOL, a, 1, 0, sline(expr))
- }
-
- lastinst := code.Last()
- if opGetOpCode(lastinst) == OP_JMP && opGetArgSbx(lastinst) == endlabel {
- code.Pop()
- }
-
- context.SetLabelPc(endlabel, code.LastPC())
-} // }}}
-
-func compileLogicalOpExprAux(context *funcContext, reg int, expr ast.Expr, ec *expcontext, thenlabel, elselabel int, hasnextcond bool, lb *lblabels) { // {{{
- // TODO folding constants?
- code := context.Code
- flip := 0
- jumplabel := elselabel
- if hasnextcond {
- flip = 1
- jumplabel = thenlabel
- }
-
- switch ex := expr.(type) {
- case *ast.FalseExpr:
- if elselabel == lb.e {
- code.AddASbx(OP_JMP, 0, lb.f, sline(expr))
- lb.b = true
- } else {
- code.AddASbx(OP_JMP, 0, elselabel, sline(expr))
- }
- return
- case *ast.NilExpr:
- if elselabel == lb.e {
- compileExpr(context, reg, expr, ec)
- code.AddASbx(OP_JMP, 0, lb.e, sline(expr))
- } else {
- code.AddASbx(OP_JMP, 0, elselabel, sline(expr))
- }
- return
- case *ast.TrueExpr:
- if thenlabel == lb.e {
- code.AddASbx(OP_JMP, 0, lb.t, sline(expr))
- lb.b = true
- } else {
- code.AddASbx(OP_JMP, 0, thenlabel, sline(expr))
- }
- return
- case *ast.NumberExpr, *ast.StringExpr:
- if thenlabel == lb.e {
- compileExpr(context, reg, expr, ec)
- code.AddASbx(OP_JMP, 0, lb.e, sline(expr))
- } else {
- code.AddASbx(OP_JMP, 0, thenlabel, sline(expr))
- }
- return
- case *ast.LogicalOpExpr:
- switch ex.Operator {
- case "and":
- nextcondlabel := context.NewLabel()
- compileLogicalOpExprAux(context, reg, ex.Lhs, ec, nextcondlabel, elselabel, false, lb)
- context.SetLabelPc(nextcondlabel, context.Code.LastPC())
- compileLogicalOpExprAux(context, reg, ex.Rhs, ec, thenlabel, elselabel, hasnextcond, lb)
- case "or":
- nextcondlabel := context.NewLabel()
- compileLogicalOpExprAux(context, reg, ex.Lhs, ec, thenlabel, nextcondlabel, true, lb)
- context.SetLabelPc(nextcondlabel, context.Code.LastPC())
- compileLogicalOpExprAux(context, reg, ex.Rhs, ec, thenlabel, elselabel, hasnextcond, lb)
- }
- return
- case *ast.RelationalOpExpr:
- if thenlabel == elselabel {
- flip ^= 1
- jumplabel = lb.t
- lb.b = true
- } else if thenlabel == lb.e {
- jumplabel = lb.t
- lb.b = true
- } else if elselabel == lb.e {
- jumplabel = lb.f
- lb.b = true
- }
- compileRelationalOpExprAux(context, reg, ex, flip, jumplabel)
- return
- }
-
- a := reg
- sreg := savereg(ec, a)
- if !hasnextcond && thenlabel == elselabel {
- reg += compileExpr(context, reg, expr, &expcontext{ec.ctype, intMax(a, sreg), ec.varargopt})
- last := context.Code.Last()
- if opGetOpCode(last) == OP_MOVE && opGetArgA(last) == a {
- context.Code.SetA(context.Code.LastPC(), sreg)
- } else {
- context.Code.AddABC(OP_MOVE, sreg, a, 0, sline(expr))
- }
- } else {
- reg += compileExpr(context, reg, expr, ecnone(0))
- if sreg == a {
- code.AddABC(OP_TEST, a, 0, 0^flip, sline(expr))
- } else {
- code.AddABC(OP_TESTSET, sreg, a, 0^flip, sline(expr))
- }
- }
- code.AddASbx(OP_JMP, 0, jumplabel, sline(expr))
-} // }}}
-
-func compileFuncCallExpr(context *funcContext, reg int, expr *ast.FuncCallExpr, ec *expcontext) int { // {{{
- funcreg := reg
- if ec.ctype == ecLocal && ec.reg == (int(context.Proto.NumParameters)-1) {
- funcreg = ec.reg
- reg = ec.reg
- }
- argc := len(expr.Args)
- islastvararg := false
- name := "(anonymous)"
-
- if expr.Func != nil { // hoge.func()
- reg += compileExpr(context, reg, expr.Func, ecnone(0))
- name = getExprName(context, expr.Func)
- } else { // hoge:method()
- b := reg
- compileExprWithMVPropagation(context, expr.Receiver, ®, &b)
- c := loadRk(context, ®, expr, LString(expr.Method))
- context.Code.AddABC(OP_SELF, funcreg, b, c, sline(expr))
- // increments a register for an implicit "self"
- reg = b + 1
- reg2 := funcreg + 2
- if reg2 > reg {
- reg = reg2
- }
- argc += 1
- name = string(expr.Method)
- }
-
- for i, ar := range expr.Args {
- islastvararg = (i == len(expr.Args)-1) && isVarArgReturnExpr(ar)
- if islastvararg {
- compileExpr(context, reg, ar, ecnone(-2))
- } else {
- reg += compileExpr(context, reg, ar, ecnone(0))
- }
- }
- b := argc + 1
- if islastvararg {
- b = 0
- }
- context.Code.AddABC(OP_CALL, funcreg, b, ec.varargopt+2, sline(expr))
- context.Proto.DbgCalls = append(context.Proto.DbgCalls, DbgCall{Pc: context.Code.LastPC(), Name: name})
-
- if ec.varargopt == 0 && shouldmove(ec, funcreg) {
- context.Code.AddABC(OP_MOVE, ec.reg, funcreg, 0, sline(expr))
- return 1
- }
- if context.RegTop() > (funcreg+2+ec.varargopt) || ec.varargopt < -1 {
- return 0
- }
- return ec.varargopt + 1
-} // }}}
-
-func loadRk(context *funcContext, reg *int, expr ast.Expr, cnst LValue) int { // {{{
- cindex := context.ConstIndex(cnst)
- if cindex <= opMaxIndexRk {
- return opRkAsk(cindex)
- } else {
- ret := *reg
- *reg++
- context.Code.AddABx(OP_LOADK, ret, cindex, sline(expr))
- return ret
- }
-} // }}}
-
-func getIdentRefType(context *funcContext, current *funcContext, expr *ast.IdentExpr) expContextType { // {{{
- if current == nil {
- return ecGlobal
- } else if current.FindLocalVar(expr.Value) > -1 {
- if current == context {
- return ecLocal
- }
- return ecUpvalue
- }
- return getIdentRefType(context, current.Parent, expr)
-} // }}}
-
-func getExprName(context *funcContext, expr ast.Expr) string { // {{{
- switch ex := expr.(type) {
- case *ast.IdentExpr:
- return ex.Value
- case *ast.AttrGetExpr:
- switch kex := ex.Key.(type) {
- case *ast.StringExpr:
- return kex.Value
- }
- return "?"
- }
- return "?"
-} // }}}
-
-func patchCode(context *funcContext) { // {{{
- maxreg := 1
- if np := int(context.Proto.NumParameters); np > 1 {
- maxreg = np
- }
- moven := 0
- code := context.Code.List()
- for pc := 0; pc < len(code); pc++ {
- inst := code[pc]
- curop := opGetOpCode(inst)
- switch curop {
- case OP_CLOSURE:
- pc += int(context.Proto.FunctionPrototypes[opGetArgBx(inst)].NumUpvalues)
- moven = 0
- continue
- case OP_SETGLOBAL, OP_SETUPVAL, OP_EQ, OP_LT, OP_LE, OP_TEST,
- OP_TAILCALL, OP_RETURN, OP_FORPREP, OP_FORLOOP, OP_TFORLOOP,
- OP_SETLIST, OP_CLOSE:
- /* nothing to do */
- case OP_CALL:
- if reg := opGetArgA(inst) + opGetArgC(inst) - 2; reg > maxreg {
- maxreg = reg
- }
- case OP_VARARG:
- if reg := opGetArgA(inst) + opGetArgB(inst) - 1; reg > maxreg {
- maxreg = reg
- }
- case OP_SELF:
- if reg := opGetArgA(inst) + 1; reg > maxreg {
- maxreg = reg
- }
- case OP_LOADNIL:
- if reg := opGetArgB(inst); reg > maxreg {
- maxreg = reg
- }
- case OP_JMP: // jump to jump optimization
- distance := 0
- count := 0 // avoiding infinite loops
- for jmp := inst; opGetOpCode(jmp) == OP_JMP && count < 5; jmp = context.Code.At(pc + distance + 1) {
- d := context.GetLabelPc(opGetArgSbx(jmp)) - pc
- if d > opMaxArgSbx {
- if distance == 0 {
- raiseCompileError(context, context.Proto.LineDefined, "too long to jump.")
- }
- break
- }
- distance = d
- count++
- }
- if distance == 0 {
- context.Code.SetOpCode(pc, OP_NOP)
- } else {
- context.Code.SetSbx(pc, distance)
- }
- default:
- if reg := opGetArgA(inst); reg > maxreg {
- maxreg = reg
- }
- }
-
- // bulk move optimization(reducing op dipatch costs)
- if curop == OP_MOVE {
- moven++
- } else {
- if moven > 1 {
- context.Code.SetOpCode(pc-moven, OP_MOVEN)
- context.Code.SetC(pc-moven, intMin(moven-1, opMaxArgsC))
- }
- moven = 0
- }
- }
- maxreg++
- if maxreg > maxRegisters {
- raiseCompileError(context, context.Proto.LineDefined, "register overflow(too many local variables)")
- }
- context.Proto.NumUsedRegisters = uint8(maxreg)
-} // }}}
-
-func Compile(chunk []ast.Stmt, name string) (proto *FunctionProto, err error) { // {{{
- defer func() {
- if rcv := recover(); rcv != nil {
- if _, ok := rcv.(*CompileError); ok {
- err = rcv.(error)
- } else {
- panic(rcv)
- }
- }
- }()
- err = nil
- parlist := &ast.ParList{HasVargs: true, Names: []string{}}
- funcexpr := &ast.FunctionExpr{ParList: parlist, Stmts: chunk}
- context := newFuncContext(name, nil)
- compileFunctionExpr(context, funcexpr, ecnone(0))
- proto = context.Proto
- return
-} // }}}
diff --git a/vendor/github.com/yuin/gopher-lua/config.go b/vendor/github.com/yuin/gopher-lua/config.go
deleted file mode 100644
index f58b5939..00000000
--- a/vendor/github.com/yuin/gopher-lua/config.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package lua
-
-import (
- "os"
-)
-
-var CompatVarArg = true
-var FieldsPerFlush = 50
-var RegistrySize = 256 * 20
-var RegistryGrowStep = 32
-var CallStackSize = 256
-var MaxTableGetLoop = 100
-var MaxArrayIndex = 67108864
-
-type LNumber float64
-
-const LNumberBit = 64
-const LNumberScanFormat = "%f"
-const LuaVersion = "Lua 5.1"
-
-var LuaPath = "LUA_PATH"
-var LuaLDir string
-var LuaPathDefault string
-var LuaOS string
-
-func init() {
- if os.PathSeparator == '/' { // unix-like
- LuaOS = "unix"
- LuaLDir = "/usr/local/share/lua/5.1"
- LuaPathDefault = "./?.lua;" + LuaLDir + "/?.lua;" + LuaLDir + "/?/init.lua"
- } else { // windows
- LuaOS = "windows"
- LuaLDir = "!\\lua"
- LuaPathDefault = ".\\?.lua;" + LuaLDir + "\\?.lua;" + LuaLDir + "\\?\\init.lua"
- }
-}
diff --git a/vendor/github.com/yuin/gopher-lua/coroutinelib.go b/vendor/github.com/yuin/gopher-lua/coroutinelib.go
deleted file mode 100644
index d42c41a1..00000000
--- a/vendor/github.com/yuin/gopher-lua/coroutinelib.go
+++ /dev/null
@@ -1,112 +0,0 @@
-package lua
-
-func OpenCoroutine(L *LState) int {
- // TODO: Tie module name to contents of linit.go?
- mod := L.RegisterModule(CoroutineLibName, coFuncs)
- L.Push(mod)
- return 1
-}
-
-var coFuncs = map[string]LGFunction{
- "create": coCreate,
- "yield": coYield,
- "resume": coResume,
- "running": coRunning,
- "status": coStatus,
- "wrap": coWrap,
-}
-
-func coCreate(L *LState) int {
- fn := L.CheckFunction(1)
- newthread, _ := L.NewThread()
- base := 0
- newthread.stack.Push(callFrame{
- Fn: fn,
- Pc: 0,
- Base: base,
- LocalBase: base + 1,
- ReturnBase: base,
- NArgs: 0,
- NRet: MultRet,
- Parent: nil,
- TailCall: 0,
- })
- L.Push(newthread)
- return 1
-}
-
-func coYield(L *LState) int {
- return -1
-}
-
-func coResume(L *LState) int {
- th := L.CheckThread(1)
- if L.G.CurrentThread == th {
- msg := "can not resume a running thread"
- if th.wrapped {
- L.RaiseError(msg)
- return 0
- }
- L.Push(LFalse)
- L.Push(LString(msg))
- return 2
- }
- if th.Dead {
- msg := "can not resume a dead thread"
- if th.wrapped {
- L.RaiseError(msg)
- return 0
- }
- L.Push(LFalse)
- L.Push(LString(msg))
- return 2
- }
- th.Parent = L
- L.G.CurrentThread = th
- if !th.isStarted() {
- cf := th.stack.Last()
- th.currentFrame = cf
- th.SetTop(0)
- nargs := L.GetTop() - 1
- L.XMoveTo(th, nargs)
- cf.NArgs = nargs
- th.initCallFrame(cf)
- th.Panic = panicWithoutTraceback
- } else {
- nargs := L.GetTop() - 1
- L.XMoveTo(th, nargs)
- }
- top := L.GetTop()
- threadRun(th)
- return L.GetTop() - top
-}
-
-func coRunning(L *LState) int {
- if L.G.MainThread == L {
- L.Push(LNil)
- return 1
- }
- L.Push(L.G.CurrentThread)
- return 1
-}
-
-func coStatus(L *LState) int {
- L.Push(LString(L.Status(L.CheckThread(1))))
- return 1
-}
-
-func wrapaux(L *LState) int {
- L.Insert(L.ToThread(UpvalueIndex(1)), 1)
- return coResume(L)
-}
-
-func coWrap(L *LState) int {
- coCreate(L)
- L.CheckThread(L.GetTop()).wrapped = true
- v := L.Get(L.GetTop())
- L.Pop(1)
- L.Push(L.NewClosure(wrapaux, v))
- return 1
-}
-
-//
diff --git a/vendor/github.com/yuin/gopher-lua/debuglib.go b/vendor/github.com/yuin/gopher-lua/debuglib.go
deleted file mode 100644
index 41f883f1..00000000
--- a/vendor/github.com/yuin/gopher-lua/debuglib.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package lua
-
-import (
- "fmt"
- "strings"
-)
-
-func OpenDebug(L *LState) int {
- dbgmod := L.RegisterModule(DebugLibName, debugFuncs)
- L.Push(dbgmod)
- return 1
-}
-
-var debugFuncs = map[string]LGFunction{
- "getfenv": debugGetFEnv,
- "getinfo": debugGetInfo,
- "getlocal": debugGetLocal,
- "getmetatable": debugGetMetatable,
- "getupvalue": debugGetUpvalue,
- "setfenv": debugSetFEnv,
- "setlocal": debugSetLocal,
- "setmetatable": debugSetMetatable,
- "setupvalue": debugSetUpvalue,
- "traceback": debugTraceback,
-}
-
-func debugGetFEnv(L *LState) int {
- L.Push(L.GetFEnv(L.CheckAny(1)))
- return 1
-}
-
-func debugGetInfo(L *LState) int {
- L.CheckTypes(1, LTFunction, LTNumber)
- arg1 := L.Get(1)
- what := L.OptString(2, "Slunf")
- var dbg *Debug
- var fn LValue
- var err error
- var ok bool
- switch lv := arg1.(type) {
- case *LFunction:
- dbg = &Debug{}
- fn, err = L.GetInfo(">"+what, dbg, lv)
- case LNumber:
- dbg, ok = L.GetStack(int(lv))
- if !ok {
- L.Push(LNil)
- return 1
- }
- fn, err = L.GetInfo(what, dbg, LNil)
- }
-
- if err != nil {
- L.Push(LNil)
- return 1
- }
- tbl := L.NewTable()
- if len(dbg.Name) > 0 {
- tbl.RawSetString("name", LString(dbg.Name))
- } else {
- tbl.RawSetString("name", LNil)
- }
- tbl.RawSetString("what", LString(dbg.What))
- tbl.RawSetString("source", LString(dbg.Source))
- tbl.RawSetString("currentline", LNumber(dbg.CurrentLine))
- tbl.RawSetString("nups", LNumber(dbg.NUpvalues))
- tbl.RawSetString("linedefined", LNumber(dbg.LineDefined))
- tbl.RawSetString("lastlinedefined", LNumber(dbg.LastLineDefined))
- tbl.RawSetString("func", fn)
- L.Push(tbl)
- return 1
-}
-
-func debugGetLocal(L *LState) int {
- level := L.CheckInt(1)
- idx := L.CheckInt(2)
- dbg, ok := L.GetStack(level)
- if !ok {
- L.ArgError(1, "level out of range")
- }
- name, value := L.GetLocal(dbg, idx)
- if len(name) > 0 {
- L.Push(LString(name))
- L.Push(value)
- return 2
- }
- L.Push(LNil)
- return 1
-}
-
-func debugGetMetatable(L *LState) int {
- L.Push(L.GetMetatable(L.CheckAny(1)))
- return 1
-}
-
-func debugGetUpvalue(L *LState) int {
- fn := L.CheckFunction(1)
- idx := L.CheckInt(2)
- name, value := L.GetUpvalue(fn, idx)
- if len(name) > 0 {
- L.Push(LString(name))
- L.Push(value)
- return 2
- }
- L.Push(LNil)
- return 1
-}
-
-func debugSetFEnv(L *LState) int {
- L.SetFEnv(L.CheckAny(1), L.CheckAny(2))
- return 0
-}
-
-func debugSetLocal(L *LState) int {
- level := L.CheckInt(1)
- idx := L.CheckInt(2)
- value := L.CheckAny(3)
- dbg, ok := L.GetStack(level)
- if !ok {
- L.ArgError(1, "level out of range")
- }
- name := L.SetLocal(dbg, idx, value)
- if len(name) > 0 {
- L.Push(LString(name))
- } else {
- L.Push(LNil)
- }
- return 1
-}
-
-func debugSetMetatable(L *LState) int {
- L.CheckTypes(2, LTNil, LTTable)
- obj := L.Get(1)
- mt := L.Get(2)
- L.SetMetatable(obj, mt)
- L.SetTop(1)
- return 1
-}
-
-func debugSetUpvalue(L *LState) int {
- fn := L.CheckFunction(1)
- idx := L.CheckInt(2)
- value := L.CheckAny(3)
- name := L.SetUpvalue(fn, idx, value)
- if len(name) > 0 {
- L.Push(LString(name))
- } else {
- L.Push(LNil)
- }
- return 1
-}
-
-func debugTraceback(L *LState) int {
- msg := ""
- level := L.OptInt(2, 1)
- ls := L
- if L.GetTop() > 0 {
- if s, ok := L.Get(1).assertString(); ok {
- msg = s
- }
- if l, ok := L.Get(1).(*LState); ok {
- ls = l
- msg = ""
- }
- }
-
- traceback := strings.TrimSpace(ls.stackTrace(level))
- if len(msg) > 0 {
- traceback = fmt.Sprintf("%s\n%s", msg, traceback)
- }
- L.Push(LString(traceback))
- return 1
-}
diff --git a/vendor/github.com/yuin/gopher-lua/function.go b/vendor/github.com/yuin/gopher-lua/function.go
deleted file mode 100644
index 169e5407..00000000
--- a/vendor/github.com/yuin/gopher-lua/function.go
+++ /dev/null
@@ -1,193 +0,0 @@
-package lua
-
-import (
- "fmt"
- "strings"
-)
-
-const (
- VarArgHasArg uint8 = 1
- VarArgIsVarArg uint8 = 2
- VarArgNeedsArg uint8 = 4
-)
-
-type DbgLocalInfo struct {
- Name string
- StartPc int
- EndPc int
-}
-
-type DbgCall struct {
- Name string
- Pc int
-}
-
-type FunctionProto struct {
- SourceName string
- LineDefined int
- LastLineDefined int
- NumUpvalues uint8
- NumParameters uint8
- IsVarArg uint8
- NumUsedRegisters uint8
- Code []uint32
- Constants []LValue
- FunctionPrototypes []*FunctionProto
-
- DbgSourcePositions []int
- DbgLocals []*DbgLocalInfo
- DbgCalls []DbgCall
- DbgUpvalues []string
-
- stringConstants []string
-}
-
-/* Upvalue {{{ */
-
-type Upvalue struct {
- next *Upvalue
- reg *registry
- index int
- value LValue
- closed bool
-}
-
-func (uv *Upvalue) Value() LValue {
- //if uv.IsClosed() {
- if uv.closed || uv.reg == nil {
- return uv.value
- }
- //return uv.reg.Get(uv.index)
- return uv.reg.array[uv.index]
-}
-
-func (uv *Upvalue) SetValue(value LValue) {
- if uv.IsClosed() {
- uv.value = value
- } else {
- uv.reg.Set(uv.index, value)
- }
-}
-
-func (uv *Upvalue) Close() {
- value := uv.Value()
- uv.closed = true
- uv.value = value
-}
-
-func (uv *Upvalue) IsClosed() bool {
- return uv.closed || uv.reg == nil
-}
-
-func UpvalueIndex(i int) int {
- return GlobalsIndex - i
-}
-
-/* }}} */
-
-/* FunctionProto {{{ */
-
-func newFunctionProto(name string) *FunctionProto {
- return &FunctionProto{
- SourceName: name,
- LineDefined: 0,
- LastLineDefined: 0,
- NumUpvalues: 0,
- NumParameters: 0,
- IsVarArg: 0,
- NumUsedRegisters: 2,
- Code: make([]uint32, 0, 128),
- Constants: make([]LValue, 0, 32),
- FunctionPrototypes: make([]*FunctionProto, 0, 16),
-
- DbgSourcePositions: make([]int, 0, 128),
- DbgLocals: make([]*DbgLocalInfo, 0, 16),
- DbgCalls: make([]DbgCall, 0, 128),
- DbgUpvalues: make([]string, 0, 16),
-
- stringConstants: make([]string, 0, 32),
- }
-}
-
-func (fp *FunctionProto) String() string {
- return fp.str(1, 0)
-}
-
-func (fp *FunctionProto) str(level int, count int) string {
- indent := strings.Repeat(" ", level-1)
- buf := []string{}
- buf = append(buf, fmt.Sprintf("%v; function [%v] definition (level %v)\n",
- indent, count, level))
- buf = append(buf, fmt.Sprintf("%v; %v upvalues, %v params, %v stacks\n",
- indent, fp.NumUpvalues, fp.NumParameters, fp.NumUsedRegisters))
- for reg, linfo := range fp.DbgLocals {
- buf = append(buf, fmt.Sprintf("%v.local %v ; %v\n", indent, linfo.Name, reg))
- }
- for reg, upvalue := range fp.DbgUpvalues {
- buf = append(buf, fmt.Sprintf("%v.upvalue %v ; %v\n", indent, upvalue, reg))
- }
- for reg, conzt := range fp.Constants {
- buf = append(buf, fmt.Sprintf("%v.const %v ; %v\n", indent, conzt.String(), reg))
- }
- buf = append(buf, "\n")
-
- protono := 0
- for no, code := range fp.Code {
- inst := opGetOpCode(code)
- if inst == OP_CLOSURE {
- buf = append(buf, "\n")
- buf = append(buf, fp.FunctionPrototypes[protono].str(level+1, protono))
- buf = append(buf, "\n")
- protono++
- }
- buf = append(buf, fmt.Sprintf("%v[%03d] %v (line:%v)\n",
- indent, no+1, opToString(code), fp.DbgSourcePositions[no]))
-
- }
- buf = append(buf, fmt.Sprintf("%v; end of function\n", indent))
- return strings.Join(buf, "")
-}
-
-/* }}} */
-
-/* LFunction {{{ */
-
-func newLFunctionL(proto *FunctionProto, env *LTable, nupvalue int) *LFunction {
- return &LFunction{
- IsG: false,
- Env: env,
-
- Proto: proto,
- GFunction: nil,
- Upvalues: make([]*Upvalue, nupvalue),
- }
-}
-
-func newLFunctionG(gfunc LGFunction, env *LTable, nupvalue int) *LFunction {
- return &LFunction{
- IsG: true,
- Env: env,
-
- Proto: nil,
- GFunction: gfunc,
- Upvalues: make([]*Upvalue, nupvalue),
- }
-}
-
-func (fn *LFunction) LocalName(regno, pc int) (string, bool) {
- if fn.IsG {
- return "", false
- }
- p := fn.Proto
- for i := 0; i < len(p.DbgLocals) && p.DbgLocals[i].StartPc < pc; i++ {
- if pc < p.DbgLocals[i].EndPc {
- regno--
- if regno == 0 {
- return p.DbgLocals[i].Name, true
- }
- }
- }
- return "", false
-}
-
-/* }}} */
diff --git a/vendor/github.com/yuin/gopher-lua/iolib.go b/vendor/github.com/yuin/gopher-lua/iolib.go
deleted file mode 100644
index 4a86f893..00000000
--- a/vendor/github.com/yuin/gopher-lua/iolib.go
+++ /dev/null
@@ -1,746 +0,0 @@
-package lua
-
-import (
- "bufio"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "os/exec"
- "syscall"
-)
-
-var ioFuncs = map[string]LGFunction{
- "close": ioClose,
- "flush": ioFlush,
- "lines": ioLines,
- "input": ioInput,
- "output": ioOutput,
- "open": ioOpenFile,
- "popen": ioPopen,
- "read": ioRead,
- "type": ioType,
- "tmpfile": ioTmpFile,
- "write": ioWrite,
-}
-
-const lFileClass = "FILE*"
-
-type lFile struct {
- fp *os.File
- pp *exec.Cmd
- writer io.Writer
- reader *bufio.Reader
- stdout io.ReadCloser
- closed bool
-}
-
-type lFileType int
-
-const (
- lFileFile lFileType = iota
- lFileProcess
-)
-
-const fileDefOutIndex = 1
-const fileDefInIndex = 2
-const fileDefaultWriteBuffer = 4096
-const fileDefaultReadBuffer = 4096
-
-func checkFile(L *LState) *lFile {
- ud := L.CheckUserData(1)
- if file, ok := ud.Value.(*lFile); ok {
- return file
- }
- L.ArgError(1, "file expected")
- return nil
-}
-
-func errorIfFileIsClosed(L *LState, file *lFile) {
- if file.closed {
- L.ArgError(1, "file is closed")
- }
-}
-
-func newFile(L *LState, file *os.File, path string, flag int, perm os.FileMode, writable, readable bool) (*LUserData, error) {
- ud := L.NewUserData()
- var err error
- if file == nil {
- file, err = os.OpenFile(path, flag, perm)
- if err != nil {
- return nil, err
- }
- }
- lfile := &lFile{fp: file, pp: nil, writer: nil, reader: nil, stdout: nil, closed: false}
- ud.Value = lfile
- if writable {
- lfile.writer = file
- }
- if readable {
- lfile.reader = bufio.NewReaderSize(file, fileDefaultReadBuffer)
- }
- L.SetMetatable(ud, L.GetTypeMetatable(lFileClass))
- return ud, nil
-}
-
-func newProcess(L *LState, cmd string, writable, readable bool) (*LUserData, error) {
- ud := L.NewUserData()
- c, args := popenArgs(cmd)
- pp := exec.Command(c, args...)
- lfile := &lFile{fp: nil, pp: pp, writer: nil, reader: nil, stdout: nil, closed: false}
- ud.Value = lfile
-
- var err error
- if writable {
- lfile.writer, err = pp.StdinPipe()
- }
- if readable {
- lfile.stdout, err = pp.StdoutPipe()
- lfile.reader = bufio.NewReaderSize(lfile.stdout, fileDefaultReadBuffer)
- }
- if err != nil {
- return nil, err
- }
- err = pp.Start()
- if err != nil {
- return nil, err
- }
-
- L.SetMetatable(ud, L.GetTypeMetatable(lFileClass))
- return ud, nil
-}
-
-func (file *lFile) Type() lFileType {
- if file.fp == nil {
- return lFileProcess
- }
- return lFileFile
-}
-
-func (file *lFile) Name() string {
- switch file.Type() {
- case lFileFile:
- return fmt.Sprintf("file %s", file.fp.Name())
- case lFileProcess:
- return fmt.Sprintf("process %s", file.pp.Path)
- }
- return ""
-}
-
-func (file *lFile) AbandonReadBuffer() error {
- if file.Type() == lFileFile && file.reader != nil {
- _, err := file.fp.Seek(-int64(file.reader.Buffered()), 1)
- if err != nil {
- return err
- }
- file.reader = bufio.NewReaderSize(file.fp, fileDefaultReadBuffer)
- }
- return nil
-}
-
-func fileDefOut(L *LState) *LUserData {
- return L.Get(UpvalueIndex(1)).(*LTable).RawGetInt(fileDefOutIndex).(*LUserData)
-}
-
-func fileDefIn(L *LState) *LUserData {
- return L.Get(UpvalueIndex(1)).(*LTable).RawGetInt(fileDefInIndex).(*LUserData)
-}
-
-func fileIsWritable(L *LState, file *lFile) int {
- if file.writer == nil {
- L.Push(LNil)
- L.Push(LString(fmt.Sprintf("%s is opened for only reading.", file.Name())))
- L.Push(LNumber(1)) // C-Lua compatibility: Original Lua pushes errno to the stack
- return 3
- }
- return 0
-}
-
-func fileIsReadable(L *LState, file *lFile) int {
- if file.reader == nil {
- L.Push(LNil)
- L.Push(LString(fmt.Sprintf("%s is opened for only writing.", file.Name())))
- L.Push(LNumber(1)) // C-Lua compatibility: Original Lua pushes errno to the stack
- return 3
- }
- return 0
-}
-
-var stdFiles = []struct {
- name string
- file *os.File
- writable bool
- readable bool
-}{
- {"stdout", os.Stdout, true, false},
- {"stdin", os.Stdin, false, true},
- {"stderr", os.Stderr, true, false},
-}
-
-func OpenIo(L *LState) int {
- mod := L.RegisterModule(IoLibName, map[string]LGFunction{}).(*LTable)
- mt := L.NewTypeMetatable(lFileClass)
- mt.RawSetString("__index", mt)
- L.SetFuncs(mt, fileMethods)
- mt.RawSetString("lines", L.NewClosure(fileLines, L.NewFunction(fileLinesIter)))
-
- for _, finfo := range stdFiles {
- file, _ := newFile(L, finfo.file, "", 0, os.FileMode(0), finfo.writable, finfo.readable)
- mod.RawSetString(finfo.name, file)
- }
- uv := L.CreateTable(2, 0)
- uv.RawSetInt(fileDefOutIndex, mod.RawGetString("stdout"))
- uv.RawSetInt(fileDefInIndex, mod.RawGetString("stdin"))
- for name, fn := range ioFuncs {
- mod.RawSetString(name, L.NewClosure(fn, uv))
- }
- mod.RawSetString("lines", L.NewClosure(ioLines, uv, L.NewClosure(ioLinesIter, uv)))
- // Modifications are being made in-place rather than returned?
- L.Push(mod)
- return 1
-}
-
-var fileMethods = map[string]LGFunction{
- "__tostring": fileToString,
- "write": fileWrite,
- "close": fileClose,
- "flush": fileFlush,
- "lines": fileLines,
- "read": fileRead,
- "seek": fileSeek,
- "setvbuf": fileSetVBuf,
-}
-
-func fileToString(L *LState) int {
- file := checkFile(L)
- if file.Type() == lFileFile {
- if file.closed {
- L.Push(LString("file (closed)"))
- } else {
- L.Push(LString("file"))
- }
- } else {
- if file.closed {
- L.Push(LString("process (closed)"))
- } else {
- L.Push(LString("process"))
- }
- }
- return 1
-}
-
-func fileWriteAux(L *LState, file *lFile, idx int) int {
- if n := fileIsWritable(L, file); n != 0 {
- return n
- }
- errorIfFileIsClosed(L, file)
- top := L.GetTop()
- out := file.writer
- var err error
- for i := idx; i <= top; i++ {
- L.CheckTypes(i, LTNumber, LTString)
- s := LVAsString(L.Get(i))
- if _, err = out.Write(unsafeFastStringToReadOnlyBytes(s)); err != nil {
- goto errreturn
- }
- }
-
- file.AbandonReadBuffer()
- L.Push(LTrue)
- return 1
-errreturn:
-
- file.AbandonReadBuffer()
- L.Push(LNil)
- L.Push(LString(err.Error()))
- L.Push(LNumber(1)) // C-Lua compatibility: Original Lua pushes errno to the stack
- return 3
-}
-
-func fileCloseAux(L *LState, file *lFile) int {
- file.closed = true
- var err error
- if file.writer != nil {
- if bwriter, ok := file.writer.(*bufio.Writer); ok {
- if err = bwriter.Flush(); err != nil {
- goto errreturn
- }
- }
- }
- file.AbandonReadBuffer()
-
- switch file.Type() {
- case lFileFile:
- if err = file.fp.Close(); err != nil {
- goto errreturn
- }
- L.Push(LTrue)
- return 1
- case lFileProcess:
- if file.stdout != nil {
- file.stdout.Close() // ignore errors
- }
- err = file.pp.Wait()
- var exitStatus int // Initialised to zero value = 0
- if err != nil {
- if e2, ok := err.(*exec.ExitError); ok {
- if s, ok := e2.Sys().(syscall.WaitStatus); ok {
- exitStatus = s.ExitStatus()
- } else {
- err = errors.New("Unimplemented for system where exec.ExitError.Sys() is not syscall.WaitStatus.")
- }
- }
- } else {
- exitStatus = 0
- }
- L.Push(LNumber(exitStatus))
- return 1
- }
-
-errreturn:
- L.RaiseError(err.Error())
- return 0
-}
-
-func fileFlushAux(L *LState, file *lFile) int {
- if n := fileIsWritable(L, file); n != 0 {
- return n
- }
- errorIfFileIsClosed(L, file)
-
- if bwriter, ok := file.writer.(*bufio.Writer); ok {
- if err := bwriter.Flush(); err != nil {
- L.Push(LNil)
- L.Push(LString(err.Error()))
- return 2
- }
- }
- L.Push(LTrue)
- return 1
-}
-
-func fileReadAux(L *LState, file *lFile, idx int) int {
- if n := fileIsReadable(L, file); n != 0 {
- return n
- }
- errorIfFileIsClosed(L, file)
- if L.GetTop() == idx-1 {
- L.Push(LString("*l"))
- }
- var err error
- top := L.GetTop()
- for i := idx; i <= top; i++ {
- switch lv := L.Get(i).(type) {
- case LNumber:
- size := int64(lv)
- if size == 0 {
- _, err = file.reader.ReadByte()
- if err == io.EOF {
- L.Push(LNil)
- goto normalreturn
- }
- file.reader.UnreadByte()
- }
- var buf []byte
- var iseof bool
- buf, err, iseof = readBufioSize(file.reader, size)
- if iseof {
- L.Push(LNil)
- goto normalreturn
- }
- if err != nil {
- goto errreturn
- }
- L.Push(LString(string(buf)))
- case LString:
- options := L.CheckString(i)
- if len(options) > 0 && options[0] != '*' {
- L.ArgError(2, "invalid options:"+options)
- }
- for _, opt := range options[1:] {
- switch opt {
- case 'n':
- var v LNumber
- _, err = fmt.Fscanf(file.reader, LNumberScanFormat, &v)
- if err == io.EOF {
- L.Push(LNil)
- goto normalreturn
- }
- if err != nil {
- goto errreturn
- }
- L.Push(v)
- case 'a':
- var buf []byte
- buf, err = ioutil.ReadAll(file.reader)
- if err == io.EOF {
- L.Push(emptyLString)
- goto normalreturn
- }
- if err != nil {
- goto errreturn
- }
- L.Push(LString(string(buf)))
- case 'l':
- var buf []byte
- var iseof bool
- buf, err, iseof = readBufioLine(file.reader)
- if iseof {
- L.Push(LNil)
- goto normalreturn
- }
- if err != nil {
- goto errreturn
- }
- L.Push(LString(string(buf)))
- default:
- L.ArgError(2, "invalid options:"+string(opt))
- }
- }
- }
- }
-normalreturn:
- return L.GetTop() - top
-
-errreturn:
- L.RaiseError(err.Error())
- //L.Push(LNil)
- //L.Push(LString(err.Error()))
- return 2
-}
-
-var fileSeekOptions = []string{"set", "cur", "end"}
-
-func fileSeek(L *LState) int {
- file := checkFile(L)
- if file.Type() != lFileFile {
- L.Push(LNil)
- L.Push(LString("can not seek a process."))
- return 2
- }
-
- top := L.GetTop()
- if top == 1 {
- L.Push(LString("cur"))
- L.Push(LNumber(0))
- } else if top == 2 {
- L.Push(LNumber(0))
- }
-
- var pos int64
- var err error
-
- err = file.AbandonReadBuffer()
- if err != nil {
- goto errreturn
- }
-
- pos, err = file.fp.Seek(L.CheckInt64(3), L.CheckOption(2, fileSeekOptions))
- if err != nil {
- goto errreturn
- }
-
- L.Push(LNumber(pos))
- return 1
-
-errreturn:
- L.Push(LNil)
- L.Push(LString(err.Error()))
- return 2
-}
-
-func fileWrite(L *LState) int {
- return fileWriteAux(L, checkFile(L), 2)
-}
-
-func fileClose(L *LState) int {
- return fileCloseAux(L, checkFile(L))
-}
-
-func fileFlush(L *LState) int {
- return fileFlushAux(L, checkFile(L))
-}
-
-func fileLinesIter(L *LState) int {
- var file *lFile
- if ud, ok := L.Get(1).(*LUserData); ok {
- file = ud.Value.(*lFile)
- } else {
- file = L.Get(UpvalueIndex(2)).(*LUserData).Value.(*lFile)
- }
- buf, _, err := file.reader.ReadLine()
- if err != nil {
- if err == io.EOF {
- L.Push(LNil)
- return 1
- }
- L.RaiseError(err.Error())
- }
- L.Push(LString(string(buf)))
- return 1
-}
-
-func fileLines(L *LState) int {
- file := checkFile(L)
- ud := L.CheckUserData(1)
- if n := fileIsReadable(L, file); n != 0 {
- return 0
- }
- L.Push(L.NewClosure(fileLinesIter, L.Get(UpvalueIndex(1)), ud))
- return 1
-}
-
-func fileRead(L *LState) int {
- return fileReadAux(L, checkFile(L), 2)
-}
-
-var filebufOptions = []string{"no", "full"}
-
-func fileSetVBuf(L *LState) int {
- var err error
- var writer io.Writer
- file := checkFile(L)
- if n := fileIsWritable(L, file); n != 0 {
- return n
- }
- switch filebufOptions[L.CheckOption(2, filebufOptions)] {
- case "no":
- switch file.Type() {
- case lFileFile:
- file.writer = file.fp
- case lFileProcess:
- file.writer, err = file.pp.StdinPipe()
- if err != nil {
- goto errreturn
- }
- }
- case "full", "line": // TODO line buffer not supported
- bufsize := L.OptInt(3, fileDefaultWriteBuffer)
- switch file.Type() {
- case lFileFile:
- file.writer = bufio.NewWriterSize(file.fp, bufsize)
- case lFileProcess:
- writer, err = file.pp.StdinPipe()
- if err != nil {
- goto errreturn
- }
- file.writer = bufio.NewWriterSize(writer, bufsize)
- }
- }
- L.Push(LTrue)
- return 1
-errreturn:
- L.Push(LNil)
- L.Push(LString(err.Error()))
- return 2
-}
-
-func ioInput(L *LState) int {
- if L.GetTop() == 0 {
- L.Push(fileDefIn(L))
- return 1
- }
- switch lv := L.Get(1).(type) {
- case LString:
- file, err := newFile(L, nil, string(lv), os.O_RDONLY, 0600, false, true)
- if err != nil {
- L.RaiseError(err.Error())
- }
- L.Get(UpvalueIndex(1)).(*LTable).RawSetInt(fileDefInIndex, file)
- L.Push(file)
- return 1
- case *LUserData:
- if _, ok := lv.Value.(*lFile); ok {
- L.Get(UpvalueIndex(1)).(*LTable).RawSetInt(fileDefInIndex, lv)
- L.Push(lv)
- return 1
- }
-
- }
- L.ArgError(1, "string or file expedted, but got "+L.Get(1).Type().String())
- return 0
-}
-
-func ioClose(L *LState) int {
- if L.GetTop() == 0 {
- return fileCloseAux(L, fileDefOut(L).Value.(*lFile))
- }
- return fileClose(L)
-}
-
-func ioFlush(L *LState) int {
- return fileFlushAux(L, fileDefOut(L).Value.(*lFile))
-}
-
-func ioLinesIter(L *LState) int {
- var file *lFile
- toclose := false
- if ud, ok := L.Get(1).(*LUserData); ok {
- file = ud.Value.(*lFile)
- } else {
- file = L.Get(UpvalueIndex(2)).(*LUserData).Value.(*lFile)
- toclose = true
- }
- buf, _, err := file.reader.ReadLine()
- if err != nil {
- if err == io.EOF {
- if toclose {
- fileCloseAux(L, file)
- }
- L.Push(LNil)
- return 1
- }
- L.RaiseError(err.Error())
- }
- L.Push(LString(string(buf)))
- return 1
-}
-
-func ioLines(L *LState) int {
- if L.GetTop() == 0 {
- L.Push(L.Get(UpvalueIndex(2)))
- L.Push(fileDefIn(L))
- return 2
- }
-
- path := L.CheckString(1)
- ud, err := newFile(L, nil, path, os.O_RDONLY, os.FileMode(0600), false, true)
- if err != nil {
- return 0
- }
- L.Push(L.NewClosure(ioLinesIter, L.Get(UpvalueIndex(1)), ud))
- return 1
-}
-
-var ioOpenOpions = []string{"r", "rb", "w", "wb", "a", "ab", "r+", "rb+", "w+", "wb+", "a+", "ab+"}
-
-func ioOpenFile(L *LState) int {
- path := L.CheckString(1)
- if L.GetTop() == 1 {
- L.Push(LString("r"))
- }
- mode := os.O_RDONLY
- perm := 0600
- writable := true
- readable := true
- switch ioOpenOpions[L.CheckOption(2, ioOpenOpions)] {
- case "r", "rb":
- mode = os.O_RDONLY
- writable = false
- case "w", "wb":
- mode = os.O_WRONLY | os.O_CREATE
- readable = false
- case "a", "ab":
- mode = os.O_WRONLY | os.O_APPEND | os.O_CREATE
- case "r+", "rb+":
- mode = os.O_RDWR
- case "w+", "wb+":
- mode = os.O_RDWR | os.O_TRUNC | os.O_CREATE
- case "a+", "ab+":
- mode = os.O_APPEND | os.O_RDWR | os.O_CREATE
- }
- file, err := newFile(L, nil, path, mode, os.FileMode(perm), writable, readable)
- if err != nil {
- L.Push(LNil)
- L.Push(LString(err.Error()))
- L.Push(LNumber(1)) // C-Lua compatibility: Original Lua pushes errno to the stack
- return 3
- }
- L.Push(file)
- return 1
-
-}
-
-var ioPopenOptions = []string{"r", "w"}
-
-func ioPopen(L *LState) int {
- cmd := L.CheckString(1)
- if L.GetTop() == 1 {
- L.Push(LString("r"))
- }
- var file *LUserData
- var err error
-
- switch ioPopenOptions[L.CheckOption(2, ioPopenOptions)] {
- case "r":
- file, err = newProcess(L, cmd, false, true)
- case "w":
- file, err = newProcess(L, cmd, true, false)
- }
- if err != nil {
- L.Push(LNil)
- L.Push(LString(err.Error()))
- return 2
- }
- L.Push(file)
- return 1
-}
-
-func ioRead(L *LState) int {
- return fileReadAux(L, fileDefIn(L).Value.(*lFile), 1)
-}
-
-func ioType(L *LState) int {
- ud, udok := L.Get(1).(*LUserData)
- if !udok {
- L.Push(LNil)
- return 1
- }
- file, ok := ud.Value.(*lFile)
- if !ok {
- L.Push(LNil)
- return 1
- }
- if file.closed {
- L.Push(LString("closed file"))
- return 1
- }
- L.Push(LString("file"))
- return 1
-}
-
-func ioTmpFile(L *LState) int {
- file, err := ioutil.TempFile("", "")
- if err != nil {
- L.Push(LNil)
- L.Push(LString(err.Error()))
- return 2
- }
- L.G.tempFiles = append(L.G.tempFiles, file)
- ud, _ := newFile(L, file, "", 0, os.FileMode(0), true, true)
- L.Push(ud)
- return 1
-}
-
-func ioOutput(L *LState) int {
- if L.GetTop() == 0 {
- L.Push(fileDefOut(L))
- return 1
- }
- switch lv := L.Get(1).(type) {
- case LString:
- file, err := newFile(L, nil, string(lv), os.O_WRONLY|os.O_CREATE, 0600, true, false)
- if err != nil {
- L.RaiseError(err.Error())
- }
- L.Get(UpvalueIndex(1)).(*LTable).RawSetInt(fileDefOutIndex, file)
- L.Push(file)
- return 1
- case *LUserData:
- if _, ok := lv.Value.(*lFile); ok {
- L.Get(UpvalueIndex(1)).(*LTable).RawSetInt(fileDefOutIndex, lv)
- L.Push(lv)
- return 1
- }
-
- }
- L.ArgError(1, "string or file expedted, but got "+L.Get(1).Type().String())
- return 0
-}
-
-func ioWrite(L *LState) int {
- return fileWriteAux(L, fileDefOut(L).Value.(*lFile), 1)
-}
-
-//
diff --git a/vendor/github.com/yuin/gopher-lua/linit.go b/vendor/github.com/yuin/gopher-lua/linit.go
deleted file mode 100644
index cd96d660..00000000
--- a/vendor/github.com/yuin/gopher-lua/linit.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package lua
-
-const (
- // BaseLibName is here for consistency; the base functions have no namespace/library.
- BaseLibName = ""
- // LoadLibName is here for consistency; the loading system has no namespace/library.
- LoadLibName = "package"
- // TabLibName is the name of the table Library.
- TabLibName = "table"
- // IoLibName is the name of the io Library.
- IoLibName = "io"
- // OsLibName is the name of the os Library.
- OsLibName = "os"
- // StringLibName is the name of the string Library.
- StringLibName = "string"
- // MathLibName is the name of the math Library.
- MathLibName = "math"
- // DebugLibName is the name of the debug Library.
- DebugLibName = "debug"
- // ChannelLibName is the name of the channel Library.
- ChannelLibName = "channel"
- // CoroutineLibName is the name of the coroutine Library.
- CoroutineLibName = "coroutine"
-)
-
-type luaLib struct {
- libName string
- libFunc LGFunction
-}
-
-var luaLibs = []luaLib{
- luaLib{LoadLibName, OpenPackage},
- luaLib{BaseLibName, OpenBase},
- luaLib{TabLibName, OpenTable},
- luaLib{IoLibName, OpenIo},
- luaLib{OsLibName, OpenOs},
- luaLib{StringLibName, OpenString},
- luaLib{MathLibName, OpenMath},
- luaLib{DebugLibName, OpenDebug},
- luaLib{ChannelLibName, OpenChannel},
- luaLib{CoroutineLibName, OpenCoroutine},
-}
-
-// OpenLibs loads the built-in libraries. It is equivalent to running OpenLoad,
-// then OpenBase, then iterating over the other OpenXXX functions in any order.
-func (ls *LState) OpenLibs() {
- // NB: Map iteration order in Go is deliberately randomised, so must open Load/Base
- // prior to iterating.
- for _, lib := range luaLibs {
- ls.Push(ls.NewFunction(lib.libFunc))
- ls.Push(LString(lib.libName))
- ls.Call(1, 0)
- }
-}
diff --git a/vendor/github.com/yuin/gopher-lua/loadlib.go b/vendor/github.com/yuin/gopher-lua/loadlib.go
deleted file mode 100644
index 772bb04a..00000000
--- a/vendor/github.com/yuin/gopher-lua/loadlib.go
+++ /dev/null
@@ -1,125 +0,0 @@
-package lua
-
-import (
- "fmt"
- "os"
- "path/filepath"
- "strings"
-)
-
-/* load lib {{{ */
-
-var loLoaders = []LGFunction{loLoaderPreload, loLoaderLua}
-
-func loGetPath(env string, defpath string) string {
- path := os.Getenv(env)
- if len(path) == 0 {
- path = defpath
- }
- path = strings.Replace(path, ";;", ";"+defpath+";", -1)
- if os.PathSeparator != '/' {
- dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
- if err != nil {
- panic(err)
- }
- path = strings.Replace(path, "!", dir, -1)
- }
- return path
-}
-
-func loFindFile(L *LState, name, pname string) (string, string) {
- name = strings.Replace(name, ".", string(os.PathSeparator), -1)
- lv := L.GetField(L.GetField(L.Get(EnvironIndex), "package"), pname)
- path, ok := lv.(LString)
- if !ok {
- L.RaiseError("package.%s must be a string", pname)
- }
- messages := []string{}
- for _, pattern := range strings.Split(string(path), ";") {
- luapath := strings.Replace(pattern, "?", name, -1)
- if _, err := os.Stat(luapath); err == nil {
- return luapath, ""
- } else {
- messages = append(messages, err.Error())
- }
- }
- return "", strings.Join(messages, "\n\t")
-}
-
-func OpenPackage(L *LState) int {
- packagemod := L.RegisterModule(LoadLibName, loFuncs)
-
- L.SetField(packagemod, "preload", L.NewTable())
-
- loaders := L.CreateTable(len(loLoaders), 0)
- for i, loader := range loLoaders {
- L.RawSetInt(loaders, i+1, L.NewFunction(loader))
- }
- L.SetField(packagemod, "loaders", loaders)
- L.SetField(L.Get(RegistryIndex), "_LOADERS", loaders)
-
- loaded := L.NewTable()
- L.SetField(packagemod, "loaded", loaded)
- L.SetField(L.Get(RegistryIndex), "_LOADED", loaded)
-
- L.SetField(packagemod, "path", LString(loGetPath(LuaPath, LuaPathDefault)))
- L.SetField(packagemod, "cpath", emptyLString)
-
- L.Push(packagemod)
- return 1
-}
-
-var loFuncs = map[string]LGFunction{
- "loadlib": loLoadLib,
- "seeall": loSeeAll,
-}
-
-func loLoaderPreload(L *LState) int {
- name := L.CheckString(1)
- preload := L.GetField(L.GetField(L.Get(EnvironIndex), "package"), "preload")
- if _, ok := preload.(*LTable); !ok {
- L.RaiseError("package.preload must be a table")
- }
- lv := L.GetField(preload, name)
- if lv == LNil {
- L.Push(LString(fmt.Sprintf("no field package.preload['%s']", name)))
- return 1
- }
- L.Push(lv)
- return 1
-}
-
-func loLoaderLua(L *LState) int {
- name := L.CheckString(1)
- path, msg := loFindFile(L, name, "path")
- if len(path) == 0 {
- L.Push(LString(msg))
- return 1
- }
- fn, err1 := L.LoadFile(path)
- if err1 != nil {
- L.RaiseError(err1.Error())
- }
- L.Push(fn)
- return 1
-}
-
-func loLoadLib(L *LState) int {
- L.RaiseError("loadlib is not supported")
- return 0
-}
-
-func loSeeAll(L *LState) int {
- mod := L.CheckTable(1)
- mt := L.GetMetatable(mod)
- if mt == LNil {
- mt = L.CreateTable(0, 1)
- L.SetMetatable(mod, mt)
- }
- L.SetField(mt, "__index", L.Get(GlobalsIndex))
- return 0
-}
-
-/* }}} */
-
-//
diff --git a/vendor/github.com/yuin/gopher-lua/mathlib.go b/vendor/github.com/yuin/gopher-lua/mathlib.go
deleted file mode 100644
index e612f2f0..00000000
--- a/vendor/github.com/yuin/gopher-lua/mathlib.go
+++ /dev/null
@@ -1,231 +0,0 @@
-package lua
-
-import (
- "math"
- "math/rand"
-)
-
-func OpenMath(L *LState) int {
- mod := L.RegisterModule(MathLibName, mathFuncs).(*LTable)
- mod.RawSetString("pi", LNumber(math.Pi))
- mod.RawSetString("huge", LNumber(math.MaxFloat64))
- L.Push(mod)
- return 1
-}
-
-var mathFuncs = map[string]LGFunction{
- "abs": mathAbs,
- "acos": mathAcos,
- "asin": mathAsin,
- "atan": mathAtan,
- "atan2": mathAtan2,
- "ceil": mathCeil,
- "cos": mathCos,
- "cosh": mathCosh,
- "deg": mathDeg,
- "exp": mathExp,
- "floor": mathFloor,
- "fmod": mathFmod,
- "frexp": mathFrexp,
- "ldexp": mathLdexp,
- "log": mathLog,
- "log10": mathLog10,
- "max": mathMax,
- "min": mathMin,
- "mod": mathMod,
- "modf": mathModf,
- "pow": mathPow,
- "rad": mathRad,
- "random": mathRandom,
- "randomseed": mathRandomseed,
- "sin": mathSin,
- "sinh": mathSinh,
- "sqrt": mathSqrt,
- "tan": mathTan,
- "tanh": mathTanh,
-}
-
-func mathAbs(L *LState) int {
- L.Push(LNumber(math.Abs(float64(L.CheckNumber(1)))))
- return 1
-}
-
-func mathAcos(L *LState) int {
- L.Push(LNumber(math.Acos(float64(L.CheckNumber(1)))))
- return 1
-}
-
-func mathAsin(L *LState) int {
- L.Push(LNumber(math.Asin(float64(L.CheckNumber(1)))))
- return 1
-}
-
-func mathAtan(L *LState) int {
- L.Push(LNumber(math.Atan(float64(L.CheckNumber(1)))))
- return 1
-}
-
-func mathAtan2(L *LState) int {
- L.Push(LNumber(math.Atan2(float64(L.CheckNumber(1)), float64(L.CheckNumber(2)))))
- return 1
-}
-
-func mathCeil(L *LState) int {
- L.Push(LNumber(math.Ceil(float64(L.CheckNumber(1)))))
- return 1
-}
-
-func mathCos(L *LState) int {
- L.Push(LNumber(math.Cos(float64(L.CheckNumber(1)))))
- return 1
-}
-
-func mathCosh(L *LState) int {
- L.Push(LNumber(math.Cosh(float64(L.CheckNumber(1)))))
- return 1
-}
-
-func mathDeg(L *LState) int {
- L.Push(LNumber(float64(L.CheckNumber(1)) * 180 / math.Pi))
- return 1
-}
-
-func mathExp(L *LState) int {
- L.Push(LNumber(math.Exp(float64(L.CheckNumber(1)))))
- return 1
-}
-
-func mathFloor(L *LState) int {
- L.Push(LNumber(math.Floor(float64(L.CheckNumber(1)))))
- return 1
-}
-
-func mathFmod(L *LState) int {
- L.Push(LNumber(math.Mod(float64(L.CheckNumber(1)), float64(L.CheckNumber(2)))))
- return 1
-}
-
-func mathFrexp(L *LState) int {
- v1, v2 := math.Frexp(float64(L.CheckNumber(1)))
- L.Push(LNumber(v1))
- L.Push(LNumber(v2))
- return 2
-}
-
-func mathLdexp(L *LState) int {
- L.Push(LNumber(math.Ldexp(float64(L.CheckNumber(1)), L.CheckInt(2))))
- return 1
-}
-
-func mathLog(L *LState) int {
- L.Push(LNumber(math.Log(float64(L.CheckNumber(1)))))
- return 1
-}
-
-func mathLog10(L *LState) int {
- L.Push(LNumber(math.Log10(float64(L.CheckNumber(1)))))
- return 1
-}
-
-func mathMax(L *LState) int {
- if L.GetTop() == 0 {
- L.RaiseError("wrong number of arguments")
- }
- max := L.CheckNumber(1)
- top := L.GetTop()
- for i := 2; i <= top; i++ {
- v := L.CheckNumber(i)
- if v > max {
- max = v
- }
- }
- L.Push(max)
- return 1
-}
-
-func mathMin(L *LState) int {
- if L.GetTop() == 0 {
- L.RaiseError("wrong number of arguments")
- }
- min := L.CheckNumber(1)
- top := L.GetTop()
- for i := 2; i <= top; i++ {
- v := L.CheckNumber(i)
- if v < min {
- min = v
- }
- }
- L.Push(min)
- return 1
-}
-
-func mathMod(L *LState) int {
- lhs := L.CheckNumber(1)
- rhs := L.CheckNumber(2)
- L.Push(luaModulo(lhs, rhs))
- return 1
-}
-
-func mathModf(L *LState) int {
- v1, v2 := math.Modf(float64(L.CheckNumber(1)))
- L.Push(LNumber(v1))
- L.Push(LNumber(v2))
- return 2
-}
-
-func mathPow(L *LState) int {
- L.Push(LNumber(math.Pow(float64(L.CheckNumber(1)), float64(L.CheckNumber(2)))))
- return 1
-}
-
-func mathRad(L *LState) int {
- L.Push(LNumber(float64(L.CheckNumber(1)) * math.Pi / 180))
- return 1
-}
-
-func mathRandom(L *LState) int {
- switch L.GetTop() {
- case 0:
- L.Push(LNumber(rand.Float64()))
- case 1:
- n := L.CheckInt(1)
- L.Push(LNumber(rand.Intn(n) + 1))
- default:
- min := L.CheckInt(1)
- max := L.CheckInt(2) + 1
- L.Push(LNumber(rand.Intn(max-min) + min))
- }
- return 1
-}
-
-func mathRandomseed(L *LState) int {
- rand.Seed(L.CheckInt64(1))
- return 0
-}
-
-func mathSin(L *LState) int {
- L.Push(LNumber(math.Sin(float64(L.CheckNumber(1)))))
- return 1
-}
-
-func mathSinh(L *LState) int {
- L.Push(LNumber(math.Sinh(float64(L.CheckNumber(1)))))
- return 1
-}
-
-func mathSqrt(L *LState) int {
- L.Push(LNumber(math.Sqrt(float64(L.CheckNumber(1)))))
- return 1
-}
-
-func mathTan(L *LState) int {
- L.Push(LNumber(math.Tan(float64(L.CheckNumber(1)))))
- return 1
-}
-
-func mathTanh(L *LState) int {
- L.Push(LNumber(math.Tanh(float64(L.CheckNumber(1)))))
- return 1
-}
-
-//
diff --git a/vendor/github.com/yuin/gopher-lua/opcode.go b/vendor/github.com/yuin/gopher-lua/opcode.go
deleted file mode 100644
index 91fff1c9..00000000
--- a/vendor/github.com/yuin/gopher-lua/opcode.go
+++ /dev/null
@@ -1,371 +0,0 @@
-package lua
-
-import (
- "fmt"
-)
-
-/*
- gopherlua uses Lua 5.1.4's opcodes.
- Lua 5.1.4 opcodes layout:
-
- instruction = 32bit(fixed length)
-
- +---------------------------------------------+
- |0-5(6bits)|6-13(8bit)|14-22(9bit)|23-31(9bit)|
- |==========+==========+===========+===========|
- | opcode | A | C | B |
- |----------+----------+-----------+-----------|
- | opcode | A | Bx(unsigned) |
- |----------+----------+-----------+-----------|
- | opcode | A | sBx(signed) |
- +---------------------------------------------+
-*/
-
-const opInvalidInstruction = ^uint32(0)
-
-const opSizeCode = 6
-const opSizeA = 8
-const opSizeB = 9
-const opSizeC = 9
-const opSizeBx = 18
-const opSizesBx = 18
-
-const opMaxArgsA = (1 << opSizeA) - 1
-const opMaxArgsB = (1 << opSizeB) - 1
-const opMaxArgsC = (1 << opSizeC) - 1
-const opMaxArgBx = (1 << opSizeBx) - 1
-const opMaxArgSbx = opMaxArgBx >> 1
-
-const (
- OP_MOVE int = iota /* A B R(A) := R(B) */
- OP_MOVEN /* A B R(A) := R(B); followed by R(C) MOVE ops */
- OP_LOADK /* A Bx R(A) := Kst(Bx) */
- OP_LOADBOOL /* A B C R(A) := (Bool)B; if (C) pc++ */
- OP_LOADNIL /* A B R(A) := ... := R(B) := nil */
- OP_GETUPVAL /* A B R(A) := UpValue[B] */
-
- OP_GETGLOBAL /* A Bx R(A) := Gbl[Kst(Bx)] */
- OP_GETTABLE /* A B C R(A) := R(B)[RK(C)] */
- OP_GETTABLEKS /* A B C R(A) := R(B)[RK(C)] ; RK(C) is constant string */
-
- OP_SETGLOBAL /* A Bx Gbl[Kst(Bx)] := R(A) */
- OP_SETUPVAL /* A B UpValue[B] := R(A) */
- OP_SETTABLE /* A B C R(A)[RK(B)] := RK(C) */
- OP_SETTABLEKS /* A B C R(A)[RK(B)] := RK(C) ; RK(B) is constant string */
-
- OP_NEWTABLE /* A B C R(A) := {} (size = BC) */
-
- OP_SELF /* A B C R(A+1) := R(B); R(A) := R(B)[RK(C)] */
-
- OP_ADD /* A B C R(A) := RK(B) + RK(C) */
- OP_SUB /* A B C R(A) := RK(B) - RK(C) */
- OP_MUL /* A B C R(A) := RK(B) * RK(C) */
- OP_DIV /* A B C R(A) := RK(B) / RK(C) */
- OP_MOD /* A B C R(A) := RK(B) % RK(C) */
- OP_POW /* A B C R(A) := RK(B) ^ RK(C) */
- OP_UNM /* A B R(A) := -R(B) */
- OP_NOT /* A B R(A) := not R(B) */
- OP_LEN /* A B R(A) := length of R(B) */
-
- OP_CONCAT /* A B C R(A) := R(B).. ... ..R(C) */
-
- OP_JMP /* sBx pc+=sBx */
-
- OP_EQ /* A B C if ((RK(B) == RK(C)) ~= A) then pc++ */
- OP_LT /* A B C if ((RK(B) < RK(C)) ~= A) then pc++ */
- OP_LE /* A B C if ((RK(B) <= RK(C)) ~= A) then pc++ */
-
- OP_TEST /* A C if not (R(A) <=> C) then pc++ */
- OP_TESTSET /* A B C if (R(B) <=> C) then R(A) := R(B) else pc++ */
-
- OP_CALL /* A B C R(A) ... R(A+C-2) := R(A)(R(A+1) ... R(A+B-1)) */
- OP_TAILCALL /* A B C return R(A)(R(A+1) ... R(A+B-1)) */
- OP_RETURN /* A B return R(A) ... R(A+B-2) (see note) */
-
- OP_FORLOOP /* A sBx R(A)+=R(A+2);
- if R(A) = R(A+1) then { pc+=sBx; R(A+3)=R(A) }*/
- OP_FORPREP /* A sBx R(A)-=R(A+2); pc+=sBx */
-
- OP_TFORLOOP /* A C R(A+3) ... R(A+3+C) := R(A)(R(A+1) R(A+2));
- if R(A+3) ~= nil then { pc++; R(A+2)=R(A+3); } */
- OP_SETLIST /* A B C R(A)[(C-1)*FPF+i] := R(A+i) 1 <= i <= B */
-
- OP_CLOSE /* A close all variables in the stack up to (>=) R(A)*/
- OP_CLOSURE /* A Bx R(A) := closure(KPROTO[Bx] R(A) ... R(A+n)) */
-
- OP_VARARG /* A B R(A) R(A+1) ... R(A+B-1) = vararg */
-
- OP_NOP /* NOP */
-)
-const opCodeMax = OP_NOP
-
-type opArgMode int
-
-const (
- opArgModeN opArgMode = iota
- opArgModeU
- opArgModeR
- opArgModeK
-)
-
-type opType int
-
-const (
- opTypeABC = iota
- opTypeABx
- opTypeASbx
-)
-
-type opProp struct {
- Name string
- IsTest bool
- SetRegA bool
- ModeArgB opArgMode
- ModeArgC opArgMode
- Type opType
-}
-
-var opProps = []opProp{
- opProp{"MOVE", false, true, opArgModeR, opArgModeN, opTypeABC},
- opProp{"MOVEN", false, true, opArgModeR, opArgModeN, opTypeABC},
- opProp{"LOADK", false, true, opArgModeK, opArgModeN, opTypeABx},
- opProp{"LOADBOOL", false, true, opArgModeU, opArgModeU, opTypeABC},
- opProp{"LOADNIL", false, true, opArgModeR, opArgModeN, opTypeABC},
- opProp{"GETUPVAL", false, true, opArgModeU, opArgModeN, opTypeABC},
- opProp{"GETGLOBAL", false, true, opArgModeK, opArgModeN, opTypeABx},
- opProp{"GETTABLE", false, true, opArgModeR, opArgModeK, opTypeABC},
- opProp{"GETTABLEKS", false, true, opArgModeR, opArgModeK, opTypeABC},
- opProp{"SETGLOBAL", false, false, opArgModeK, opArgModeN, opTypeABx},
- opProp{"SETUPVAL", false, false, opArgModeU, opArgModeN, opTypeABC},
- opProp{"SETTABLE", false, false, opArgModeK, opArgModeK, opTypeABC},
- opProp{"SETTABLEKS", false, false, opArgModeK, opArgModeK, opTypeABC},
- opProp{"NEWTABLE", false, true, opArgModeU, opArgModeU, opTypeABC},
- opProp{"SELF", false, true, opArgModeR, opArgModeK, opTypeABC},
- opProp{"ADD", false, true, opArgModeK, opArgModeK, opTypeABC},
- opProp{"SUB", false, true, opArgModeK, opArgModeK, opTypeABC},
- opProp{"MUL", false, true, opArgModeK, opArgModeK, opTypeABC},
- opProp{"DIV", false, true, opArgModeK, opArgModeK, opTypeABC},
- opProp{"MOD", false, true, opArgModeK, opArgModeK, opTypeABC},
- opProp{"POW", false, true, opArgModeK, opArgModeK, opTypeABC},
- opProp{"UNM", false, true, opArgModeR, opArgModeN, opTypeABC},
- opProp{"NOT", false, true, opArgModeR, opArgModeN, opTypeABC},
- opProp{"LEN", false, true, opArgModeR, opArgModeN, opTypeABC},
- opProp{"CONCAT", false, true, opArgModeR, opArgModeR, opTypeABC},
- opProp{"JMP", false, false, opArgModeR, opArgModeN, opTypeASbx},
- opProp{"EQ", true, false, opArgModeK, opArgModeK, opTypeABC},
- opProp{"LT", true, false, opArgModeK, opArgModeK, opTypeABC},
- opProp{"LE", true, false, opArgModeK, opArgModeK, opTypeABC},
- opProp{"TEST", true, true, opArgModeR, opArgModeU, opTypeABC},
- opProp{"TESTSET", true, true, opArgModeR, opArgModeU, opTypeABC},
- opProp{"CALL", false, true, opArgModeU, opArgModeU, opTypeABC},
- opProp{"TAILCALL", false, true, opArgModeU, opArgModeU, opTypeABC},
- opProp{"RETURN", false, false, opArgModeU, opArgModeN, opTypeABC},
- opProp{"FORLOOP", false, true, opArgModeR, opArgModeN, opTypeASbx},
- opProp{"FORPREP", false, true, opArgModeR, opArgModeN, opTypeASbx},
- opProp{"TFORLOOP", true, false, opArgModeN, opArgModeU, opTypeABC},
- opProp{"SETLIST", false, false, opArgModeU, opArgModeU, opTypeABC},
- opProp{"CLOSE", false, false, opArgModeN, opArgModeN, opTypeABC},
- opProp{"CLOSURE", false, true, opArgModeU, opArgModeN, opTypeABx},
- opProp{"VARARG", false, true, opArgModeU, opArgModeN, opTypeABC},
- opProp{"NOP", false, false, opArgModeR, opArgModeN, opTypeASbx},
-}
-
-func opGetOpCode(inst uint32) int {
- return int(inst >> 26)
-}
-
-func opSetOpCode(inst *uint32, opcode int) {
- *inst = (*inst & 0x3ffffff) | uint32(opcode<<26)
-}
-
-func opGetArgA(inst uint32) int {
- return int(inst>>18) & 0xff
-}
-
-func opSetArgA(inst *uint32, arg int) {
- *inst = (*inst & 0xfc03ffff) | uint32((arg&0xff)<<18)
-}
-
-func opGetArgB(inst uint32) int {
- return int(inst & 0x1ff)
-}
-
-func opSetArgB(inst *uint32, arg int) {
- *inst = (*inst & 0xfffffe00) | uint32(arg&0x1ff)
-}
-
-func opGetArgC(inst uint32) int {
- return int(inst>>9) & 0x1ff
-}
-
-func opSetArgC(inst *uint32, arg int) {
- *inst = (*inst & 0xfffc01ff) | uint32((arg&0x1ff)<<9)
-}
-
-func opGetArgBx(inst uint32) int {
- return int(inst & 0x3ffff)
-}
-
-func opSetArgBx(inst *uint32, arg int) {
- *inst = (*inst & 0xfffc0000) | uint32(arg&0x3ffff)
-}
-
-func opGetArgSbx(inst uint32) int {
- return opGetArgBx(inst) - opMaxArgSbx
-}
-
-func opSetArgSbx(inst *uint32, arg int) {
- opSetArgBx(inst, arg+opMaxArgSbx)
-}
-
-func opCreateABC(op int, a int, b int, c int) uint32 {
- var inst uint32 = 0
- opSetOpCode(&inst, op)
- opSetArgA(&inst, a)
- opSetArgB(&inst, b)
- opSetArgC(&inst, c)
- return inst
-}
-
-func opCreateABx(op int, a int, bx int) uint32 {
- var inst uint32 = 0
- opSetOpCode(&inst, op)
- opSetArgA(&inst, a)
- opSetArgBx(&inst, bx)
- return inst
-}
-
-func opCreateASbx(op int, a int, sbx int) uint32 {
- var inst uint32 = 0
- opSetOpCode(&inst, op)
- opSetArgA(&inst, a)
- opSetArgSbx(&inst, sbx)
- return inst
-}
-
-const opBitRk = 1 << (opSizeB - 1)
-const opMaxIndexRk = opBitRk - 1
-
-func opIsK(value int) bool {
- return bool((value & opBitRk) != 0)
-}
-
-func opIndexK(value int) int {
- return value & ^opBitRk
-}
-
-func opRkAsk(value int) int {
- return value | opBitRk
-}
-
-func opToString(inst uint32) string {
- op := opGetOpCode(inst)
- if op > opCodeMax {
- return ""
- }
- prop := &(opProps[op])
-
- arga := opGetArgA(inst)
- argb := opGetArgB(inst)
- argc := opGetArgC(inst)
- argbx := opGetArgBx(inst)
- argsbx := opGetArgSbx(inst)
-
- buf := ""
- switch prop.Type {
- case opTypeABC:
- buf = fmt.Sprintf("%s | %d, %d, %d", prop.Name, arga, argb, argc)
- case opTypeABx:
- buf = fmt.Sprintf("%s | %d, %d", prop.Name, arga, argbx)
- case opTypeASbx:
- buf = fmt.Sprintf("%s | %d, %d", prop.Name, arga, argsbx)
- }
-
- switch op {
- case OP_MOVE:
- buf += fmt.Sprintf("; R(%v) := R(%v)", arga, argb)
- case OP_MOVEN:
- buf += fmt.Sprintf("; R(%v) := R(%v); followed by %v MOVE ops", arga, argb, argc)
- case OP_LOADK:
- buf += fmt.Sprintf("; R(%v) := Kst(%v)", arga, argbx)
- case OP_LOADBOOL:
- buf += fmt.Sprintf("; R(%v) := (Bool)%v; if (%v) pc++", arga, argb, argc)
- case OP_LOADNIL:
- buf += fmt.Sprintf("; R(%v) := ... := R(%v) := nil", arga, argb)
- case OP_GETUPVAL:
- buf += fmt.Sprintf("; R(%v) := UpValue[%v]", arga, argb)
- case OP_GETGLOBAL:
- buf += fmt.Sprintf("; R(%v) := Gbl[Kst(%v)]", arga, argbx)
- case OP_GETTABLE:
- buf += fmt.Sprintf("; R(%v) := R(%v)[RK(%v)]", arga, argb, argc)
- case OP_GETTABLEKS:
- buf += fmt.Sprintf("; R(%v) := R(%v)[RK(%v)] ; RK(%v) is constant string", arga, argb, argc, argc)
- case OP_SETGLOBAL:
- buf += fmt.Sprintf("; Gbl[Kst(%v)] := R(%v)", argbx, arga)
- case OP_SETUPVAL:
- buf += fmt.Sprintf("; UpValue[%v] := R(%v)", argb, arga)
- case OP_SETTABLE:
- buf += fmt.Sprintf("; R(%v)[RK(%v)] := RK(%v)", arga, argb, argc)
- case OP_SETTABLEKS:
- buf += fmt.Sprintf("; R(%v)[RK(%v)] := RK(%v) ; RK(%v) is constant string", arga, argb, argc, argb)
- case OP_NEWTABLE:
- buf += fmt.Sprintf("; R(%v) := {} (size = BC)", arga)
- case OP_SELF:
- buf += fmt.Sprintf("; R(%v+1) := R(%v); R(%v) := R(%v)[RK(%v)]", arga, argb, arga, argb, argc)
- case OP_ADD:
- buf += fmt.Sprintf("; R(%v) := RK(%v) + RK(%v)", arga, argb, argc)
- case OP_SUB:
- buf += fmt.Sprintf("; R(%v) := RK(%v) - RK(%v)", arga, argb, argc)
- case OP_MUL:
- buf += fmt.Sprintf("; R(%v) := RK(%v) * RK(%v)", arga, argb, argc)
- case OP_DIV:
- buf += fmt.Sprintf("; R(%v) := RK(%v) / RK(%v)", arga, argb, argc)
- case OP_MOD:
- buf += fmt.Sprintf("; R(%v) := RK(%v) %% RK(%v)", arga, argb, argc)
- case OP_POW:
- buf += fmt.Sprintf("; R(%v) := RK(%v) ^ RK(%v)", arga, argb, argc)
- case OP_UNM:
- buf += fmt.Sprintf("; R(%v) := -R(%v)", arga, argb)
- case OP_NOT:
- buf += fmt.Sprintf("; R(%v) := not R(%v)", arga, argb)
- case OP_LEN:
- buf += fmt.Sprintf("; R(%v) := length of R(%v)", arga, argb)
- case OP_CONCAT:
- buf += fmt.Sprintf("; R(%v) := R(%v).. ... ..R(%v)", arga, argb, argc)
- case OP_JMP:
- buf += fmt.Sprintf("; pc+=%v", argsbx)
- case OP_EQ:
- buf += fmt.Sprintf("; if ((RK(%v) == RK(%v)) ~= %v) then pc++", argb, argc, arga)
- case OP_LT:
- buf += fmt.Sprintf("; if ((RK(%v) < RK(%v)) ~= %v) then pc++", argb, argc, arga)
- case OP_LE:
- buf += fmt.Sprintf("; if ((RK(%v) <= RK(%v)) ~= %v) then pc++", argb, argc, arga)
- case OP_TEST:
- buf += fmt.Sprintf("; if not (R(%v) <=> %v) then pc++", arga, argc)
- case OP_TESTSET:
- buf += fmt.Sprintf("; if (R(%v) <=> %v) then R(%v) := R(%v) else pc++", argb, argc, arga, argb)
- case OP_CALL:
- buf += fmt.Sprintf("; R(%v) ... R(%v+%v-2) := R(%v)(R(%v+1) ... R(%v+%v-1))", arga, arga, argc, arga, arga, arga, argb)
- case OP_TAILCALL:
- buf += fmt.Sprintf("; return R(%v)(R(%v+1) ... R(%v+%v-1))", arga, arga, arga, argb)
- case OP_RETURN:
- buf += fmt.Sprintf("; return R(%v) ... R(%v+%v-2)", arga, arga, argb)
- case OP_FORLOOP:
- buf += fmt.Sprintf("; R(%v)+=R(%v+2); if R(%v) = R(%v+1) then { pc+=%v; R(%v+3)=R(%v) }", arga, arga, arga, arga, argsbx, arga, arga)
- case OP_FORPREP:
- buf += fmt.Sprintf("; R(%v)-=R(%v+2); pc+=%v", arga, arga, argsbx)
- case OP_TFORLOOP:
- buf += fmt.Sprintf("; R(%v+3) ... R(%v+3+%v) := R(%v)(R(%v+1) R(%v+2)); if R(%v+3) ~= nil then { pc++; R(%v+2)=R(%v+3); }", arga, arga, argc, arga, arga, arga, arga, arga, arga)
- case OP_SETLIST:
- buf += fmt.Sprintf("; R(%v)[(%v-1)*FPF+i] := R(%v+i) 1 <= i <= %v", arga, argc, arga, argb)
- case OP_CLOSE:
- buf += fmt.Sprintf("; close all variables in the stack up to (>=) R(%v)", arga)
- case OP_CLOSURE:
- buf += fmt.Sprintf("; R(%v) := closure(KPROTO[%v] R(%v) ... R(%v+n))", arga, argbx, arga, arga)
- case OP_VARARG:
- buf += fmt.Sprintf("; R(%v) R(%v+1) ... R(%v+%v-1) = vararg", arga, arga, arga, argb)
- case OP_NOP:
- /* nothing to do */
- }
- return buf
-}
diff --git a/vendor/github.com/yuin/gopher-lua/oslib.go b/vendor/github.com/yuin/gopher-lua/oslib.go
deleted file mode 100644
index c70a99bf..00000000
--- a/vendor/github.com/yuin/gopher-lua/oslib.go
+++ /dev/null
@@ -1,221 +0,0 @@
-package lua
-
-import (
- "io/ioutil"
- "os"
- "strings"
- "time"
-)
-
-var startedAt time.Time
-
-func init() {
- startedAt = time.Now()
-}
-
-func getIntField(L *LState, tb *LTable, key string, v int) int {
- ret := tb.RawGetString(key)
-
- switch lv := ret.(type) {
- case LNumber:
- return int(lv)
- case LString:
- slv := string(lv)
- slv = strings.TrimLeft(slv, " ")
- if strings.HasPrefix(slv, "0") && !strings.HasPrefix(slv, "0x") && !strings.HasPrefix(slv, "0X") {
- //Standard lua interpreter only support decimal and hexadecimal
- slv = strings.TrimLeft(slv, "0")
- }
- if num, err := parseNumber(slv); err == nil {
- return int(num)
- }
- default:
- return v
- }
-
- return v
-}
-
-func getBoolField(L *LState, tb *LTable, key string, v bool) bool {
- ret := tb.RawGetString(key)
- if lb, ok := ret.(LBool); ok {
- return bool(lb)
- }
- return v
-}
-
-func OpenOs(L *LState) int {
- osmod := L.RegisterModule(OsLibName, osFuncs)
- L.Push(osmod)
- return 1
-}
-
-var osFuncs = map[string]LGFunction{
- "clock": osClock,
- "difftime": osDiffTime,
- "execute": osExecute,
- "exit": osExit,
- "date": osDate,
- "getenv": osGetEnv,
- "remove": osRemove,
- "rename": osRename,
- "setenv": osSetEnv,
- "setlocale": osSetLocale,
- "time": osTime,
- "tmpname": osTmpname,
-}
-
-func osClock(L *LState) int {
- L.Push(LNumber(float64(time.Now().Sub(startedAt)) / float64(time.Second)))
- return 1
-}
-
-func osDiffTime(L *LState) int {
- L.Push(LNumber(L.CheckInt64(1) - L.CheckInt64(2)))
- return 1
-}
-
-func osExecute(L *LState) int {
- var procAttr os.ProcAttr
- procAttr.Files = []*os.File{os.Stdin, os.Stdout, os.Stderr}
- cmd, args := popenArgs(L.CheckString(1))
- args = append([]string{cmd}, args...)
- process, err := os.StartProcess(cmd, args, &procAttr)
- if err != nil {
- L.Push(LNumber(1))
- return 1
- }
-
- ps, err := process.Wait()
- if err != nil || !ps.Success() {
- L.Push(LNumber(1))
- return 1
- }
- L.Push(LNumber(0))
- return 1
-}
-
-func osExit(L *LState) int {
- L.Close()
- os.Exit(L.OptInt(1, 0))
- return 1
-}
-
-func osDate(L *LState) int {
- t := time.Now()
- cfmt := "%c"
- if L.GetTop() >= 1 {
- cfmt = L.CheckString(1)
- if strings.HasPrefix(cfmt, "!") {
- t = time.Now().UTC()
- cfmt = strings.TrimLeft(cfmt, "!")
- }
- if L.GetTop() >= 2 {
- t = time.Unix(L.CheckInt64(2), 0)
- }
- if strings.HasPrefix(cfmt, "*t") {
- ret := L.NewTable()
- ret.RawSetString("year", LNumber(t.Year()))
- ret.RawSetString("month", LNumber(t.Month()))
- ret.RawSetString("day", LNumber(t.Day()))
- ret.RawSetString("hour", LNumber(t.Hour()))
- ret.RawSetString("min", LNumber(t.Minute()))
- ret.RawSetString("sec", LNumber(t.Second()))
- ret.RawSetString("wday", LNumber(t.Weekday()+1))
- // TODO yday & dst
- ret.RawSetString("yday", LNumber(0))
- ret.RawSetString("isdst", LFalse)
- L.Push(ret)
- return 1
- }
- }
- L.Push(LString(strftime(t, cfmt)))
- return 1
-}
-
-func osGetEnv(L *LState) int {
- v := os.Getenv(L.CheckString(1))
- if len(v) == 0 {
- L.Push(LNil)
- } else {
- L.Push(LString(v))
- }
- return 1
-}
-
-func osRemove(L *LState) int {
- err := os.Remove(L.CheckString(1))
- if err != nil {
- L.Push(LNil)
- L.Push(LString(err.Error()))
- return 2
- } else {
- L.Push(LTrue)
- return 1
- }
-}
-
-func osRename(L *LState) int {
- err := os.Rename(L.CheckString(1), L.CheckString(2))
- if err != nil {
- L.Push(LNil)
- L.Push(LString(err.Error()))
- return 2
- } else {
- L.Push(LTrue)
- return 1
- }
-}
-
-func osSetLocale(L *LState) int {
- // setlocale is not supported
- L.Push(LFalse)
- return 1
-}
-
-func osSetEnv(L *LState) int {
- err := os.Setenv(L.CheckString(1), L.CheckString(2))
- if err != nil {
- L.Push(LNil)
- L.Push(LString(err.Error()))
- return 2
- } else {
- L.Push(LTrue)
- return 1
- }
-}
-
-func osTime(L *LState) int {
- if L.GetTop() == 0 {
- L.Push(LNumber(time.Now().Unix()))
- } else {
- tbl := L.CheckTable(1)
- sec := getIntField(L, tbl, "sec", 0)
- min := getIntField(L, tbl, "min", 0)
- hour := getIntField(L, tbl, "hour", 12)
- day := getIntField(L, tbl, "day", -1)
- month := getIntField(L, tbl, "month", -1)
- year := getIntField(L, tbl, "year", -1)
- isdst := getBoolField(L, tbl, "isdst", false)
- t := time.Date(year, time.Month(month), day, hour, min, sec, 0, time.Local)
- // TODO dst
- if false {
- print(isdst)
- }
- L.Push(LNumber(t.Unix()))
- }
- return 1
-}
-
-func osTmpname(L *LState) int {
- file, err := ioutil.TempFile("", "")
- if err != nil {
- L.RaiseError("unable to generate a unique filename")
- }
- file.Close()
- os.Remove(file.Name()) // ignore errors
- L.Push(LString(file.Name()))
- return 1
-}
-
-//
diff --git a/vendor/github.com/yuin/gopher-lua/package.go b/vendor/github.com/yuin/gopher-lua/package.go
deleted file mode 100644
index 9fde3f0c..00000000
--- a/vendor/github.com/yuin/gopher-lua/package.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// GopherLua: VM and compiler for Lua in Go
-package lua
-
-const PackageName = "GopherLua"
-const PackageVersion = "0.1"
-const PackageAuthors = "Yusuke Inuzuka"
-const PackageCopyRight = PackageName + " " + PackageVersion + " Copyright (C) 2015 -2017 " + PackageAuthors
diff --git a/vendor/github.com/yuin/gopher-lua/parse/Makefile b/vendor/github.com/yuin/gopher-lua/parse/Makefile
deleted file mode 100644
index 6dd048c1..00000000
--- a/vendor/github.com/yuin/gopher-lua/parse/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-all : parser.go
-
-parser.go : parser.go.y
- goyacc -o $@ parser.go.y; [ -f y.output ] && ( rm -f y.output )
diff --git a/vendor/github.com/yuin/gopher-lua/parse/lexer.go b/vendor/github.com/yuin/gopher-lua/parse/lexer.go
deleted file mode 100644
index d711e78b..00000000
--- a/vendor/github.com/yuin/gopher-lua/parse/lexer.go
+++ /dev/null
@@ -1,539 +0,0 @@
-package parse
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "github.com/yuin/gopher-lua/ast"
- "io"
- "reflect"
- "strconv"
- "strings"
-)
-
-const EOF = -1
-const whitespace1 = 1<<'\t' | 1<<' '
-const whitespace2 = 1<<'\t' | 1<<'\n' | 1<<'\r' | 1<<' '
-
-type Error struct {
- Pos ast.Position
- Message string
- Token string
-}
-
-func (e *Error) Error() string {
- pos := e.Pos
- if pos.Line == EOF {
- return fmt.Sprintf("%v at EOF: %s\n", pos.Source, e.Message)
- } else {
- return fmt.Sprintf("%v line:%d(column:%d) near '%v': %s\n", pos.Source, pos.Line, pos.Column, e.Token, e.Message)
- }
-}
-
-func writeChar(buf *bytes.Buffer, c int) { buf.WriteByte(byte(c)) }
-
-func isDecimal(ch int) bool { return '0' <= ch && ch <= '9' }
-
-func isIdent(ch int, pos int) bool {
- return ch == '_' || 'A' <= ch && ch <= 'Z' || 'a' <= ch && ch <= 'z' || isDecimal(ch) && pos > 0
-}
-
-func isDigit(ch int) bool {
- return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
-}
-
-type Scanner struct {
- Pos ast.Position
- reader *bufio.Reader
-}
-
-func NewScanner(reader io.Reader, source string) *Scanner {
- return &Scanner{
- Pos: ast.Position{
- Source: source,
- Line: 1,
- Column: 0,
- },
- reader: bufio.NewReaderSize(reader, 4096),
- }
-}
-
-func (sc *Scanner) Error(tok string, msg string) *Error { return &Error{sc.Pos, msg, tok} }
-
-func (sc *Scanner) TokenError(tok ast.Token, msg string) *Error { return &Error{tok.Pos, msg, tok.Str} }
-
-func (sc *Scanner) readNext() int {
- ch, err := sc.reader.ReadByte()
- if err == io.EOF {
- return EOF
- }
- return int(ch)
-}
-
-func (sc *Scanner) Newline(ch int) {
- if ch < 0 {
- return
- }
- sc.Pos.Line += 1
- sc.Pos.Column = 0
- next := sc.Peek()
- if ch == '\n' && next == '\r' || ch == '\r' && next == '\n' {
- sc.reader.ReadByte()
- }
-}
-
-func (sc *Scanner) Next() int {
- ch := sc.readNext()
- switch ch {
- case '\n', '\r':
- sc.Newline(ch)
- ch = int('\n')
- case EOF:
- sc.Pos.Line = EOF
- sc.Pos.Column = 0
- default:
- sc.Pos.Column++
- }
- return ch
-}
-
-func (sc *Scanner) Peek() int {
- ch := sc.readNext()
- if ch != EOF {
- sc.reader.UnreadByte()
- }
- return ch
-}
-
-func (sc *Scanner) skipWhiteSpace(whitespace int64) int {
- ch := sc.Next()
- for ; whitespace&(1<':
- if sc.Peek() == '=' {
- tok.Type = TGte
- tok.Str = ">="
- sc.Next()
- } else {
- tok.Type = ch
- tok.Str = string(ch)
- }
- case '.':
- ch2 := sc.Peek()
- switch {
- case isDecimal(ch2):
- tok.Type = TNumber
- err = sc.scanNumber(ch, buf)
- tok.Str = buf.String()
- case ch2 == '.':
- writeChar(buf, ch)
- writeChar(buf, sc.Next())
- if sc.Peek() == '.' {
- writeChar(buf, sc.Next())
- tok.Type = T3Comma
- } else {
- tok.Type = T2Comma
- }
- default:
- tok.Type = '.'
- }
- tok.Str = buf.String()
- case '+', '*', '/', '%', '^', '#', '(', ')', '{', '}', ']', ';', ':', ',':
- tok.Type = ch
- tok.Str = string(ch)
- default:
- writeChar(buf, ch)
- err = sc.Error(buf.String(), "Invalid token")
- goto finally
- }
- }
-
-finally:
- tok.Name = TokenName(int(tok.Type))
- return tok, err
-}
-
-// yacc interface {{{
-
-type Lexer struct {
- scanner *Scanner
- Stmts []ast.Stmt
- PNewLine bool
- Token ast.Token
- PrevTokenType int
-}
-
-func (lx *Lexer) Lex(lval *yySymType) int {
- lx.PrevTokenType = lx.Token.Type
- tok, err := lx.scanner.Scan(lx)
- if err != nil {
- panic(err)
- }
- if tok.Type < 0 {
- return 0
- }
- lval.token = tok
- lx.Token = tok
- return int(tok.Type)
-}
-
-func (lx *Lexer) Error(message string) {
- panic(lx.scanner.Error(lx.Token.Str, message))
-}
-
-func (lx *Lexer) TokenError(tok ast.Token, message string) {
- panic(lx.scanner.TokenError(tok, message))
-}
-
-func Parse(reader io.Reader, name string) (chunk []ast.Stmt, err error) {
- lexer := &Lexer{NewScanner(reader, name), nil, false, ast.Token{Str: ""}, TNil}
- chunk = nil
- defer func() {
- if e := recover(); e != nil {
- err, _ = e.(error)
- }
- }()
- yyParse(lexer)
- chunk = lexer.Stmts
- return
-}
-
-// }}}
-
-// Dump {{{
-
-func isInlineDumpNode(rv reflect.Value) bool {
- switch rv.Kind() {
- case reflect.Struct, reflect.Slice, reflect.Interface, reflect.Ptr:
- return false
- default:
- return true
- }
-}
-
-func dump(node interface{}, level int, s string) string {
- rt := reflect.TypeOf(node)
- if fmt.Sprint(rt) == "" {
- return strings.Repeat(s, level) + ""
- }
-
- rv := reflect.ValueOf(node)
- buf := []string{}
- switch rt.Kind() {
- case reflect.Slice:
- if rv.Len() == 0 {
- return strings.Repeat(s, level) + ""
- }
- for i := 0; i < rv.Len(); i++ {
- buf = append(buf, dump(rv.Index(i).Interface(), level, s))
- }
- case reflect.Ptr:
- vt := rv.Elem()
- tt := rt.Elem()
- indicies := []int{}
- for i := 0; i < tt.NumField(); i++ {
- if strings.Index(tt.Field(i).Name, "Base") > -1 {
- continue
- }
- indicies = append(indicies, i)
- }
- switch {
- case len(indicies) == 0:
- return strings.Repeat(s, level) + ""
- case len(indicies) == 1 && isInlineDumpNode(vt.Field(indicies[0])):
- for _, i := range indicies {
- buf = append(buf, strings.Repeat(s, level)+"- Node$"+tt.Name()+": "+dump(vt.Field(i).Interface(), 0, s))
- }
- default:
- buf = append(buf, strings.Repeat(s, level)+"- Node$"+tt.Name())
- for _, i := range indicies {
- if isInlineDumpNode(vt.Field(i)) {
- inf := dump(vt.Field(i).Interface(), 0, s)
- buf = append(buf, strings.Repeat(s, level+1)+tt.Field(i).Name+": "+inf)
- } else {
- buf = append(buf, strings.Repeat(s, level+1)+tt.Field(i).Name+": ")
- buf = append(buf, dump(vt.Field(i).Interface(), level+2, s))
- }
- }
- }
- default:
- buf = append(buf, strings.Repeat(s, level)+fmt.Sprint(node))
- }
- return strings.Join(buf, "\n")
-}
-
-func Dump(chunk []ast.Stmt) string {
- return dump(chunk, 0, " ")
-}
-
-// }}
diff --git a/vendor/github.com/yuin/gopher-lua/parse/parser.go b/vendor/github.com/yuin/gopher-lua/parse/parser.go
deleted file mode 100644
index f8f59b36..00000000
--- a/vendor/github.com/yuin/gopher-lua/parse/parser.go
+++ /dev/null
@@ -1,1137 +0,0 @@
-//line parser.go.y:2
-package parse
-
-import __yyfmt__ "fmt"
-
-//line parser.go.y:2
-import (
- "github.com/yuin/gopher-lua/ast"
-)
-
-//line parser.go.y:34
-type yySymType struct {
- yys int
- token ast.Token
-
- stmts []ast.Stmt
- stmt ast.Stmt
-
- funcname *ast.FuncName
- funcexpr *ast.FunctionExpr
-
- exprlist []ast.Expr
- expr ast.Expr
-
- fieldlist []*ast.Field
- field *ast.Field
- fieldsep string
-
- namelist []string
- parlist *ast.ParList
-}
-
-const TAnd = 57346
-const TBreak = 57347
-const TDo = 57348
-const TElse = 57349
-const TElseIf = 57350
-const TEnd = 57351
-const TFalse = 57352
-const TFor = 57353
-const TFunction = 57354
-const TIf = 57355
-const TIn = 57356
-const TLocal = 57357
-const TNil = 57358
-const TNot = 57359
-const TOr = 57360
-const TReturn = 57361
-const TRepeat = 57362
-const TThen = 57363
-const TTrue = 57364
-const TUntil = 57365
-const TWhile = 57366
-const TEqeq = 57367
-const TNeq = 57368
-const TLte = 57369
-const TGte = 57370
-const T2Comma = 57371
-const T3Comma = 57372
-const TIdent = 57373
-const TNumber = 57374
-const TString = 57375
-const UNARY = 57376
-
-var yyToknames = []string{
- "TAnd",
- "TBreak",
- "TDo",
- "TElse",
- "TElseIf",
- "TEnd",
- "TFalse",
- "TFor",
- "TFunction",
- "TIf",
- "TIn",
- "TLocal",
- "TNil",
- "TNot",
- "TOr",
- "TReturn",
- "TRepeat",
- "TThen",
- "TTrue",
- "TUntil",
- "TWhile",
- "TEqeq",
- "TNeq",
- "TLte",
- "TGte",
- "T2Comma",
- "T3Comma",
- "TIdent",
- "TNumber",
- "TString",
- " {",
- " (",
- " >",
- " <",
- " +",
- " -",
- " *",
- " /",
- " %",
- "UNARY",
- " ^",
-}
-var yyStatenames = []string{}
-
-const yyEofCode = 1
-const yyErrCode = 2
-const yyMaxDepth = 200
-
-//line parser.go.y:514
-func TokenName(c int) string {
- if c >= TAnd && c-TAnd < len(yyToknames) {
- if yyToknames[c-TAnd] != "" {
- return yyToknames[c-TAnd]
- }
- }
- return string([]byte{byte(c)})
-}
-
-//line yacctab:1
-var yyExca = []int{
- -1, 1,
- 1, -1,
- -2, 0,
- -1, 17,
- 46, 31,
- 47, 31,
- -2, 68,
- -1, 93,
- 46, 32,
- 47, 32,
- -2, 68,
-}
-
-const yyNprod = 95
-const yyPrivate = 57344
-
-var yyTokenNames []string
-var yyStates []string
-
-const yyLast = 579
-
-var yyAct = []int{
-
- 24, 88, 50, 23, 45, 84, 56, 65, 137, 153,
- 136, 113, 52, 142, 54, 53, 33, 134, 65, 132,
- 62, 63, 32, 61, 108, 109, 48, 111, 106, 41,
- 42, 105, 49, 155, 166, 81, 82, 83, 138, 104,
- 22, 91, 131, 80, 95, 92, 162, 74, 48, 85,
- 150, 99, 165, 148, 49, 149, 75, 76, 77, 78,
- 79, 67, 80, 107, 106, 148, 114, 115, 116, 117,
- 118, 119, 120, 121, 122, 123, 124, 125, 126, 127,
- 128, 129, 72, 73, 71, 70, 74, 65, 39, 40,
- 47, 139, 133, 68, 69, 75, 76, 77, 78, 79,
- 60, 80, 141, 144, 143, 146, 145, 31, 67, 147,
- 9, 48, 110, 97, 48, 152, 151, 49, 38, 62,
- 49, 17, 66, 77, 78, 79, 96, 80, 59, 72,
- 73, 71, 70, 74, 154, 102, 91, 156, 55, 157,
- 68, 69, 75, 76, 77, 78, 79, 21, 80, 187,
- 94, 20, 26, 184, 37, 179, 163, 112, 25, 35,
- 178, 93, 170, 172, 27, 171, 164, 173, 19, 159,
- 175, 174, 29, 89, 28, 39, 40, 20, 182, 181,
- 100, 34, 135, 183, 67, 39, 40, 47, 186, 64,
- 51, 1, 90, 87, 36, 130, 86, 30, 66, 18,
- 46, 44, 43, 8, 58, 72, 73, 71, 70, 74,
- 57, 67, 168, 169, 167, 3, 68, 69, 75, 76,
- 77, 78, 79, 160, 80, 66, 4, 2, 0, 0,
- 0, 158, 72, 73, 71, 70, 74, 0, 0, 0,
- 0, 0, 0, 68, 69, 75, 76, 77, 78, 79,
- 26, 80, 37, 0, 0, 0, 25, 35, 140, 0,
- 0, 0, 27, 0, 0, 0, 0, 0, 0, 0,
- 29, 21, 28, 39, 40, 20, 26, 0, 37, 34,
- 0, 0, 25, 35, 0, 0, 0, 0, 27, 0,
- 0, 0, 36, 98, 0, 0, 29, 89, 28, 39,
- 40, 20, 26, 0, 37, 34, 0, 0, 25, 35,
- 0, 0, 0, 0, 27, 67, 90, 176, 36, 0,
- 0, 0, 29, 21, 28, 39, 40, 20, 0, 66,
- 0, 34, 0, 0, 0, 0, 72, 73, 71, 70,
- 74, 0, 67, 0, 36, 0, 0, 68, 69, 75,
- 76, 77, 78, 79, 0, 80, 66, 0, 177, 0,
- 0, 0, 0, 72, 73, 71, 70, 74, 0, 67,
- 0, 185, 0, 0, 68, 69, 75, 76, 77, 78,
- 79, 0, 80, 66, 0, 161, 0, 0, 0, 0,
- 72, 73, 71, 70, 74, 0, 67, 0, 0, 0,
- 0, 68, 69, 75, 76, 77, 78, 79, 0, 80,
- 66, 0, 0, 180, 0, 0, 0, 72, 73, 71,
- 70, 74, 0, 67, 0, 0, 0, 0, 68, 69,
- 75, 76, 77, 78, 79, 0, 80, 66, 0, 0,
- 103, 0, 0, 0, 72, 73, 71, 70, 74, 0,
- 67, 0, 101, 0, 0, 68, 69, 75, 76, 77,
- 78, 79, 0, 80, 66, 0, 0, 0, 0, 0,
- 0, 72, 73, 71, 70, 74, 0, 67, 0, 0,
- 0, 0, 68, 69, 75, 76, 77, 78, 79, 0,
- 80, 66, 0, 0, 0, 0, 0, 0, 72, 73,
- 71, 70, 74, 0, 0, 0, 0, 0, 0, 68,
- 69, 75, 76, 77, 78, 79, 0, 80, 72, 73,
- 71, 70, 74, 0, 0, 0, 0, 0, 0, 68,
- 69, 75, 76, 77, 78, 79, 0, 80, 7, 10,
- 0, 0, 0, 0, 14, 15, 13, 0, 16, 0,
- 0, 0, 6, 12, 0, 0, 0, 11, 0, 0,
- 0, 0, 0, 0, 21, 0, 0, 0, 20, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 5,
-}
-var yyPact = []int{
-
- -1000, -1000, 533, -5, -1000, -1000, 292, -1000, -17, 152,
- -1000, 292, -1000, 292, 107, 97, 88, -1000, -1000, -1000,
- 292, -1000, -1000, -29, 473, -1000, -1000, -1000, -1000, -1000,
- -1000, 152, -1000, -1000, 292, 292, 292, 14, -1000, -1000,
- 142, 292, 116, 292, 95, -1000, 82, 240, -1000, -1000,
- 171, -1000, 446, 112, 419, -7, 17, 14, -24, -1000,
- 81, -19, -1000, 104, -42, 292, 292, 292, 292, 292,
- 292, 292, 292, 292, 292, 292, 292, 292, 292, 292,
- 292, -1, -1, -1, -1000, -11, -1000, -37, -1000, -8,
- 292, 473, -29, -1000, 152, 207, -1000, 55, -1000, -40,
- -1000, -1000, 292, -1000, 292, 292, 34, -1000, 24, 19,
- 14, 292, -1000, -1000, 473, 57, 493, 18, 18, 18,
- 18, 18, 18, 18, 83, 83, -1, -1, -1, -1,
- -44, -1000, -1000, -14, -1000, 266, -1000, -1000, 292, 180,
- -1000, -1000, -1000, 160, 473, -1000, 338, 40, -1000, -1000,
- -1000, -1000, -29, -1000, 157, 22, -1000, 473, -12, -1000,
- 205, 292, -1000, 154, -1000, -1000, 292, -1000, -1000, 292,
- 311, 151, -1000, 473, 146, 392, -1000, 292, -1000, -1000,
- -1000, 144, 365, -1000, -1000, -1000, 140, -1000,
-}
-var yyPgo = []int{
-
- 0, 190, 227, 2, 226, 223, 215, 210, 204, 203,
- 118, 6, 3, 0, 22, 107, 168, 199, 4, 197,
- 5, 195, 16, 193, 1, 182,
-}
-var yyR1 = []int{
-
- 0, 1, 1, 1, 2, 2, 2, 3, 4, 4,
- 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
- 4, 4, 5, 5, 6, 6, 6, 7, 7, 8,
- 8, 9, 9, 10, 10, 10, 11, 11, 12, 12,
- 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
- 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
- 13, 13, 13, 13, 13, 13, 13, 14, 15, 15,
- 15, 15, 17, 16, 16, 18, 18, 18, 18, 19,
- 20, 20, 21, 21, 21, 22, 22, 23, 23, 23,
- 24, 24, 24, 25, 25,
-}
-var yyR2 = []int{
-
- 0, 1, 2, 3, 0, 2, 2, 1, 3, 1,
- 3, 5, 4, 6, 8, 9, 11, 7, 3, 4,
- 4, 2, 0, 5, 1, 2, 1, 1, 3, 1,
- 3, 1, 3, 1, 4, 3, 1, 3, 1, 3,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 3,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 2, 2, 2, 1, 1, 1,
- 1, 3, 3, 2, 4, 2, 3, 1, 1, 2,
- 5, 4, 1, 1, 3, 2, 3, 1, 3, 2,
- 3, 5, 1, 1, 1,
-}
-var yyChk = []int{
-
- -1000, -1, -2, -6, -4, 45, 19, 5, -9, -15,
- 6, 24, 20, 13, 11, 12, 15, -10, -17, -16,
- 35, 31, 45, -12, -13, 16, 10, 22, 32, 30,
- -19, -15, -14, -22, 39, 17, 52, 12, -10, 33,
- 34, 46, 47, 50, 49, -18, 48, 35, -22, -14,
- -3, -1, -13, -3, -13, 31, -11, -7, -8, 31,
- 12, -11, 31, -13, -16, 47, 18, 4, 36, 37,
- 28, 27, 25, 26, 29, 38, 39, 40, 41, 42,
- 44, -13, -13, -13, -20, 35, 54, -23, -24, 31,
- 50, -13, -12, -10, -15, -13, 31, 31, 53, -12,
- 9, 6, 23, 21, 46, 14, 47, -20, 48, 49,
- 31, 46, 53, 53, -13, -13, -13, -13, -13, -13,
- -13, -13, -13, -13, -13, -13, -13, -13, -13, -13,
- -21, 53, 30, -11, 54, -25, 47, 45, 46, -13,
- 51, -18, 53, -3, -13, -3, -13, -12, 31, 31,
- 31, -20, -12, 53, -3, 47, -24, -13, 51, 9,
- -5, 47, 6, -3, 9, 30, 46, 9, 7, 8,
- -13, -3, 9, -13, -3, -13, 6, 47, 9, 9,
- 21, -3, -13, -3, 9, 6, -3, 9,
-}
-var yyDef = []int{
-
- 4, -2, 1, 2, 5, 6, 24, 26, 0, 9,
- 4, 0, 4, 0, 0, 0, 0, -2, 69, 70,
- 0, 33, 3, 25, 38, 40, 41, 42, 43, 44,
- 45, 46, 47, 48, 0, 0, 0, 0, 68, 67,
- 0, 0, 0, 0, 0, 73, 0, 0, 77, 78,
- 0, 7, 0, 0, 0, 36, 0, 0, 27, 29,
- 0, 21, 36, 0, 70, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 64, 65, 66, 79, 0, 85, 0, 87, 33,
- 0, 92, 8, -2, 0, 0, 35, 0, 75, 0,
- 10, 4, 0, 4, 0, 0, 0, 18, 0, 0,
- 0, 0, 71, 72, 39, 49, 50, 51, 52, 53,
- 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
- 0, 4, 82, 83, 86, 89, 93, 94, 0, 0,
- 34, 74, 76, 0, 12, 22, 0, 0, 37, 28,
- 30, 19, 20, 4, 0, 0, 88, 90, 0, 11,
- 0, 0, 4, 0, 81, 84, 0, 13, 4, 0,
- 0, 0, 80, 91, 0, 0, 4, 0, 17, 14,
- 4, 0, 0, 23, 15, 4, 0, 16,
-}
-var yyTok1 = []int{
-
- 1, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 52, 3, 42, 3, 3,
- 35, 53, 40, 38, 47, 39, 49, 41, 3, 3,
- 3, 3, 3, 3, 3, 3, 3, 3, 48, 45,
- 37, 46, 36, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 50, 3, 51, 44, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 34, 3, 54,
-}
-var yyTok2 = []int{
-
- 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
- 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 32, 33, 43,
-}
-var yyTok3 = []int{
- 0,
-}
-
-//line yaccpar:1
-
-/* parser for yacc output */
-
-var yyDebug = 0
-
-type yyLexer interface {
- Lex(lval *yySymType) int
- Error(s string)
-}
-
-const yyFlag = -1000
-
-func yyTokname(c int) string {
- // 4 is TOKSTART above
- if c >= 4 && c-4 < len(yyToknames) {
- if yyToknames[c-4] != "" {
- return yyToknames[c-4]
- }
- }
- return __yyfmt__.Sprintf("tok-%v", c)
-}
-
-func yyStatname(s int) string {
- if s >= 0 && s < len(yyStatenames) {
- if yyStatenames[s] != "" {
- return yyStatenames[s]
- }
- }
- return __yyfmt__.Sprintf("state-%v", s)
-}
-
-func yylex1(lex yyLexer, lval *yySymType) int {
- c := 0
- char := lex.Lex(lval)
- if char <= 0 {
- c = yyTok1[0]
- goto out
- }
- if char < len(yyTok1) {
- c = yyTok1[char]
- goto out
- }
- if char >= yyPrivate {
- if char < yyPrivate+len(yyTok2) {
- c = yyTok2[char-yyPrivate]
- goto out
- }
- }
- for i := 0; i < len(yyTok3); i += 2 {
- c = yyTok3[i+0]
- if c == char {
- c = yyTok3[i+1]
- goto out
- }
- }
-
-out:
- if c == 0 {
- c = yyTok2[1] /* unknown char */
- }
- if yyDebug >= 3 {
- __yyfmt__.Printf("lex %s(%d)\n", yyTokname(c), uint(char))
- }
- return c
-}
-
-func yyParse(yylex yyLexer) int {
- var yyn int
- var yylval yySymType
- var yyVAL yySymType
- yyS := make([]yySymType, yyMaxDepth)
-
- Nerrs := 0 /* number of errors */
- Errflag := 0 /* error recovery flag */
- yystate := 0
- yychar := -1
- yyp := -1
- goto yystack
-
-ret0:
- return 0
-
-ret1:
- return 1
-
-yystack:
- /* put a state and value onto the stack */
- if yyDebug >= 4 {
- __yyfmt__.Printf("char %v in %v\n", yyTokname(yychar), yyStatname(yystate))
- }
-
- yyp++
- if yyp >= len(yyS) {
- nyys := make([]yySymType, len(yyS)*2)
- copy(nyys, yyS)
- yyS = nyys
- }
- yyS[yyp] = yyVAL
- yyS[yyp].yys = yystate
-
-yynewstate:
- yyn = yyPact[yystate]
- if yyn <= yyFlag {
- goto yydefault /* simple state */
- }
- if yychar < 0 {
- yychar = yylex1(yylex, &yylval)
- }
- yyn += yychar
- if yyn < 0 || yyn >= yyLast {
- goto yydefault
- }
- yyn = yyAct[yyn]
- if yyChk[yyn] == yychar { /* valid shift */
- yychar = -1
- yyVAL = yylval
- yystate = yyn
- if Errflag > 0 {
- Errflag--
- }
- goto yystack
- }
-
-yydefault:
- /* default state action */
- yyn = yyDef[yystate]
- if yyn == -2 {
- if yychar < 0 {
- yychar = yylex1(yylex, &yylval)
- }
-
- /* look through exception table */
- xi := 0
- for {
- if yyExca[xi+0] == -1 && yyExca[xi+1] == yystate {
- break
- }
- xi += 2
- }
- for xi += 2; ; xi += 2 {
- yyn = yyExca[xi+0]
- if yyn < 0 || yyn == yychar {
- break
- }
- }
- yyn = yyExca[xi+1]
- if yyn < 0 {
- goto ret0
- }
- }
- if yyn == 0 {
- /* error ... attempt to resume parsing */
- switch Errflag {
- case 0: /* brand new error */
- yylex.Error("syntax error")
- Nerrs++
- if yyDebug >= 1 {
- __yyfmt__.Printf("%s", yyStatname(yystate))
- __yyfmt__.Printf(" saw %s\n", yyTokname(yychar))
- }
- fallthrough
-
- case 1, 2: /* incompletely recovered error ... try again */
- Errflag = 3
-
- /* find a state where "error" is a legal shift action */
- for yyp >= 0 {
- yyn = yyPact[yyS[yyp].yys] + yyErrCode
- if yyn >= 0 && yyn < yyLast {
- yystate = yyAct[yyn] /* simulate a shift of "error" */
- if yyChk[yystate] == yyErrCode {
- goto yystack
- }
- }
-
- /* the current p has no shift on "error", pop stack */
- if yyDebug >= 2 {
- __yyfmt__.Printf("error recovery pops state %d\n", yyS[yyp].yys)
- }
- yyp--
- }
- /* there is no state on the stack with an error shift ... abort */
- goto ret1
-
- case 3: /* no shift yet; clobber input char */
- if yyDebug >= 2 {
- __yyfmt__.Printf("error recovery discards %s\n", yyTokname(yychar))
- }
- if yychar == yyEofCode {
- goto ret1
- }
- yychar = -1
- goto yynewstate /* try again in the same state */
- }
- }
-
- /* reduction by production yyn */
- if yyDebug >= 2 {
- __yyfmt__.Printf("reduce %v in:\n\t%v\n", yyn, yyStatname(yystate))
- }
-
- yynt := yyn
- yypt := yyp
- _ = yypt // guard against "declared and not used"
-
- yyp -= yyR2[yyn]
- yyVAL = yyS[yyp+1]
-
- /* consult goto table to find next state */
- yyn = yyR1[yyn]
- yyg := yyPgo[yyn]
- yyj := yyg + yyS[yyp].yys + 1
-
- if yyj >= yyLast {
- yystate = yyAct[yyg]
- } else {
- yystate = yyAct[yyj]
- if yyChk[yystate] != -yyn {
- yystate = yyAct[yyg]
- }
- }
- // dummy call; replaced with literal code
- switch yynt {
-
- case 1:
- //line parser.go.y:73
- {
- yyVAL.stmts = yyS[yypt-0].stmts
- if l, ok := yylex.(*Lexer); ok {
- l.Stmts = yyVAL.stmts
- }
- }
- case 2:
- //line parser.go.y:79
- {
- yyVAL.stmts = append(yyS[yypt-1].stmts, yyS[yypt-0].stmt)
- if l, ok := yylex.(*Lexer); ok {
- l.Stmts = yyVAL.stmts
- }
- }
- case 3:
- //line parser.go.y:85
- {
- yyVAL.stmts = append(yyS[yypt-2].stmts, yyS[yypt-1].stmt)
- if l, ok := yylex.(*Lexer); ok {
- l.Stmts = yyVAL.stmts
- }
- }
- case 4:
- //line parser.go.y:93
- {
- yyVAL.stmts = []ast.Stmt{}
- }
- case 5:
- //line parser.go.y:96
- {
- yyVAL.stmts = append(yyS[yypt-1].stmts, yyS[yypt-0].stmt)
- }
- case 6:
- //line parser.go.y:99
- {
- yyVAL.stmts = yyS[yypt-1].stmts
- }
- case 7:
- //line parser.go.y:104
- {
- yyVAL.stmts = yyS[yypt-0].stmts
- }
- case 8:
- //line parser.go.y:109
- {
- yyVAL.stmt = &ast.AssignStmt{Lhs: yyS[yypt-2].exprlist, Rhs: yyS[yypt-0].exprlist}
- yyVAL.stmt.SetLine(yyS[yypt-2].exprlist[0].Line())
- }
- case 9:
- //line parser.go.y:114
- {
- if _, ok := yyS[yypt-0].expr.(*ast.FuncCallExpr); !ok {
- yylex.(*Lexer).Error("parse error")
- } else {
- yyVAL.stmt = &ast.FuncCallStmt{Expr: yyS[yypt-0].expr}
- yyVAL.stmt.SetLine(yyS[yypt-0].expr.Line())
- }
- }
- case 10:
- //line parser.go.y:122
- {
- yyVAL.stmt = &ast.DoBlockStmt{Stmts: yyS[yypt-1].stmts}
- yyVAL.stmt.SetLine(yyS[yypt-2].token.Pos.Line)
- yyVAL.stmt.SetLastLine(yyS[yypt-0].token.Pos.Line)
- }
- case 11:
- //line parser.go.y:127
- {
- yyVAL.stmt = &ast.WhileStmt{Condition: yyS[yypt-3].expr, Stmts: yyS[yypt-1].stmts}
- yyVAL.stmt.SetLine(yyS[yypt-4].token.Pos.Line)
- yyVAL.stmt.SetLastLine(yyS[yypt-0].token.Pos.Line)
- }
- case 12:
- //line parser.go.y:132
- {
- yyVAL.stmt = &ast.RepeatStmt{Condition: yyS[yypt-0].expr, Stmts: yyS[yypt-2].stmts}
- yyVAL.stmt.SetLine(yyS[yypt-3].token.Pos.Line)
- yyVAL.stmt.SetLastLine(yyS[yypt-0].expr.Line())
- }
- case 13:
- //line parser.go.y:137
- {
- yyVAL.stmt = &ast.IfStmt{Condition: yyS[yypt-4].expr, Then: yyS[yypt-2].stmts}
- cur := yyVAL.stmt
- for _, elseif := range yyS[yypt-1].stmts {
- cur.(*ast.IfStmt).Else = []ast.Stmt{elseif}
- cur = elseif
- }
- yyVAL.stmt.SetLine(yyS[yypt-5].token.Pos.Line)
- yyVAL.stmt.SetLastLine(yyS[yypt-0].token.Pos.Line)
- }
- case 14:
- //line parser.go.y:147
- {
- yyVAL.stmt = &ast.IfStmt{Condition: yyS[yypt-6].expr, Then: yyS[yypt-4].stmts}
- cur := yyVAL.stmt
- for _, elseif := range yyS[yypt-3].stmts {
- cur.(*ast.IfStmt).Else = []ast.Stmt{elseif}
- cur = elseif
- }
- cur.(*ast.IfStmt).Else = yyS[yypt-1].stmts
- yyVAL.stmt.SetLine(yyS[yypt-7].token.Pos.Line)
- yyVAL.stmt.SetLastLine(yyS[yypt-0].token.Pos.Line)
- }
- case 15:
- //line parser.go.y:158
- {
- yyVAL.stmt = &ast.NumberForStmt{Name: yyS[yypt-7].token.Str, Init: yyS[yypt-5].expr, Limit: yyS[yypt-3].expr, Stmts: yyS[yypt-1].stmts}
- yyVAL.stmt.SetLine(yyS[yypt-8].token.Pos.Line)
- yyVAL.stmt.SetLastLine(yyS[yypt-0].token.Pos.Line)
- }
- case 16:
- //line parser.go.y:163
- {
- yyVAL.stmt = &ast.NumberForStmt{Name: yyS[yypt-9].token.Str, Init: yyS[yypt-7].expr, Limit: yyS[yypt-5].expr, Step: yyS[yypt-3].expr, Stmts: yyS[yypt-1].stmts}
- yyVAL.stmt.SetLine(yyS[yypt-10].token.Pos.Line)
- yyVAL.stmt.SetLastLine(yyS[yypt-0].token.Pos.Line)
- }
- case 17:
- //line parser.go.y:168
- {
- yyVAL.stmt = &ast.GenericForStmt{Names: yyS[yypt-5].namelist, Exprs: yyS[yypt-3].exprlist, Stmts: yyS[yypt-1].stmts}
- yyVAL.stmt.SetLine(yyS[yypt-6].token.Pos.Line)
- yyVAL.stmt.SetLastLine(yyS[yypt-0].token.Pos.Line)
- }
- case 18:
- //line parser.go.y:173
- {
- yyVAL.stmt = &ast.FuncDefStmt{Name: yyS[yypt-1].funcname, Func: yyS[yypt-0].funcexpr}
- yyVAL.stmt.SetLine(yyS[yypt-2].token.Pos.Line)
- yyVAL.stmt.SetLastLine(yyS[yypt-0].funcexpr.LastLine())
- }
- case 19:
- //line parser.go.y:178
- {
- yyVAL.stmt = &ast.LocalAssignStmt{Names: []string{yyS[yypt-1].token.Str}, Exprs: []ast.Expr{yyS[yypt-0].funcexpr}}
- yyVAL.stmt.SetLine(yyS[yypt-3].token.Pos.Line)
- yyVAL.stmt.SetLastLine(yyS[yypt-0].funcexpr.LastLine())
- }
- case 20:
- //line parser.go.y:183
- {
- yyVAL.stmt = &ast.LocalAssignStmt{Names: yyS[yypt-2].namelist, Exprs: yyS[yypt-0].exprlist}
- yyVAL.stmt.SetLine(yyS[yypt-3].token.Pos.Line)
- }
- case 21:
- //line parser.go.y:187
- {
- yyVAL.stmt = &ast.LocalAssignStmt{Names: yyS[yypt-0].namelist, Exprs: []ast.Expr{}}
- yyVAL.stmt.SetLine(yyS[yypt-1].token.Pos.Line)
- }
- case 22:
- //line parser.go.y:193
- {
- yyVAL.stmts = []ast.Stmt{}
- }
- case 23:
- //line parser.go.y:196
- {
- yyVAL.stmts = append(yyS[yypt-4].stmts, &ast.IfStmt{Condition: yyS[yypt-2].expr, Then: yyS[yypt-0].stmts})
- yyVAL.stmts[len(yyVAL.stmts)-1].SetLine(yyS[yypt-3].token.Pos.Line)
- }
- case 24:
- //line parser.go.y:202
- {
- yyVAL.stmt = &ast.ReturnStmt{Exprs: nil}
- yyVAL.stmt.SetLine(yyS[yypt-0].token.Pos.Line)
- }
- case 25:
- //line parser.go.y:206
- {
- yyVAL.stmt = &ast.ReturnStmt{Exprs: yyS[yypt-0].exprlist}
- yyVAL.stmt.SetLine(yyS[yypt-1].token.Pos.Line)
- }
- case 26:
- //line parser.go.y:210
- {
- yyVAL.stmt = &ast.BreakStmt{}
- yyVAL.stmt.SetLine(yyS[yypt-0].token.Pos.Line)
- }
- case 27:
- //line parser.go.y:216
- {
- yyVAL.funcname = yyS[yypt-0].funcname
- }
- case 28:
- //line parser.go.y:219
- {
- yyVAL.funcname = &ast.FuncName{Func: nil, Receiver: yyS[yypt-2].funcname.Func, Method: yyS[yypt-0].token.Str}
- }
- case 29:
- //line parser.go.y:224
- {
- yyVAL.funcname = &ast.FuncName{Func: &ast.IdentExpr{Value: yyS[yypt-0].token.Str}}
- yyVAL.funcname.Func.SetLine(yyS[yypt-0].token.Pos.Line)
- }
- case 30:
- //line parser.go.y:228
- {
- key := &ast.StringExpr{Value: yyS[yypt-0].token.Str}
- key.SetLine(yyS[yypt-0].token.Pos.Line)
- fn := &ast.AttrGetExpr{Object: yyS[yypt-2].funcname.Func, Key: key}
- fn.SetLine(yyS[yypt-0].token.Pos.Line)
- yyVAL.funcname = &ast.FuncName{Func: fn}
- }
- case 31:
- //line parser.go.y:237
- {
- yyVAL.exprlist = []ast.Expr{yyS[yypt-0].expr}
- }
- case 32:
- //line parser.go.y:240
- {
- yyVAL.exprlist = append(yyS[yypt-2].exprlist, yyS[yypt-0].expr)
- }
- case 33:
- //line parser.go.y:245
- {
- yyVAL.expr = &ast.IdentExpr{Value: yyS[yypt-0].token.Str}
- yyVAL.expr.SetLine(yyS[yypt-0].token.Pos.Line)
- }
- case 34:
- //line parser.go.y:249
- {
- yyVAL.expr = &ast.AttrGetExpr{Object: yyS[yypt-3].expr, Key: yyS[yypt-1].expr}
- yyVAL.expr.SetLine(yyS[yypt-3].expr.Line())
- }
- case 35:
- //line parser.go.y:253
- {
- key := &ast.StringExpr{Value: yyS[yypt-0].token.Str}
- key.SetLine(yyS[yypt-0].token.Pos.Line)
- yyVAL.expr = &ast.AttrGetExpr{Object: yyS[yypt-2].expr, Key: key}
- yyVAL.expr.SetLine(yyS[yypt-2].expr.Line())
- }
- case 36:
- //line parser.go.y:261
- {
- yyVAL.namelist = []string{yyS[yypt-0].token.Str}
- }
- case 37:
- //line parser.go.y:264
- {
- yyVAL.namelist = append(yyS[yypt-2].namelist, yyS[yypt-0].token.Str)
- }
- case 38:
- //line parser.go.y:269
- {
- yyVAL.exprlist = []ast.Expr{yyS[yypt-0].expr}
- }
- case 39:
- //line parser.go.y:272
- {
- yyVAL.exprlist = append(yyS[yypt-2].exprlist, yyS[yypt-0].expr)
- }
- case 40:
- //line parser.go.y:277
- {
- yyVAL.expr = &ast.NilExpr{}
- yyVAL.expr.SetLine(yyS[yypt-0].token.Pos.Line)
- }
- case 41:
- //line parser.go.y:281
- {
- yyVAL.expr = &ast.FalseExpr{}
- yyVAL.expr.SetLine(yyS[yypt-0].token.Pos.Line)
- }
- case 42:
- //line parser.go.y:285
- {
- yyVAL.expr = &ast.TrueExpr{}
- yyVAL.expr.SetLine(yyS[yypt-0].token.Pos.Line)
- }
- case 43:
- //line parser.go.y:289
- {
- yyVAL.expr = &ast.NumberExpr{Value: yyS[yypt-0].token.Str}
- yyVAL.expr.SetLine(yyS[yypt-0].token.Pos.Line)
- }
- case 44:
- //line parser.go.y:293
- {
- yyVAL.expr = &ast.Comma3Expr{}
- yyVAL.expr.SetLine(yyS[yypt-0].token.Pos.Line)
- }
- case 45:
- //line parser.go.y:297
- {
- yyVAL.expr = yyS[yypt-0].expr
- }
- case 46:
- //line parser.go.y:300
- {
- yyVAL.expr = yyS[yypt-0].expr
- }
- case 47:
- //line parser.go.y:303
- {
- yyVAL.expr = yyS[yypt-0].expr
- }
- case 48:
- //line parser.go.y:306
- {
- yyVAL.expr = yyS[yypt-0].expr
- }
- case 49:
- //line parser.go.y:309
- {
- yyVAL.expr = &ast.LogicalOpExpr{Lhs: yyS[yypt-2].expr, Operator: "or", Rhs: yyS[yypt-0].expr}
- yyVAL.expr.SetLine(yyS[yypt-2].expr.Line())
- }
- case 50:
- //line parser.go.y:313
- {
- yyVAL.expr = &ast.LogicalOpExpr{Lhs: yyS[yypt-2].expr, Operator: "and", Rhs: yyS[yypt-0].expr}
- yyVAL.expr.SetLine(yyS[yypt-2].expr.Line())
- }
- case 51:
- //line parser.go.y:317
- {
- yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyS[yypt-2].expr, Operator: ">", Rhs: yyS[yypt-0].expr}
- yyVAL.expr.SetLine(yyS[yypt-2].expr.Line())
- }
- case 52:
- //line parser.go.y:321
- {
- yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyS[yypt-2].expr, Operator: "<", Rhs: yyS[yypt-0].expr}
- yyVAL.expr.SetLine(yyS[yypt-2].expr.Line())
- }
- case 53:
- //line parser.go.y:325
- {
- yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyS[yypt-2].expr, Operator: ">=", Rhs: yyS[yypt-0].expr}
- yyVAL.expr.SetLine(yyS[yypt-2].expr.Line())
- }
- case 54:
- //line parser.go.y:329
- {
- yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyS[yypt-2].expr, Operator: "<=", Rhs: yyS[yypt-0].expr}
- yyVAL.expr.SetLine(yyS[yypt-2].expr.Line())
- }
- case 55:
- //line parser.go.y:333
- {
- yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyS[yypt-2].expr, Operator: "==", Rhs: yyS[yypt-0].expr}
- yyVAL.expr.SetLine(yyS[yypt-2].expr.Line())
- }
- case 56:
- //line parser.go.y:337
- {
- yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyS[yypt-2].expr, Operator: "~=", Rhs: yyS[yypt-0].expr}
- yyVAL.expr.SetLine(yyS[yypt-2].expr.Line())
- }
- case 57:
- //line parser.go.y:341
- {
- yyVAL.expr = &ast.StringConcatOpExpr{Lhs: yyS[yypt-2].expr, Rhs: yyS[yypt-0].expr}
- yyVAL.expr.SetLine(yyS[yypt-2].expr.Line())
- }
- case 58:
- //line parser.go.y:345
- {
- yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyS[yypt-2].expr, Operator: "+", Rhs: yyS[yypt-0].expr}
- yyVAL.expr.SetLine(yyS[yypt-2].expr.Line())
- }
- case 59:
- //line parser.go.y:349
- {
- yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyS[yypt-2].expr, Operator: "-", Rhs: yyS[yypt-0].expr}
- yyVAL.expr.SetLine(yyS[yypt-2].expr.Line())
- }
- case 60:
- //line parser.go.y:353
- {
- yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyS[yypt-2].expr, Operator: "*", Rhs: yyS[yypt-0].expr}
- yyVAL.expr.SetLine(yyS[yypt-2].expr.Line())
- }
- case 61:
- //line parser.go.y:357
- {
- yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyS[yypt-2].expr, Operator: "/", Rhs: yyS[yypt-0].expr}
- yyVAL.expr.SetLine(yyS[yypt-2].expr.Line())
- }
- case 62:
- //line parser.go.y:361
- {
- yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyS[yypt-2].expr, Operator: "%", Rhs: yyS[yypt-0].expr}
- yyVAL.expr.SetLine(yyS[yypt-2].expr.Line())
- }
- case 63:
- //line parser.go.y:365
- {
- yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyS[yypt-2].expr, Operator: "^", Rhs: yyS[yypt-0].expr}
- yyVAL.expr.SetLine(yyS[yypt-2].expr.Line())
- }
- case 64:
- //line parser.go.y:369
- {
- yyVAL.expr = &ast.UnaryMinusOpExpr{Expr: yyS[yypt-0].expr}
- yyVAL.expr.SetLine(yyS[yypt-0].expr.Line())
- }
- case 65:
- //line parser.go.y:373
- {
- yyVAL.expr = &ast.UnaryNotOpExpr{Expr: yyS[yypt-0].expr}
- yyVAL.expr.SetLine(yyS[yypt-0].expr.Line())
- }
- case 66:
- //line parser.go.y:377
- {
- yyVAL.expr = &ast.UnaryLenOpExpr{Expr: yyS[yypt-0].expr}
- yyVAL.expr.SetLine(yyS[yypt-0].expr.Line())
- }
- case 67:
- //line parser.go.y:383
- {
- yyVAL.expr = &ast.StringExpr{Value: yyS[yypt-0].token.Str}
- yyVAL.expr.SetLine(yyS[yypt-0].token.Pos.Line)
- }
- case 68:
- //line parser.go.y:389
- {
- yyVAL.expr = yyS[yypt-0].expr
- }
- case 69:
- //line parser.go.y:392
- {
- yyVAL.expr = yyS[yypt-0].expr
- }
- case 70:
- //line parser.go.y:395
- {
- yyVAL.expr = yyS[yypt-0].expr
- }
- case 71:
- //line parser.go.y:398
- {
- yyVAL.expr = yyS[yypt-1].expr
- yyVAL.expr.SetLine(yyS[yypt-2].token.Pos.Line)
- }
- case 72:
- //line parser.go.y:404
- {
- yyS[yypt-1].expr.(*ast.FuncCallExpr).AdjustRet = true
- yyVAL.expr = yyS[yypt-1].expr
- }
- case 73:
- //line parser.go.y:410
- {
- yyVAL.expr = &ast.FuncCallExpr{Func: yyS[yypt-1].expr, Args: yyS[yypt-0].exprlist}
- yyVAL.expr.SetLine(yyS[yypt-1].expr.Line())
- }
- case 74:
- //line parser.go.y:414
- {
- yyVAL.expr = &ast.FuncCallExpr{Method: yyS[yypt-1].token.Str, Receiver: yyS[yypt-3].expr, Args: yyS[yypt-0].exprlist}
- yyVAL.expr.SetLine(yyS[yypt-3].expr.Line())
- }
- case 75:
- //line parser.go.y:420
- {
- if yylex.(*Lexer).PNewLine {
- yylex.(*Lexer).TokenError(yyS[yypt-1].token, "ambiguous syntax (function call x new statement)")
- }
- yyVAL.exprlist = []ast.Expr{}
- }
- case 76:
- //line parser.go.y:426
- {
- if yylex.(*Lexer).PNewLine {
- yylex.(*Lexer).TokenError(yyS[yypt-2].token, "ambiguous syntax (function call x new statement)")
- }
- yyVAL.exprlist = yyS[yypt-1].exprlist
- }
- case 77:
- //line parser.go.y:432
- {
- yyVAL.exprlist = []ast.Expr{yyS[yypt-0].expr}
- }
- case 78:
- //line parser.go.y:435
- {
- yyVAL.exprlist = []ast.Expr{yyS[yypt-0].expr}
- }
- case 79:
- //line parser.go.y:440
- {
- yyVAL.expr = &ast.FunctionExpr{ParList: yyS[yypt-0].funcexpr.ParList, Stmts: yyS[yypt-0].funcexpr.Stmts}
- yyVAL.expr.SetLine(yyS[yypt-1].token.Pos.Line)
- yyVAL.expr.SetLastLine(yyS[yypt-0].funcexpr.LastLine())
- }
- case 80:
- //line parser.go.y:447
- {
- yyVAL.funcexpr = &ast.FunctionExpr{ParList: yyS[yypt-3].parlist, Stmts: yyS[yypt-1].stmts}
- yyVAL.funcexpr.SetLine(yyS[yypt-4].token.Pos.Line)
- yyVAL.funcexpr.SetLastLine(yyS[yypt-0].token.Pos.Line)
- }
- case 81:
- //line parser.go.y:452
- {
- yyVAL.funcexpr = &ast.FunctionExpr{ParList: &ast.ParList{HasVargs: false, Names: []string{}}, Stmts: yyS[yypt-1].stmts}
- yyVAL.funcexpr.SetLine(yyS[yypt-3].token.Pos.Line)
- yyVAL.funcexpr.SetLastLine(yyS[yypt-0].token.Pos.Line)
- }
- case 82:
- //line parser.go.y:459
- {
- yyVAL.parlist = &ast.ParList{HasVargs: true, Names: []string{}}
- }
- case 83:
- //line parser.go.y:462
- {
- yyVAL.parlist = &ast.ParList{HasVargs: false, Names: []string{}}
- yyVAL.parlist.Names = append(yyVAL.parlist.Names, yyS[yypt-0].namelist...)
- }
- case 84:
- //line parser.go.y:466
- {
- yyVAL.parlist = &ast.ParList{HasVargs: true, Names: []string{}}
- yyVAL.parlist.Names = append(yyVAL.parlist.Names, yyS[yypt-2].namelist...)
- }
- case 85:
- //line parser.go.y:473
- {
- yyVAL.expr = &ast.TableExpr{Fields: []*ast.Field{}}
- yyVAL.expr.SetLine(yyS[yypt-1].token.Pos.Line)
- }
- case 86:
- //line parser.go.y:477
- {
- yyVAL.expr = &ast.TableExpr{Fields: yyS[yypt-1].fieldlist}
- yyVAL.expr.SetLine(yyS[yypt-2].token.Pos.Line)
- }
- case 87:
- //line parser.go.y:484
- {
- yyVAL.fieldlist = []*ast.Field{yyS[yypt-0].field}
- }
- case 88:
- //line parser.go.y:487
- {
- yyVAL.fieldlist = append(yyS[yypt-2].fieldlist, yyS[yypt-0].field)
- }
- case 89:
- //line parser.go.y:490
- {
- yyVAL.fieldlist = yyS[yypt-1].fieldlist
- }
- case 90:
- //line parser.go.y:495
- {
- yyVAL.field = &ast.Field{Key: &ast.StringExpr{Value: yyS[yypt-2].token.Str}, Value: yyS[yypt-0].expr}
- yyVAL.field.Key.SetLine(yyS[yypt-2].token.Pos.Line)
- }
- case 91:
- //line parser.go.y:499
- {
- yyVAL.field = &ast.Field{Key: yyS[yypt-3].expr, Value: yyS[yypt-0].expr}
- }
- case 92:
- //line parser.go.y:502
- {
- yyVAL.field = &ast.Field{Value: yyS[yypt-0].expr}
- }
- case 93:
- //line parser.go.y:507
- {
- yyVAL.fieldsep = ","
- }
- case 94:
- //line parser.go.y:510
- {
- yyVAL.fieldsep = ";"
- }
- }
- goto yystack /* stack new state and value */
-}
diff --git a/vendor/github.com/yuin/gopher-lua/parse/parser.go.y b/vendor/github.com/yuin/gopher-lua/parse/parser.go.y
deleted file mode 100644
index 956133db..00000000
--- a/vendor/github.com/yuin/gopher-lua/parse/parser.go.y
+++ /dev/null
@@ -1,524 +0,0 @@
-%{
-package parse
-
-import (
- "github.com/yuin/gopher-lua/ast"
-)
-%}
-%type chunk
-%type chunk1
-%type block
-%type stat
-%type elseifs
-%type laststat
-%type funcname
-%type funcname1
-%type varlist
-%type var
-%type namelist
-%type exprlist
-%type expr
-%type string
-%type prefixexp
-%type functioncall
-%type afunctioncall
-%type args
-%type function
-%type funcbody
-%type parlist
-%type tableconstructor
-%type fieldlist
-%type field
-%type fieldsep
-
-%union {
- token ast.Token
-
- stmts []ast.Stmt
- stmt ast.Stmt
-
- funcname *ast.FuncName
- funcexpr *ast.FunctionExpr
-
- exprlist []ast.Expr
- expr ast.Expr
-
- fieldlist []*ast.Field
- field *ast.Field
- fieldsep string
-
- namelist []string
- parlist *ast.ParList
-}
-
-/* Reserved words */
-%token TAnd TBreak TDo TElse TElseIf TEnd TFalse TFor TFunction TIf TIn TLocal TNil TNot TOr TReturn TRepeat TThen TTrue TUntil TWhile
-
-/* Literals */
-%token TEqeq TNeq TLte TGte T2Comma T3Comma TIdent TNumber TString '{' '('
-
-/* Operators */
-%left TOr
-%left TAnd
-%left '>' '<' TGte TLte TEqeq TNeq
-%right T2Comma
-%left '+' '-'
-%left '*' '/' '%'
-%right UNARY /* not # -(unary) */
-%right '^'
-
-%%
-
-chunk:
- chunk1 {
- $$ = $1
- if l, ok := yylex.(*Lexer); ok {
- l.Stmts = $$
- }
- } |
- chunk1 laststat {
- $$ = append($1, $2)
- if l, ok := yylex.(*Lexer); ok {
- l.Stmts = $$
- }
- } |
- chunk1 laststat ';' {
- $$ = append($1, $2)
- if l, ok := yylex.(*Lexer); ok {
- l.Stmts = $$
- }
- }
-
-chunk1:
- {
- $$ = []ast.Stmt{}
- } |
- chunk1 stat {
- $$ = append($1, $2)
- } |
- chunk1 ';' {
- $$ = $1
- }
-
-block:
- chunk {
- $$ = $1
- }
-
-stat:
- varlist '=' exprlist {
- $$ = &ast.AssignStmt{Lhs: $1, Rhs: $3}
- $$.SetLine($1[0].Line())
- } |
- /* 'stat = functioncal' causes a reduce/reduce conflict */
- prefixexp {
- if _, ok := $1.(*ast.FuncCallExpr); !ok {
- yylex.(*Lexer).Error("parse error")
- } else {
- $$ = &ast.FuncCallStmt{Expr: $1}
- $$.SetLine($1.Line())
- }
- } |
- TDo block TEnd {
- $$ = &ast.DoBlockStmt{Stmts: $2}
- $$.SetLine($1.Pos.Line)
- $$.SetLastLine($3.Pos.Line)
- } |
- TWhile expr TDo block TEnd {
- $$ = &ast.WhileStmt{Condition: $2, Stmts: $4}
- $$.SetLine($1.Pos.Line)
- $$.SetLastLine($5.Pos.Line)
- } |
- TRepeat block TUntil expr {
- $$ = &ast.RepeatStmt{Condition: $4, Stmts: $2}
- $$.SetLine($1.Pos.Line)
- $$.SetLastLine($4.Line())
- } |
- TIf expr TThen block elseifs TEnd {
- $$ = &ast.IfStmt{Condition: $2, Then: $4}
- cur := $$
- for _, elseif := range $5 {
- cur.(*ast.IfStmt).Else = []ast.Stmt{elseif}
- cur = elseif
- }
- $$.SetLine($1.Pos.Line)
- $$.SetLastLine($6.Pos.Line)
- } |
- TIf expr TThen block elseifs TElse block TEnd {
- $$ = &ast.IfStmt{Condition: $2, Then: $4}
- cur := $$
- for _, elseif := range $5 {
- cur.(*ast.IfStmt).Else = []ast.Stmt{elseif}
- cur = elseif
- }
- cur.(*ast.IfStmt).Else = $7
- $$.SetLine($1.Pos.Line)
- $$.SetLastLine($8.Pos.Line)
- } |
- TFor TIdent '=' expr ',' expr TDo block TEnd {
- $$ = &ast.NumberForStmt{Name: $2.Str, Init: $4, Limit: $6, Stmts: $8}
- $$.SetLine($1.Pos.Line)
- $$.SetLastLine($9.Pos.Line)
- } |
- TFor TIdent '=' expr ',' expr ',' expr TDo block TEnd {
- $$ = &ast.NumberForStmt{Name: $2.Str, Init: $4, Limit: $6, Step:$8, Stmts: $10}
- $$.SetLine($1.Pos.Line)
- $$.SetLastLine($11.Pos.Line)
- } |
- TFor namelist TIn exprlist TDo block TEnd {
- $$ = &ast.GenericForStmt{Names:$2, Exprs:$4, Stmts: $6}
- $$.SetLine($1.Pos.Line)
- $$.SetLastLine($7.Pos.Line)
- } |
- TFunction funcname funcbody {
- $$ = &ast.FuncDefStmt{Name: $2, Func: $3}
- $$.SetLine($1.Pos.Line)
- $$.SetLastLine($3.LastLine())
- } |
- TLocal TFunction TIdent funcbody {
- $$ = &ast.LocalAssignStmt{Names:[]string{$3.Str}, Exprs: []ast.Expr{$4}}
- $$.SetLine($1.Pos.Line)
- $$.SetLastLine($4.LastLine())
- } |
- TLocal namelist '=' exprlist {
- $$ = &ast.LocalAssignStmt{Names: $2, Exprs:$4}
- $$.SetLine($1.Pos.Line)
- } |
- TLocal namelist {
- $$ = &ast.LocalAssignStmt{Names: $2, Exprs:[]ast.Expr{}}
- $$.SetLine($1.Pos.Line)
- }
-
-elseifs:
- {
- $$ = []ast.Stmt{}
- } |
- elseifs TElseIf expr TThen block {
- $$ = append($1, &ast.IfStmt{Condition: $3, Then: $5})
- $$[len($$)-1].SetLine($2.Pos.Line)
- }
-
-laststat:
- TReturn {
- $$ = &ast.ReturnStmt{Exprs:nil}
- $$.SetLine($1.Pos.Line)
- } |
- TReturn exprlist {
- $$ = &ast.ReturnStmt{Exprs:$2}
- $$.SetLine($1.Pos.Line)
- } |
- TBreak {
- $$ = &ast.BreakStmt{}
- $$.SetLine($1.Pos.Line)
- }
-
-funcname:
- funcname1 {
- $$ = $1
- } |
- funcname1 ':' TIdent {
- $$ = &ast.FuncName{Func:nil, Receiver:$1.Func, Method: $3.Str}
- }
-
-funcname1:
- TIdent {
- $$ = &ast.FuncName{Func: &ast.IdentExpr{Value:$1.Str}}
- $$.Func.SetLine($1.Pos.Line)
- } |
- funcname1 '.' TIdent {
- key:= &ast.StringExpr{Value:$3.Str}
- key.SetLine($3.Pos.Line)
- fn := &ast.AttrGetExpr{Object: $1.Func, Key: key}
- fn.SetLine($3.Pos.Line)
- $$ = &ast.FuncName{Func: fn}
- }
-
-varlist:
- var {
- $$ = []ast.Expr{$1}
- } |
- varlist ',' var {
- $$ = append($1, $3)
- }
-
-var:
- TIdent {
- $$ = &ast.IdentExpr{Value:$1.Str}
- $$.SetLine($1.Pos.Line)
- } |
- prefixexp '[' expr ']' {
- $$ = &ast.AttrGetExpr{Object: $1, Key: $3}
- $$.SetLine($1.Line())
- } |
- prefixexp '.' TIdent {
- key := &ast.StringExpr{Value:$3.Str}
- key.SetLine($3.Pos.Line)
- $$ = &ast.AttrGetExpr{Object: $1, Key: key}
- $$.SetLine($1.Line())
- }
-
-namelist:
- TIdent {
- $$ = []string{$1.Str}
- } |
- namelist ',' TIdent {
- $$ = append($1, $3.Str)
- }
-
-exprlist:
- expr {
- $$ = []ast.Expr{$1}
- } |
- exprlist ',' expr {
- $$ = append($1, $3)
- }
-
-expr:
- TNil {
- $$ = &ast.NilExpr{}
- $$.SetLine($1.Pos.Line)
- } |
- TFalse {
- $$ = &ast.FalseExpr{}
- $$.SetLine($1.Pos.Line)
- } |
- TTrue {
- $$ = &ast.TrueExpr{}
- $$.SetLine($1.Pos.Line)
- } |
- TNumber {
- $$ = &ast.NumberExpr{Value: $1.Str}
- $$.SetLine($1.Pos.Line)
- } |
- T3Comma {
- $$ = &ast.Comma3Expr{}
- $$.SetLine($1.Pos.Line)
- } |
- function {
- $$ = $1
- } |
- prefixexp {
- $$ = $1
- } |
- string {
- $$ = $1
- } |
- tableconstructor {
- $$ = $1
- } |
- expr TOr expr {
- $$ = &ast.LogicalOpExpr{Lhs: $1, Operator: "or", Rhs: $3}
- $$.SetLine($1.Line())
- } |
- expr TAnd expr {
- $$ = &ast.LogicalOpExpr{Lhs: $1, Operator: "and", Rhs: $3}
- $$.SetLine($1.Line())
- } |
- expr '>' expr {
- $$ = &ast.RelationalOpExpr{Lhs: $1, Operator: ">", Rhs: $3}
- $$.SetLine($1.Line())
- } |
- expr '<' expr {
- $$ = &ast.RelationalOpExpr{Lhs: $1, Operator: "<", Rhs: $3}
- $$.SetLine($1.Line())
- } |
- expr TGte expr {
- $$ = &ast.RelationalOpExpr{Lhs: $1, Operator: ">=", Rhs: $3}
- $$.SetLine($1.Line())
- } |
- expr TLte expr {
- $$ = &ast.RelationalOpExpr{Lhs: $1, Operator: "<=", Rhs: $3}
- $$.SetLine($1.Line())
- } |
- expr TEqeq expr {
- $$ = &ast.RelationalOpExpr{Lhs: $1, Operator: "==", Rhs: $3}
- $$.SetLine($1.Line())
- } |
- expr TNeq expr {
- $$ = &ast.RelationalOpExpr{Lhs: $1, Operator: "~=", Rhs: $3}
- $$.SetLine($1.Line())
- } |
- expr T2Comma expr {
- $$ = &ast.StringConcatOpExpr{Lhs: $1, Rhs: $3}
- $$.SetLine($1.Line())
- } |
- expr '+' expr {
- $$ = &ast.ArithmeticOpExpr{Lhs: $1, Operator: "+", Rhs: $3}
- $$.SetLine($1.Line())
- } |
- expr '-' expr {
- $$ = &ast.ArithmeticOpExpr{Lhs: $1, Operator: "-", Rhs: $3}
- $$.SetLine($1.Line())
- } |
- expr '*' expr {
- $$ = &ast.ArithmeticOpExpr{Lhs: $1, Operator: "*", Rhs: $3}
- $$.SetLine($1.Line())
- } |
- expr '/' expr {
- $$ = &ast.ArithmeticOpExpr{Lhs: $1, Operator: "/", Rhs: $3}
- $$.SetLine($1.Line())
- } |
- expr '%' expr {
- $$ = &ast.ArithmeticOpExpr{Lhs: $1, Operator: "%", Rhs: $3}
- $$.SetLine($1.Line())
- } |
- expr '^' expr {
- $$ = &ast.ArithmeticOpExpr{Lhs: $1, Operator: "^", Rhs: $3}
- $$.SetLine($1.Line())
- } |
- '-' expr %prec UNARY {
- $$ = &ast.UnaryMinusOpExpr{Expr: $2}
- $$.SetLine($2.Line())
- } |
- TNot expr %prec UNARY {
- $$ = &ast.UnaryNotOpExpr{Expr: $2}
- $$.SetLine($2.Line())
- } |
- '#' expr %prec UNARY {
- $$ = &ast.UnaryLenOpExpr{Expr: $2}
- $$.SetLine($2.Line())
- }
-
-string:
- TString {
- $$ = &ast.StringExpr{Value: $1.Str}
- $$.SetLine($1.Pos.Line)
- }
-
-prefixexp:
- var {
- $$ = $1
- } |
- afunctioncall {
- $$ = $1
- } |
- functioncall {
- $$ = $1
- } |
- '(' expr ')' {
- $$ = $2
- $$.SetLine($1.Pos.Line)
- }
-
-afunctioncall:
- '(' functioncall ')' {
- $2.(*ast.FuncCallExpr).AdjustRet = true
- $$ = $2
- }
-
-functioncall:
- prefixexp args {
- $$ = &ast.FuncCallExpr{Func: $1, Args: $2}
- $$.SetLine($1.Line())
- } |
- prefixexp ':' TIdent args {
- $$ = &ast.FuncCallExpr{Method: $3.Str, Receiver: $1, Args: $4}
- $$.SetLine($1.Line())
- }
-
-args:
- '(' ')' {
- if yylex.(*Lexer).PNewLine {
- yylex.(*Lexer).TokenError($1, "ambiguous syntax (function call x new statement)")
- }
- $$ = []ast.Expr{}
- } |
- '(' exprlist ')' {
- if yylex.(*Lexer).PNewLine {
- yylex.(*Lexer).TokenError($1, "ambiguous syntax (function call x new statement)")
- }
- $$ = $2
- } |
- tableconstructor {
- $$ = []ast.Expr{$1}
- } |
- string {
- $$ = []ast.Expr{$1}
- }
-
-function:
- TFunction funcbody {
- $$ = &ast.FunctionExpr{ParList:$2.ParList, Stmts: $2.Stmts}
- $$.SetLine($1.Pos.Line)
- $$.SetLastLine($2.LastLine())
- }
-
-funcbody:
- '(' parlist ')' block TEnd {
- $$ = &ast.FunctionExpr{ParList: $2, Stmts: $4}
- $$.SetLine($1.Pos.Line)
- $$.SetLastLine($5.Pos.Line)
- } |
- '(' ')' block TEnd {
- $$ = &ast.FunctionExpr{ParList: &ast.ParList{HasVargs: false, Names: []string{}}, Stmts: $3}
- $$.SetLine($1.Pos.Line)
- $$.SetLastLine($4.Pos.Line)
- }
-
-parlist:
- T3Comma {
- $$ = &ast.ParList{HasVargs: true, Names: []string{}}
- } |
- namelist {
- $$ = &ast.ParList{HasVargs: false, Names: []string{}}
- $$.Names = append($$.Names, $1...)
- } |
- namelist ',' T3Comma {
- $$ = &ast.ParList{HasVargs: true, Names: []string{}}
- $$.Names = append($$.Names, $1...)
- }
-
-
-tableconstructor:
- '{' '}' {
- $$ = &ast.TableExpr{Fields: []*ast.Field{}}
- $$.SetLine($1.Pos.Line)
- } |
- '{' fieldlist '}' {
- $$ = &ast.TableExpr{Fields: $2}
- $$.SetLine($1.Pos.Line)
- }
-
-
-fieldlist:
- field {
- $$ = []*ast.Field{$1}
- } |
- fieldlist fieldsep field {
- $$ = append($1, $3)
- } |
- fieldlist fieldsep {
- $$ = $1
- }
-
-field:
- TIdent '=' expr {
- $$ = &ast.Field{Key: &ast.StringExpr{Value:$1.Str}, Value: $3}
- $$.Key.SetLine($1.Pos.Line)
- } |
- '[' expr ']' '=' expr {
- $$ = &ast.Field{Key: $2, Value: $5}
- } |
- expr {
- $$ = &ast.Field{Value: $1}
- }
-
-fieldsep:
- ',' {
- $$ = ","
- } |
- ';' {
- $$ = ";"
- }
-
-%%
-
-func TokenName(c int) string {
- if c >= TAnd && c-TAnd < len(yyToknames) {
- if yyToknames[c-TAnd] != "" {
- return yyToknames[c-TAnd]
- }
- }
- return string([]byte{byte(c)})
-}
-
diff --git a/vendor/github.com/yuin/gopher-lua/pm/pm.go b/vendor/github.com/yuin/gopher-lua/pm/pm.go
deleted file mode 100644
index e15bc210..00000000
--- a/vendor/github.com/yuin/gopher-lua/pm/pm.go
+++ /dev/null
@@ -1,638 +0,0 @@
-// Lua pattern match functions for Go
-package pm
-
-import (
- "fmt"
-)
-
-const EOS = -1
-const _UNKNOWN = -2
-
-/* Error {{{ */
-
-type Error struct {
- Pos int
- Message string
-}
-
-func newError(pos int, message string, args ...interface{}) *Error {
- if len(args) == 0 {
- return &Error{pos, message}
- }
- return &Error{pos, fmt.Sprintf(message, args...)}
-}
-
-func (e *Error) Error() string {
- switch e.Pos {
- case EOS:
- return fmt.Sprintf("%s at EOS", e.Message)
- case _UNKNOWN:
- return fmt.Sprintf("%s", e.Message)
- default:
- return fmt.Sprintf("%s at %d", e.Message, e.Pos)
- }
-}
-
-/* }}} */
-
-/* MatchData {{{ */
-
-type MatchData struct {
- // captured positions
- // layout
- // xxxx xxxx xxxx xxx0 : caputured positions
- // xxxx xxxx xxxx xxx1 : position captured positions
- captures []uint32
-}
-
-func newMatchState() *MatchData { return &MatchData{[]uint32{}} }
-
-func (st *MatchData) addPosCapture(s, pos int) {
- for s+1 >= len(st.captures) {
- st.captures = append(st.captures, 0)
- }
- st.captures[s] = (uint32(pos) << 1) | 1
- st.captures[s+1] = (uint32(pos) << 1) | 1
-}
-
-func (st *MatchData) setCapture(s, pos int) uint32 {
- for s >= len(st.captures) {
- st.captures = append(st.captures, 0)
- }
- v := st.captures[s]
- st.captures[s] = (uint32(pos) << 1)
- return v
-}
-
-func (st *MatchData) restoreCapture(s int, pos uint32) { st.captures[s] = pos }
-
-func (st *MatchData) CaptureLength() int { return len(st.captures) }
-
-func (st *MatchData) IsPosCapture(idx int) bool { return (st.captures[idx] & 1) == 1 }
-
-func (st *MatchData) Capture(idx int) int { return int(st.captures[idx] >> 1) }
-
-/* }}} */
-
-/* scanner {{{ */
-
-type scannerState struct {
- Pos int
- started bool
-}
-
-type scanner struct {
- src []byte
- State scannerState
- saved scannerState
-}
-
-func newScanner(src []byte) *scanner {
- return &scanner{
- src: src,
- State: scannerState{
- Pos: 0,
- started: false,
- },
- saved: scannerState{},
- }
-}
-
-func (sc *scanner) Length() int { return len(sc.src) }
-
-func (sc *scanner) Next() int {
- if !sc.State.started {
- sc.State.started = true
- if len(sc.src) == 0 {
- sc.State.Pos = EOS
- }
- } else {
- sc.State.Pos = sc.NextPos()
- }
- if sc.State.Pos == EOS {
- return EOS
- }
- return int(sc.src[sc.State.Pos])
-}
-
-func (sc *scanner) CurrentPos() int {
- return sc.State.Pos
-}
-
-func (sc *scanner) NextPos() int {
- if sc.State.Pos == EOS || sc.State.Pos >= len(sc.src)-1 {
- return EOS
- }
- if !sc.State.started {
- return 0
- } else {
- return sc.State.Pos + 1
- }
-}
-
-func (sc *scanner) Peek() int {
- cureof := sc.State.Pos == EOS
- ch := sc.Next()
- if !cureof {
- if sc.State.Pos == EOS {
- sc.State.Pos = len(sc.src) - 1
- } else {
- sc.State.Pos--
- if sc.State.Pos < 0 {
- sc.State.Pos = 0
- sc.State.started = false
- }
- }
- }
- return ch
-}
-
-func (sc *scanner) Save() { sc.saved = sc.State }
-
-func (sc *scanner) Restore() { sc.State = sc.saved }
-
-/* }}} */
-
-/* bytecode {{{ */
-
-type opCode int
-
-const (
- opChar opCode = iota
- opMatch
- opTailMatch
- opJmp
- opSplit
- opSave
- opPSave
- opBrace
- opNumber
-)
-
-type inst struct {
- OpCode opCode
- Class class
- Operand1 int
- Operand2 int
-}
-
-/* }}} */
-
-/* classes {{{ */
-
-type class interface {
- Matches(ch int) bool
-}
-
-type dotClass struct{}
-
-func (pn *dotClass) Matches(ch int) bool { return true }
-
-type charClass struct {
- Ch int
-}
-
-func (pn *charClass) Matches(ch int) bool { return pn.Ch == ch }
-
-type singleClass struct {
- Class int
-}
-
-func (pn *singleClass) Matches(ch int) bool {
- ret := false
- switch pn.Class {
- case 'a', 'A':
- ret = 'A' <= ch && ch <= 'Z' || 'a' <= ch && ch <= 'z'
- case 'c', 'C':
- ret = (0x00 <= ch && ch <= 0x1F) || ch == 0x7F
- case 'd', 'D':
- ret = '0' <= ch && ch <= '9'
- case 'l', 'L':
- ret = 'a' <= ch && ch <= 'z'
- case 'p', 'P':
- ret = (0x21 <= ch && ch <= 0x2f) || (0x30 <= ch && ch <= 0x40) || (0x5b <= ch && ch <= 0x60) || (0x7b <= ch && ch <= 0x7e)
- case 's', 'S':
- switch ch {
- case ' ', '\f', '\n', '\r', '\t', '\v':
- ret = true
- }
- case 'u', 'U':
- ret = 'A' <= ch && ch <= 'Z'
- case 'w', 'W':
- ret = '0' <= ch && ch <= '9' || 'A' <= ch && ch <= 'Z' || 'a' <= ch && ch <= 'z'
- case 'x', 'X':
- ret = '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
- case 'z', 'Z':
- ret = ch == 0
- default:
- return ch == pn.Class
- }
- if 'A' <= pn.Class && pn.Class <= 'Z' {
- return !ret
- }
- return ret
-}
-
-type setClass struct {
- IsNot bool
- Classes []class
-}
-
-func (pn *setClass) Matches(ch int) bool {
- for _, class := range pn.Classes {
- if class.Matches(ch) {
- return !pn.IsNot
- }
- }
- return pn.IsNot
-}
-
-type rangeClass struct {
- Begin class
- End class
-}
-
-func (pn *rangeClass) Matches(ch int) bool {
- switch begin := pn.Begin.(type) {
- case *charClass:
- end, ok := pn.End.(*charClass)
- if !ok {
- return false
- }
- return begin.Ch <= ch && ch <= end.Ch
- }
- return false
-}
-
-// }}}
-
-// patterns {{{
-
-type pattern interface{}
-
-type singlePattern struct {
- Class class
-}
-
-type seqPattern struct {
- MustHead bool
- MustTail bool
- Patterns []pattern
-}
-
-type repeatPattern struct {
- Type int
- Class class
-}
-
-type posCapPattern struct{}
-
-type capPattern struct {
- Pattern pattern
-}
-
-type numberPattern struct {
- N int
-}
-
-type bracePattern struct {
- Begin int
- End int
-}
-
-// }}}
-
-/* parse {{{ */
-
-func parseClass(sc *scanner, allowset bool) class {
- ch := sc.Next()
- switch ch {
- case '%':
- return &singleClass{sc.Next()}
- case '.':
- if allowset {
- return &dotClass{}
- }
- return &charClass{ch}
- case '[':
- if allowset {
- return parseClassSet(sc)
- }
- return &charClass{ch}
- //case '^' '$', '(', ')', ']', '*', '+', '-', '?':
- // panic(newError(sc.CurrentPos(), "invalid %c", ch))
- case EOS:
- panic(newError(sc.CurrentPos(), "unexpected EOS"))
- default:
- return &charClass{ch}
- }
-}
-
-func parseClassSet(sc *scanner) class {
- set := &setClass{false, []class{}}
- if sc.Peek() == '^' {
- set.IsNot = true
- sc.Next()
- }
- isrange := false
- for {
- ch := sc.Peek()
- switch ch {
- // case '[':
- // panic(newError(sc.CurrentPos(), "'[' can not be nested"))
- case EOS:
- panic(newError(sc.CurrentPos(), "unexpected EOS"))
- case ']':
- if len(set.Classes) > 0 {
- sc.Next()
- goto exit
- }
- fallthrough
- case '-':
- if len(set.Classes) > 0 {
- sc.Next()
- isrange = true
- continue
- }
- fallthrough
- default:
- set.Classes = append(set.Classes, parseClass(sc, false))
- }
- if isrange {
- begin := set.Classes[len(set.Classes)-2]
- end := set.Classes[len(set.Classes)-1]
- set.Classes = set.Classes[0 : len(set.Classes)-2]
- set.Classes = append(set.Classes, &rangeClass{begin, end})
- isrange = false
- }
- }
-exit:
- if isrange {
- set.Classes = append(set.Classes, &charClass{'-'})
- }
-
- return set
-}
-
-func parsePattern(sc *scanner, toplevel bool) *seqPattern {
- pat := &seqPattern{}
- if toplevel {
- if sc.Peek() == '^' {
- sc.Next()
- pat.MustHead = true
- }
- }
- for {
- ch := sc.Peek()
- switch ch {
- case '%':
- sc.Save()
- sc.Next()
- switch sc.Peek() {
- case '0':
- panic(newError(sc.CurrentPos(), "invalid capture index"))
- case '1', '2', '3', '4', '5', '6', '7', '8', '9':
- pat.Patterns = append(pat.Patterns, &numberPattern{sc.Next() - 48})
- case 'b':
- sc.Next()
- pat.Patterns = append(pat.Patterns, &bracePattern{sc.Next(), sc.Next()})
- default:
- sc.Restore()
- pat.Patterns = append(pat.Patterns, &singlePattern{parseClass(sc, true)})
- }
- case '.', '[', ']':
- pat.Patterns = append(pat.Patterns, &singlePattern{parseClass(sc, true)})
- //case ']':
- // panic(newError(sc.CurrentPos(), "invalid ']'"))
- case ')':
- if toplevel {
- panic(newError(sc.CurrentPos(), "invalid ')'"))
- }
- return pat
- case '(':
- sc.Next()
- if sc.Peek() == ')' {
- sc.Next()
- pat.Patterns = append(pat.Patterns, &posCapPattern{})
- } else {
- ret := &capPattern{parsePattern(sc, false)}
- if sc.Peek() != ')' {
- panic(newError(sc.CurrentPos(), "unfinished capture"))
- }
- sc.Next()
- pat.Patterns = append(pat.Patterns, ret)
- }
- case '*', '+', '-', '?':
- sc.Next()
- if len(pat.Patterns) > 0 {
- spat, ok := pat.Patterns[len(pat.Patterns)-1].(*singlePattern)
- if ok {
- pat.Patterns = pat.Patterns[0 : len(pat.Patterns)-1]
- pat.Patterns = append(pat.Patterns, &repeatPattern{ch, spat.Class})
- continue
- }
- }
- pat.Patterns = append(pat.Patterns, &singlePattern{&charClass{ch}})
- case '$':
- if toplevel && (sc.NextPos() == sc.Length()-1 || sc.NextPos() == EOS) {
- pat.MustTail = true
- } else {
- pat.Patterns = append(pat.Patterns, &singlePattern{&charClass{ch}})
- }
- sc.Next()
- case EOS:
- sc.Next()
- goto exit
- default:
- sc.Next()
- pat.Patterns = append(pat.Patterns, &singlePattern{&charClass{ch}})
- }
- }
-exit:
- return pat
-}
-
-type iptr struct {
- insts []inst
- capture int
-}
-
-func compilePattern(p pattern, ps ...*iptr) []inst {
- var ptr *iptr
- toplevel := false
- if len(ps) == 0 {
- toplevel = true
- ptr = &iptr{[]inst{inst{opSave, nil, 0, -1}}, 2}
- } else {
- ptr = ps[0]
- }
- switch pat := p.(type) {
- case *singlePattern:
- ptr.insts = append(ptr.insts, inst{opChar, pat.Class, -1, -1})
- case *seqPattern:
- for _, cp := range pat.Patterns {
- compilePattern(cp, ptr)
- }
- case *repeatPattern:
- idx := len(ptr.insts)
- switch pat.Type {
- case '*':
- ptr.insts = append(ptr.insts,
- inst{opSplit, nil, idx + 1, idx + 3},
- inst{opChar, pat.Class, -1, -1},
- inst{opJmp, nil, idx, -1})
- case '+':
- ptr.insts = append(ptr.insts,
- inst{opChar, pat.Class, -1, -1},
- inst{opSplit, nil, idx, idx + 2})
- case '-':
- ptr.insts = append(ptr.insts,
- inst{opSplit, nil, idx + 3, idx + 1},
- inst{opChar, pat.Class, -1, -1},
- inst{opJmp, nil, idx, -1})
- case '?':
- ptr.insts = append(ptr.insts,
- inst{opSplit, nil, idx + 1, idx + 2},
- inst{opChar, pat.Class, -1, -1})
- }
- case *posCapPattern:
- ptr.insts = append(ptr.insts, inst{opPSave, nil, ptr.capture, -1})
- ptr.capture += 2
- case *capPattern:
- c0, c1 := ptr.capture, ptr.capture+1
- ptr.capture += 2
- ptr.insts = append(ptr.insts, inst{opSave, nil, c0, -1})
- compilePattern(pat.Pattern, ptr)
- ptr.insts = append(ptr.insts, inst{opSave, nil, c1, -1})
- case *bracePattern:
- ptr.insts = append(ptr.insts, inst{opBrace, nil, pat.Begin, pat.End})
- case *numberPattern:
- ptr.insts = append(ptr.insts, inst{opNumber, nil, pat.N, -1})
- }
- if toplevel {
- if p.(*seqPattern).MustTail {
- ptr.insts = append(ptr.insts, inst{opSave, nil, 1, -1}, inst{opTailMatch, nil, -1, -1})
- }
- ptr.insts = append(ptr.insts, inst{opSave, nil, 1, -1}, inst{opMatch, nil, -1, -1})
- }
- return ptr.insts
-}
-
-/* }}} parse */
-
-/* VM {{{ */
-
-// Simple recursive virtual machine based on the
-// "Regular Expression Matching: the Virtual Machine Approach" (https://swtch.com/~rsc/regexp/regexp2.html)
-func recursiveVM(src []byte, insts []inst, pc, sp int, ms ...*MatchData) (bool, int, *MatchData) {
- var m *MatchData
- if len(ms) == 0 {
- m = newMatchState()
- } else {
- m = ms[0]
- }
-redo:
- inst := insts[pc]
- switch inst.OpCode {
- case opChar:
- if sp >= len(src) || !inst.Class.Matches(int(src[sp])) {
- return false, sp, m
- }
- pc++
- sp++
- goto redo
- case opMatch:
- return true, sp, m
- case opTailMatch:
- return sp >= len(src), sp, m
- case opJmp:
- pc = inst.Operand1
- goto redo
- case opSplit:
- if ok, nsp, _ := recursiveVM(src, insts, inst.Operand1, sp, m); ok {
- return true, nsp, m
- }
- pc = inst.Operand2
- goto redo
- case opSave:
- s := m.setCapture(inst.Operand1, sp)
- if ok, nsp, _ := recursiveVM(src, insts, pc+1, sp, m); ok {
- return true, nsp, m
- }
- m.restoreCapture(inst.Operand1, s)
- return false, sp, m
- case opPSave:
- m.addPosCapture(inst.Operand1, sp+1)
- pc++
- goto redo
- case opBrace:
- if sp >= len(src) || int(src[sp]) != inst.Operand1 {
- return false, sp, m
- }
- count := 1
- for sp = sp + 1; sp < len(src); sp++ {
- if int(src[sp]) == inst.Operand2 {
- count--
- }
- if count == 0 {
- pc++
- sp++
- goto redo
- }
- if int(src[sp]) == inst.Operand1 {
- count++
- }
- }
- return false, sp, m
- case opNumber:
- idx := inst.Operand1 * 2
- if idx >= m.CaptureLength()-1 {
- panic(newError(_UNKNOWN, "invalid capture index"))
- }
- capture := src[m.Capture(idx):m.Capture(idx+1)]
- for i := 0; i < len(capture); i++ {
- if i+sp >= len(src) || capture[i] != src[i+sp] {
- return false, sp, m
- }
- }
- pc++
- sp += len(capture)
- goto redo
- }
- panic("should not reach here")
-}
-
-/* }}} */
-
-/* API {{{ */
-
-func Find(p string, src []byte, offset, limit int) (matches []*MatchData, err error) {
- defer func() {
- if v := recover(); v != nil {
- if perr, ok := v.(*Error); ok {
- err = perr
- } else {
- panic(v)
- }
- }
- }()
- pat := parsePattern(newScanner([]byte(p)), true)
- insts := compilePattern(pat)
- matches = []*MatchData{}
- for sp := offset; sp <= len(src); {
- ok, nsp, ms := recursiveVM(src, insts, 0, sp)
- sp++
- if ok {
- if sp < nsp {
- sp = nsp
- }
- matches = append(matches, ms)
- }
- if len(matches) == limit || pat.MustHead {
- break
- }
- }
- return
-}
-
-/* }}} */
diff --git a/vendor/github.com/yuin/gopher-lua/state.go b/vendor/github.com/yuin/gopher-lua/state.go
deleted file mode 100644
index a1ee672e..00000000
--- a/vendor/github.com/yuin/gopher-lua/state.go
+++ /dev/null
@@ -1,2244 +0,0 @@
-package lua
-
-////////////////////////////////////////////////////////
-// This file was generated by go-inline. DO NOT EDIT. //
-////////////////////////////////////////////////////////
-
-import (
- "context"
- "fmt"
- "io"
- "math"
- "os"
- "runtime"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/yuin/gopher-lua/parse"
-)
-
-const MultRet = -1
-const RegistryIndex = -10000
-const EnvironIndex = -10001
-const GlobalsIndex = -10002
-
-/* ApiError {{{ */
-
-type ApiError struct {
- Type ApiErrorType
- Object LValue
- StackTrace string
- // Underlying error. This attribute is set only if the Type is ApiErrorFile or ApiErrorSyntax
- Cause error
-}
-
-func newApiError(code ApiErrorType, object LValue) *ApiError {
- return &ApiError{code, object, "", nil}
-}
-
-func newApiErrorS(code ApiErrorType, message string) *ApiError {
- return newApiError(code, LString(message))
-}
-
-func newApiErrorE(code ApiErrorType, err error) *ApiError {
- return &ApiError{code, LString(err.Error()), "", err}
-}
-
-func (e *ApiError) Error() string {
- if len(e.StackTrace) > 0 {
- return fmt.Sprintf("%s\n%s", e.Object.String(), e.StackTrace)
- }
- return e.Object.String()
-}
-
-type ApiErrorType int
-
-const (
- ApiErrorSyntax ApiErrorType = iota
- ApiErrorFile
- ApiErrorRun
- ApiErrorError
- ApiErrorPanic
-)
-
-/* }}} */
-
-/* ResumeState {{{ */
-
-type ResumeState int
-
-const (
- ResumeOK ResumeState = iota
- ResumeYield
- ResumeError
-)
-
-/* }}} */
-
-/* P {{{ */
-
-type P struct {
- Fn LValue
- NRet int
- Protect bool
- Handler *LFunction
-}
-
-/* }}} */
-
-/* Options {{{ */
-
-// Options is a configuration that is used to create a new LState.
-type Options struct {
- // Call stack size. This defaults to `lua.CallStackSize`.
- CallStackSize int
- // Data stack size. This defaults to `lua.RegistrySize`.
- RegistrySize int
- // Allow the registry to grow from the registry size specified up to a value of RegistryMaxSize. A value of 0
- // indicates no growth is permitted. The registry will not shrink again after any growth.
- RegistryMaxSize int
- // If growth is enabled, step up by an additional `RegistryGrowStep` each time to avoid having to resize too often.
- // This defaults to `lua.RegistryGrowStep`
- RegistryGrowStep int
- // Controls whether or not libraries are opened by default
- SkipOpenLibs bool
- // Tells whether a Go stacktrace should be included in a Lua stacktrace when panics occur.
- IncludeGoStackTrace bool
- // If `MinimizeStackMemory` is set, the call stack will be automatically grown or shrank up to a limit of
- // `CallStackSize` in order to minimize memory usage. This does incur a slight performance penalty.
- MinimizeStackMemory bool
-}
-
-/* }}} */
-
-/* Debug {{{ */
-
-type Debug struct {
- frame *callFrame
- Name string
- What string
- Source string
- CurrentLine int
- NUpvalues int
- LineDefined int
- LastLineDefined int
-}
-
-/* }}} */
-
-/* callFrame {{{ */
-
-type callFrame struct {
- Idx int
- Fn *LFunction
- Parent *callFrame
- Pc int
- Base int
- LocalBase int
- ReturnBase int
- NArgs int
- NRet int
- TailCall int
-}
-
-type callFrameStack interface {
- Push(v callFrame)
- Pop() *callFrame
- Last() *callFrame
-
- SetSp(sp int)
- Sp() int
- At(sp int) *callFrame
-
- IsFull() bool
- IsEmpty() bool
-
- FreeAll()
-}
-
-type fixedCallFrameStack struct {
- array []callFrame
- sp int
-}
-
-func newFixedCallFrameStack(size int) callFrameStack {
- return &fixedCallFrameStack{
- array: make([]callFrame, size),
- sp: 0,
- }
-}
-
-func (cs *fixedCallFrameStack) IsEmpty() bool { return cs.sp == 0 }
-
-func (cs *fixedCallFrameStack) IsFull() bool { return cs.sp == len(cs.array) }
-
-func (cs *fixedCallFrameStack) Clear() {
- cs.sp = 0
-}
-
-func (cs *fixedCallFrameStack) Push(v callFrame) {
- cs.array[cs.sp] = v
- cs.array[cs.sp].Idx = cs.sp
- cs.sp++
-}
-
-func (cs *fixedCallFrameStack) Sp() int {
- return cs.sp
-}
-
-func (cs *fixedCallFrameStack) SetSp(sp int) {
- cs.sp = sp
-}
-
-func (cs *fixedCallFrameStack) Last() *callFrame {
- if cs.sp == 0 {
- return nil
- }
- return &cs.array[cs.sp-1]
-}
-
-func (cs *fixedCallFrameStack) At(sp int) *callFrame {
- return &cs.array[sp]
-}
-
-func (cs *fixedCallFrameStack) Pop() *callFrame {
- cs.sp--
- return &cs.array[cs.sp]
-}
-
-func (cs *fixedCallFrameStack) FreeAll() {
- // nothing to do for fixed callframestack
-}
-
-// FramesPerSegment should be a power of 2 constant for performance reasons. It will allow the go compiler to change
-// the divs and mods into bitshifts. Max is 256 due to current use of uint8 to count how many frames in a segment are
-// used.
-const FramesPerSegment = 8
-
-type callFrameStackSegment struct {
- array [FramesPerSegment]callFrame
-}
-type segIdx uint16
-type autoGrowingCallFrameStack struct {
- segments []*callFrameStackSegment
- segIdx segIdx
- // segSp is the number of frames in the current segment which are used. Full 'sp' value is segIdx * FramesPerSegment + segSp.
- // It points to the next stack slot to use, so 0 means to use the 0th element in the segment, and a value of
- // FramesPerSegment indicates that the segment is full and cannot accommodate another frame.
- segSp uint8
-}
-
-var segmentPool sync.Pool
-
-func newCallFrameStackSegment() *callFrameStackSegment {
- seg := segmentPool.Get()
- if seg == nil {
- return &callFrameStackSegment{}
- }
- return seg.(*callFrameStackSegment)
-}
-
-func freeCallFrameStackSegment(seg *callFrameStackSegment) {
- segmentPool.Put(seg)
-}
-
-// newCallFrameStack allocates a new stack for a lua state, which will auto grow up to a max size of at least maxSize.
-// it will actually grow up to the next segment size multiple after maxSize, where the segment size is dictated by
-// FramesPerSegment.
-func newAutoGrowingCallFrameStack(maxSize int) callFrameStack {
- cs := &autoGrowingCallFrameStack{
- segments: make([]*callFrameStackSegment, (maxSize+(FramesPerSegment-1))/FramesPerSegment),
- segIdx: 0,
- }
- cs.segments[0] = newCallFrameStackSegment()
- return cs
-}
-
-func (cs *autoGrowingCallFrameStack) IsEmpty() bool {
- return cs.segIdx == 0 && cs.segSp == 0
-}
-
-// IsFull returns true if the stack cannot receive any more stack pushes without overflowing
-func (cs *autoGrowingCallFrameStack) IsFull() bool {
- return int(cs.segIdx) == len(cs.segments) && cs.segSp >= FramesPerSegment
-}
-
-func (cs *autoGrowingCallFrameStack) Clear() {
- for i := segIdx(1); i <= cs.segIdx; i++ {
- freeCallFrameStackSegment(cs.segments[i])
- cs.segments[i] = nil
- }
- cs.segIdx = 0
- cs.segSp = 0
-}
-
-func (cs *autoGrowingCallFrameStack) FreeAll() {
- for i := segIdx(0); i <= cs.segIdx; i++ {
- freeCallFrameStackSegment(cs.segments[i])
- cs.segments[i] = nil
- }
-}
-
-// Push pushes the passed callFrame onto the stack. it panics if the stack is full, caller should call IsFull() before
-// invoking this to avoid this.
-func (cs *autoGrowingCallFrameStack) Push(v callFrame) {
- curSeg := cs.segments[cs.segIdx]
- if cs.segSp >= FramesPerSegment {
- // segment full, push new segment if allowed
- if cs.segIdx < segIdx(len(cs.segments)-1) {
- curSeg = newCallFrameStackSegment()
- cs.segIdx++
- cs.segments[cs.segIdx] = curSeg
- cs.segSp = 0
- } else {
- panic("lua callstack overflow")
- }
- }
- curSeg.array[cs.segSp] = v
- curSeg.array[cs.segSp].Idx = int(cs.segSp) + FramesPerSegment*int(cs.segIdx)
- cs.segSp++
-}
-
-// Sp retrieves the current stack depth, which is the number of frames currently pushed on the stack.
-func (cs *autoGrowingCallFrameStack) Sp() int {
- return int(cs.segSp) + int(cs.segIdx)*FramesPerSegment
-}
-
-// SetSp can be used to rapidly unwind the stack, freeing all stack frames on the way. It should not be used to
-// allocate new stack space, use Push() for that.
-func (cs *autoGrowingCallFrameStack) SetSp(sp int) {
- desiredSegIdx := segIdx(sp / FramesPerSegment)
- desiredFramesInLastSeg := uint8(sp % FramesPerSegment)
- for {
- if cs.segIdx <= desiredSegIdx {
- break
- }
- freeCallFrameStackSegment(cs.segments[cs.segIdx])
- cs.segments[cs.segIdx] = nil
- cs.segIdx--
- }
- cs.segSp = desiredFramesInLastSeg
-}
-
-func (cs *autoGrowingCallFrameStack) Last() *callFrame {
- curSeg := cs.segments[cs.segIdx]
- segSp := cs.segSp
- if segSp == 0 {
- if cs.segIdx == 0 {
- return nil
- }
- curSeg = cs.segments[cs.segIdx-1]
- segSp = FramesPerSegment
- }
- return &curSeg.array[segSp-1]
-}
-
-func (cs *autoGrowingCallFrameStack) At(sp int) *callFrame {
- segIdx := segIdx(sp / FramesPerSegment)
- frameIdx := uint8(sp % FramesPerSegment)
- return &cs.segments[segIdx].array[frameIdx]
-}
-
-// Pop pops off the most recent stack frame and returns it
-func (cs *autoGrowingCallFrameStack) Pop() *callFrame {
- curSeg := cs.segments[cs.segIdx]
- if cs.segSp == 0 {
- if cs.segIdx == 0 {
- // stack empty
- return nil
- }
- freeCallFrameStackSegment(curSeg)
- cs.segments[cs.segIdx] = nil
- cs.segIdx--
- cs.segSp = FramesPerSegment
- curSeg = cs.segments[cs.segIdx]
- }
- cs.segSp--
- return &curSeg.array[cs.segSp]
-}
-
-/* }}} */
-
-/* registry {{{ */
-
-type registryHandler interface {
- registryOverflow()
-}
-type registry struct {
- array []LValue
- top int
- growBy int
- maxSize int
- alloc *allocator
- handler registryHandler
-}
-
-func newRegistry(handler registryHandler, initialSize int, growBy int, maxSize int, alloc *allocator) *registry {
- return ®istry{make([]LValue, initialSize), 0, growBy, maxSize, alloc, handler}
-}
-
-func (rg *registry) checkSize(requiredSize int) { // +inline-start
- if requiredSize > cap(rg.array) {
- rg.resize(requiredSize)
- }
-} // +inline-end
-
-func (rg *registry) resize(requiredSize int) { // +inline-start
- newSize := requiredSize + rg.growBy // give some padding
- if newSize > rg.maxSize {
- newSize = rg.maxSize
- }
- if newSize < requiredSize {
- rg.handler.registryOverflow()
- return
- }
- rg.forceResize(newSize)
-} // +inline-end
-
-func (rg *registry) forceResize(newSize int) {
- newSlice := make([]LValue, newSize)
- copy(newSlice, rg.array[:rg.top]) // should we copy the area beyond top? there shouldn't be any valid values there so it shouldn't be necessary.
- rg.array = newSlice
-}
-func (rg *registry) SetTop(top int) {
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go'
- {
- requiredSize := top
- if requiredSize > cap(rg.array) {
- rg.resize(requiredSize)
- }
- }
- oldtop := rg.top
- rg.top = top
- for i := oldtop; i < rg.top; i++ {
- rg.array[i] = LNil
- }
- // values beyond top don't need to be valid LValues, so setting them to nil is fine
- // setting them to nil rather than LNil lets us invoke the golang memclr opto
- if rg.top < oldtop {
- nilRange := rg.array[rg.top:oldtop]
- for i := range nilRange {
- nilRange[i] = nil
- }
- }
- //for i := rg.top; i < oldtop; i++ {
- // rg.array[i] = LNil
- //}
-}
-
-func (rg *registry) Top() int {
- return rg.top
-}
-
-func (rg *registry) Push(v LValue) {
- newSize := rg.top + 1
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go'
- {
- requiredSize := newSize
- if requiredSize > cap(rg.array) {
- rg.resize(requiredSize)
- }
- }
- rg.array[rg.top] = v
- rg.top++
-}
-
-func (rg *registry) Pop() LValue {
- v := rg.array[rg.top-1]
- rg.array[rg.top-1] = LNil
- rg.top--
- return v
-}
-
-func (rg *registry) Get(reg int) LValue {
- return rg.array[reg]
-}
-
-// CopyRange will move a section of values from index `start` to index `regv`
-// It will move `n` values.
-// `limit` specifies the maximum end range that can be copied from. If it's set to -1, then it defaults to stopping at
-// the top of the registry (values beyond the top are not initialized, so if specifying an alternative `limit` you should
-// pass a value <= rg.top.
-// If start+n is beyond the limit, then nil values will be copied to the destination slots.
-// After the copy, the registry is truncated to be at the end of the copied range, ie the original of the copied values
-// are nilled out. (So top will be regv+n)
-// CopyRange should ideally be renamed to MoveRange.
-func (rg *registry) CopyRange(regv, start, limit, n int) { // +inline-start
- newSize := regv + n
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go'
- {
- requiredSize := newSize
- if requiredSize > cap(rg.array) {
- rg.resize(requiredSize)
- }
- }
- if limit == -1 || limit > rg.top {
- limit = rg.top
- }
- for i := 0; i < n; i++ {
- srcIdx := start + i
- if srcIdx >= limit || srcIdx < 0 {
- rg.array[regv+i] = LNil
- } else {
- rg.array[regv+i] = rg.array[srcIdx]
- }
- }
-
- // values beyond top don't need to be valid LValues, so setting them to nil is fine
- // setting them to nil rather than LNil lets us invoke the golang memclr opto
- oldtop := rg.top
- rg.top = regv + n
- if rg.top < oldtop {
- nilRange := rg.array[rg.top:oldtop]
- for i := range nilRange {
- nilRange[i] = nil
- }
- }
-} // +inline-end
-
-// FillNil fills the registry with nil values from regm to regm+n and then sets the registry top to regm+n
-func (rg *registry) FillNil(regm, n int) { // +inline-start
- newSize := regm + n
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go'
- {
- requiredSize := newSize
- if requiredSize > cap(rg.array) {
- rg.resize(requiredSize)
- }
- }
- for i := 0; i < n; i++ {
- rg.array[regm+i] = LNil
- }
- // values beyond top don't need to be valid LValues, so setting them to nil is fine
- // setting them to nil rather than LNil lets us invoke the golang memclr opto
- oldtop := rg.top
- rg.top = regm + n
- if rg.top < oldtop {
- nilRange := rg.array[rg.top:oldtop]
- for i := range nilRange {
- nilRange[i] = nil
- }
- }
-} // +inline-end
-
-func (rg *registry) Insert(value LValue, reg int) {
- top := rg.Top()
- if reg >= top {
- rg.Set(reg, value)
- return
- }
- top--
- for ; top >= reg; top-- {
- // FIXME consider using copy() here if Insert() is called enough
- rg.Set(top+1, rg.Get(top))
- }
- rg.Set(reg, value)
-}
-
-func (rg *registry) Set(reg int, val LValue) {
- newSize := reg + 1
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go'
- {
- requiredSize := newSize
- if requiredSize > cap(rg.array) {
- rg.resize(requiredSize)
- }
- }
- rg.array[reg] = val
- if reg >= rg.top {
- rg.top = reg + 1
- }
-}
-
-func (rg *registry) SetNumber(reg int, val LNumber) {
- newSize := reg + 1
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go'
- {
- requiredSize := newSize
- if requiredSize > cap(rg.array) {
- rg.resize(requiredSize)
- }
- }
- rg.array[reg] = rg.alloc.LNumber2I(val)
- if reg >= rg.top {
- rg.top = reg + 1
- }
-}
-
-func (rg *registry) IsFull() bool {
- return rg.top >= cap(rg.array)
-}
-
-/* }}} */
-
-/* Global {{{ */
-
-func newGlobal() *Global {
- return &Global{
- MainThread: nil,
- Registry: newLTable(0, 32),
- Global: newLTable(0, 64),
- builtinMts: make(map[int]LValue),
- tempFiles: make([]*os.File, 0, 10),
- }
-}
-
-/* }}} */
-
-/* package local methods {{{ */
-
-func panicWithTraceback(L *LState) {
- err := newApiError(ApiErrorRun, L.Get(-1))
- err.StackTrace = L.stackTrace(0)
- panic(err)
-}
-
-func panicWithoutTraceback(L *LState) {
- err := newApiError(ApiErrorRun, L.Get(-1))
- panic(err)
-}
-
-func newLState(options Options) *LState {
- al := newAllocator(32)
- ls := &LState{
- G: newGlobal(),
- Parent: nil,
- Panic: panicWithTraceback,
- Dead: false,
- Options: options,
-
- stop: 0,
- alloc: al,
- currentFrame: nil,
- wrapped: false,
- uvcache: nil,
- hasErrorFunc: false,
- mainLoop: mainLoop,
- ctx: nil,
- }
- if options.MinimizeStackMemory {
- ls.stack = newAutoGrowingCallFrameStack(options.CallStackSize)
- } else {
- ls.stack = newFixedCallFrameStack(options.CallStackSize)
- }
- ls.reg = newRegistry(ls, options.RegistrySize, options.RegistryGrowStep, options.RegistryMaxSize, al)
- ls.Env = ls.G.Global
- return ls
-}
-
-func (ls *LState) printReg() {
- println("-------------------------")
- println("thread:", ls)
- println("top:", ls.reg.Top())
- if ls.currentFrame != nil {
- println("function base:", ls.currentFrame.Base)
- println("return base:", ls.currentFrame.ReturnBase)
- } else {
- println("(vm not started)")
- }
- println("local base:", ls.currentLocalBase())
- for i := 0; i < ls.reg.Top(); i++ {
- println(i, ls.reg.Get(i).String())
- }
- println("-------------------------")
-}
-
-func (ls *LState) printCallStack() {
- println("-------------------------")
- for i := 0; i < ls.stack.Sp(); i++ {
- print(i)
- print(" ")
- frame := ls.stack.At(i)
- if frame == nil {
- break
- }
- if frame.Fn.IsG {
- println("IsG:", true, "Frame:", frame, "Fn:", frame.Fn)
- } else {
- println("IsG:", false, "Frame:", frame, "Fn:", frame.Fn, "pc:", frame.Pc)
- }
- }
- println("-------------------------")
-}
-
-func (ls *LState) closeAllUpvalues() { // +inline-start
- for cf := ls.currentFrame; cf != nil; cf = cf.Parent {
- if !cf.Fn.IsG {
- ls.closeUpvalues(cf.LocalBase)
- }
- }
-} // +inline-end
-
-func (ls *LState) raiseError(level int, format string, args ...interface{}) {
- if !ls.hasErrorFunc {
- ls.closeAllUpvalues()
- }
- message := format
- if len(args) > 0 {
- message = fmt.Sprintf(format, args...)
- }
- if level > 0 {
- message = fmt.Sprintf("%v %v", ls.where(level-1, true), message)
- }
- if ls.reg.IsFull() {
- // if the registry is full then it won't be possible to push a value, in this case, force a larger size
- ls.reg.forceResize(ls.reg.Top() + 1)
- }
- ls.reg.Push(LString(message))
- ls.Panic(ls)
-}
-
-func (ls *LState) findLocal(frame *callFrame, no int) string {
- fn := frame.Fn
- if !fn.IsG {
- if name, ok := fn.LocalName(no, frame.Pc-1); ok {
- return name
- }
- }
- var top int
- if ls.currentFrame == frame {
- top = ls.reg.Top()
- } else if frame.Idx+1 < ls.stack.Sp() {
- top = ls.stack.At(frame.Idx + 1).Base
- } else {
- return ""
- }
- if top-frame.LocalBase >= no {
- return "(*temporary)"
- }
- return ""
-}
-
-func (ls *LState) where(level int, skipg bool) string {
- dbg, ok := ls.GetStack(level)
- if !ok {
- return ""
- }
- cf := dbg.frame
- proto := cf.Fn.Proto
- sourcename := "[G]"
- if proto != nil {
- sourcename = proto.SourceName
- } else if skipg {
- return ls.where(level+1, skipg)
- }
- line := ""
- if proto != nil {
- line = fmt.Sprintf("%v:", proto.DbgSourcePositions[cf.Pc-1])
- }
- return fmt.Sprintf("%v:%v", sourcename, line)
-}
-
-func (ls *LState) stackTrace(level int) string {
- buf := []string{}
- header := "stack traceback:"
- if ls.currentFrame != nil {
- i := 0
- for dbg, ok := ls.GetStack(i); ok; dbg, ok = ls.GetStack(i) {
- cf := dbg.frame
- buf = append(buf, fmt.Sprintf("\t%v in %v", ls.Where(i), ls.formattedFrameFuncName(cf)))
- if !cf.Fn.IsG && cf.TailCall > 0 {
- for tc := cf.TailCall; tc > 0; tc-- {
- buf = append(buf, "\t(tailcall): ?")
- i++
- }
- }
- i++
- }
- }
- buf = append(buf, fmt.Sprintf("\t%v: %v", "[G]", "?"))
- buf = buf[intMax(0, intMin(level, len(buf))):len(buf)]
- if len(buf) > 20 {
- newbuf := make([]string, 0, 20)
- newbuf = append(newbuf, buf[0:7]...)
- newbuf = append(newbuf, "\t...")
- newbuf = append(newbuf, buf[len(buf)-7:len(buf)]...)
- buf = newbuf
- }
- return fmt.Sprintf("%s\n%s", header, strings.Join(buf, "\n"))
-}
-
-func (ls *LState) formattedFrameFuncName(fr *callFrame) string {
- name, ischunk := ls.frameFuncName(fr)
- if ischunk {
- return name
- }
- if name[0] != '(' && name[0] != '<' {
- return fmt.Sprintf("function '%s'", name)
- }
- return fmt.Sprintf("function %s", name)
-}
-
-func (ls *LState) rawFrameFuncName(fr *callFrame) string {
- name, _ := ls.frameFuncName(fr)
- return name
-}
-
-func (ls *LState) frameFuncName(fr *callFrame) (string, bool) {
- frame := fr.Parent
- if frame == nil {
- if ls.Parent == nil {
- return "main chunk", true
- } else {
- return "corountine", true
- }
- }
- if !frame.Fn.IsG {
- pc := frame.Pc - 1
- for _, call := range frame.Fn.Proto.DbgCalls {
- if call.Pc == pc {
- name := call.Name
- if (name == "?" || fr.TailCall > 0) && !fr.Fn.IsG {
- name = fmt.Sprintf("<%v:%v>", fr.Fn.Proto.SourceName, fr.Fn.Proto.LineDefined)
- }
- return name, false
- }
- }
- }
- if !fr.Fn.IsG {
- return fmt.Sprintf("<%v:%v>", fr.Fn.Proto.SourceName, fr.Fn.Proto.LineDefined), false
- }
- return "(anonymous)", false
-}
-
-func (ls *LState) isStarted() bool {
- return ls.currentFrame != nil
-}
-
-func (ls *LState) kill() {
- ls.Dead = true
-}
-
-func (ls *LState) indexToReg(idx int) int {
- base := ls.currentLocalBase()
- if idx > 0 {
- return base + idx - 1
- } else if idx == 0 {
- return -1
- } else {
- tidx := ls.reg.Top() + idx
- if tidx < base {
- return -1
- }
- return tidx
- }
-}
-
-func (ls *LState) currentLocalBase() int {
- base := 0
- if ls.currentFrame != nil {
- base = ls.currentFrame.LocalBase
- }
- return base
-}
-
-func (ls *LState) currentEnv() *LTable {
- return ls.Env
- /*
- if ls.currentFrame == nil {
- return ls.Env
- }
- return ls.currentFrame.Fn.Env
- */
-}
-
-func (ls *LState) rkValue(idx int) LValue {
- /*
- if OpIsK(idx) {
- return ls.currentFrame.Fn.Proto.Constants[opIndexK(idx)]
- }
- return ls.reg.Get(ls.currentFrame.LocalBase + idx)
- */
- if (idx & opBitRk) != 0 {
- return ls.currentFrame.Fn.Proto.Constants[idx & ^opBitRk]
- }
- return ls.reg.array[ls.currentFrame.LocalBase+idx]
-}
-
-func (ls *LState) rkString(idx int) string {
- if (idx & opBitRk) != 0 {
- return ls.currentFrame.Fn.Proto.stringConstants[idx & ^opBitRk]
- }
- return string(ls.reg.array[ls.currentFrame.LocalBase+idx].(LString))
-}
-
-func (ls *LState) closeUpvalues(idx int) { // +inline-start
- if ls.uvcache != nil {
- var prev *Upvalue
- for uv := ls.uvcache; uv != nil; uv = uv.next {
- if uv.index >= idx {
- if prev != nil {
- prev.next = nil
- } else {
- ls.uvcache = nil
- }
- uv.Close()
- }
- prev = uv
- }
- }
-} // +inline-end
-
-func (ls *LState) findUpvalue(idx int) *Upvalue {
- var prev *Upvalue
- var next *Upvalue
- if ls.uvcache != nil {
- for uv := ls.uvcache; uv != nil; uv = uv.next {
- if uv.index == idx {
- return uv
- }
- if uv.index > idx {
- next = uv
- break
- }
- prev = uv
- }
- }
- uv := &Upvalue{reg: ls.reg, index: idx, closed: false}
- if prev != nil {
- prev.next = uv
- } else {
- ls.uvcache = uv
- }
- if next != nil {
- uv.next = next
- }
- return uv
-}
-
-func (ls *LState) metatable(lvalue LValue, rawget bool) LValue {
- var metatable LValue = LNil
- switch obj := lvalue.(type) {
- case *LTable:
- metatable = obj.Metatable
- case *LUserData:
- metatable = obj.Metatable
- default:
- if table, ok := ls.G.builtinMts[int(obj.Type())]; ok {
- metatable = table
- }
- }
-
- if !rawget && metatable != LNil {
- oldmt := metatable
- if tb, ok := metatable.(*LTable); ok {
- metatable = tb.RawGetString("__metatable")
- if metatable == LNil {
- metatable = oldmt
- }
- }
- }
-
- return metatable
-}
-
-func (ls *LState) metaOp1(lvalue LValue, event string) LValue {
- if mt := ls.metatable(lvalue, true); mt != LNil {
- if tb, ok := mt.(*LTable); ok {
- return tb.RawGetString(event)
- }
- }
- return LNil
-}
-
-func (ls *LState) metaOp2(value1, value2 LValue, event string) LValue {
- if mt := ls.metatable(value1, true); mt != LNil {
- if tb, ok := mt.(*LTable); ok {
- if ret := tb.RawGetString(event); ret != LNil {
- return ret
- }
- }
- }
- if mt := ls.metatable(value2, true); mt != LNil {
- if tb, ok := mt.(*LTable); ok {
- return tb.RawGetString(event)
- }
- }
- return LNil
-}
-
-func (ls *LState) metaCall(lvalue LValue) (*LFunction, bool) {
- if fn, ok := lvalue.(*LFunction); ok {
- return fn, false
- }
- if fn, ok := ls.metaOp1(lvalue, "__call").(*LFunction); ok {
- return fn, true
- }
- return nil, false
-}
-
-func (ls *LState) initCallFrame(cf *callFrame) { // +inline-start
- if cf.Fn.IsG {
- ls.reg.SetTop(cf.LocalBase + cf.NArgs)
- } else {
- proto := cf.Fn.Proto
- nargs := cf.NArgs
- np := int(proto.NumParameters)
- if nargs < np {
- // default any missing arguments to nil
- newSize := cf.LocalBase + np
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go'
- {
- rg := ls.reg
- requiredSize := newSize
- if requiredSize > cap(rg.array) {
- rg.resize(requiredSize)
- }
- }
- for i := nargs; i < np; i++ {
- ls.reg.array[cf.LocalBase+i] = LNil
- }
- nargs = np
- ls.reg.top = newSize
- }
-
- if (proto.IsVarArg & VarArgIsVarArg) == 0 {
- if nargs < int(proto.NumUsedRegisters) {
- nargs = int(proto.NumUsedRegisters)
- }
- newSize := cf.LocalBase + nargs
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go'
- {
- rg := ls.reg
- requiredSize := newSize
- if requiredSize > cap(rg.array) {
- rg.resize(requiredSize)
- }
- }
- for i := np; i < nargs; i++ {
- ls.reg.array[cf.LocalBase+i] = LNil
- }
- ls.reg.top = cf.LocalBase + int(proto.NumUsedRegisters)
- } else {
- /* swap vararg positions:
- closure
- namedparam1 <- lbase
- namedparam2
- vararg1
- vararg2
-
- TO
-
- closure
- nil
- nil
- vararg1
- vararg2
- namedparam1 <- lbase
- namedparam2
- */
- nvarargs := nargs - np
- if nvarargs < 0 {
- nvarargs = 0
- }
-
- ls.reg.SetTop(cf.LocalBase + nargs + np)
- for i := 0; i < np; i++ {
- //ls.reg.Set(cf.LocalBase+nargs+i, ls.reg.Get(cf.LocalBase+i))
- ls.reg.array[cf.LocalBase+nargs+i] = ls.reg.array[cf.LocalBase+i]
- //ls.reg.Set(cf.LocalBase+i, LNil)
- ls.reg.array[cf.LocalBase+i] = LNil
- }
-
- if CompatVarArg {
- ls.reg.SetTop(cf.LocalBase + nargs + np + 1)
- if (proto.IsVarArg & VarArgNeedsArg) != 0 {
- argtb := newLTable(nvarargs, 0)
- for i := 0; i < nvarargs; i++ {
- argtb.RawSetInt(i+1, ls.reg.Get(cf.LocalBase+np+i))
- }
- argtb.RawSetString("n", LNumber(nvarargs))
- //ls.reg.Set(cf.LocalBase+nargs+np, argtb)
- ls.reg.array[cf.LocalBase+nargs+np] = argtb
- } else {
- ls.reg.array[cf.LocalBase+nargs+np] = LNil
- }
- }
- cf.LocalBase += nargs
- maxreg := cf.LocalBase + int(proto.NumUsedRegisters)
- ls.reg.SetTop(maxreg)
- }
- }
-} // +inline-end
-
-func (ls *LState) pushCallFrame(cf callFrame, fn LValue, meta bool) { // +inline-start
- if meta {
- cf.NArgs++
- ls.reg.Insert(fn, cf.LocalBase)
- }
- if cf.Fn == nil {
- ls.RaiseError("attempt to call a non-function object")
- }
- if ls.stack.IsFull() {
- ls.RaiseError("stack overflow")
- }
- ls.stack.Push(cf)
- newcf := ls.stack.Last()
- // this section is inlined by go-inline
- // source function is 'func (ls *LState) initCallFrame(cf *callFrame) ' in '_state.go'
- {
- cf := newcf
- if cf.Fn.IsG {
- ls.reg.SetTop(cf.LocalBase + cf.NArgs)
- } else {
- proto := cf.Fn.Proto
- nargs := cf.NArgs
- np := int(proto.NumParameters)
- if nargs < np {
- // default any missing arguments to nil
- newSize := cf.LocalBase + np
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go'
- {
- rg := ls.reg
- requiredSize := newSize
- if requiredSize > cap(rg.array) {
- rg.resize(requiredSize)
- }
- }
- for i := nargs; i < np; i++ {
- ls.reg.array[cf.LocalBase+i] = LNil
- }
- nargs = np
- ls.reg.top = newSize
- }
-
- if (proto.IsVarArg & VarArgIsVarArg) == 0 {
- if nargs < int(proto.NumUsedRegisters) {
- nargs = int(proto.NumUsedRegisters)
- }
- newSize := cf.LocalBase + nargs
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go'
- {
- rg := ls.reg
- requiredSize := newSize
- if requiredSize > cap(rg.array) {
- rg.resize(requiredSize)
- }
- }
- for i := np; i < nargs; i++ {
- ls.reg.array[cf.LocalBase+i] = LNil
- }
- ls.reg.top = cf.LocalBase + int(proto.NumUsedRegisters)
- } else {
- /* swap vararg positions:
- closure
- namedparam1 <- lbase
- namedparam2
- vararg1
- vararg2
-
- TO
-
- closure
- nil
- nil
- vararg1
- vararg2
- namedparam1 <- lbase
- namedparam2
- */
- nvarargs := nargs - np
- if nvarargs < 0 {
- nvarargs = 0
- }
-
- ls.reg.SetTop(cf.LocalBase + nargs + np)
- for i := 0; i < np; i++ {
- //ls.reg.Set(cf.LocalBase+nargs+i, ls.reg.Get(cf.LocalBase+i))
- ls.reg.array[cf.LocalBase+nargs+i] = ls.reg.array[cf.LocalBase+i]
- //ls.reg.Set(cf.LocalBase+i, LNil)
- ls.reg.array[cf.LocalBase+i] = LNil
- }
-
- if CompatVarArg {
- ls.reg.SetTop(cf.LocalBase + nargs + np + 1)
- if (proto.IsVarArg & VarArgNeedsArg) != 0 {
- argtb := newLTable(nvarargs, 0)
- for i := 0; i < nvarargs; i++ {
- argtb.RawSetInt(i+1, ls.reg.Get(cf.LocalBase+np+i))
- }
- argtb.RawSetString("n", LNumber(nvarargs))
- //ls.reg.Set(cf.LocalBase+nargs+np, argtb)
- ls.reg.array[cf.LocalBase+nargs+np] = argtb
- } else {
- ls.reg.array[cf.LocalBase+nargs+np] = LNil
- }
- }
- cf.LocalBase += nargs
- maxreg := cf.LocalBase + int(proto.NumUsedRegisters)
- ls.reg.SetTop(maxreg)
- }
- }
- }
- ls.currentFrame = newcf
-} // +inline-end
-
-func (ls *LState) callR(nargs, nret, rbase int) {
- base := ls.reg.Top() - nargs - 1
- if rbase < 0 {
- rbase = base
- }
- lv := ls.reg.Get(base)
- fn, meta := ls.metaCall(lv)
- ls.pushCallFrame(callFrame{
- Fn: fn,
- Pc: 0,
- Base: base,
- LocalBase: base + 1,
- ReturnBase: rbase,
- NArgs: nargs,
- NRet: nret,
- Parent: ls.currentFrame,
- TailCall: 0,
- }, lv, meta)
- if ls.G.MainThread == nil {
- ls.G.MainThread = ls
- ls.G.CurrentThread = ls
- ls.mainLoop(ls, nil)
- } else {
- ls.mainLoop(ls, ls.currentFrame)
- }
- if nret != MultRet {
- ls.reg.SetTop(rbase + nret)
- }
-}
-
-func (ls *LState) getField(obj LValue, key LValue) LValue {
- curobj := obj
- for i := 0; i < MaxTableGetLoop; i++ {
- tb, istable := curobj.(*LTable)
- if istable {
- ret := tb.RawGet(key)
- if ret != LNil {
- return ret
- }
- }
- metaindex := ls.metaOp1(curobj, "__index")
- if metaindex == LNil {
- if !istable {
- ls.RaiseError("attempt to index a non-table object(%v) with key '%s'", curobj.Type().String(), key.String())
- }
- return LNil
- }
- if metaindex.Type() == LTFunction {
- ls.reg.Push(metaindex)
- ls.reg.Push(curobj)
- ls.reg.Push(key)
- ls.Call(2, 1)
- return ls.reg.Pop()
- } else {
- curobj = metaindex
- }
- }
- ls.RaiseError("too many recursions in gettable")
- return nil
-}
-
-func (ls *LState) getFieldString(obj LValue, key string) LValue {
- curobj := obj
- for i := 0; i < MaxTableGetLoop; i++ {
- tb, istable := curobj.(*LTable)
- if istable {
- ret := tb.RawGetString(key)
- if ret != LNil {
- return ret
- }
- }
- metaindex := ls.metaOp1(curobj, "__index")
- if metaindex == LNil {
- if !istable {
- ls.RaiseError("attempt to index a non-table object(%v) with key '%s'", curobj.Type().String(), key)
- }
- return LNil
- }
- if metaindex.Type() == LTFunction {
- ls.reg.Push(metaindex)
- ls.reg.Push(curobj)
- ls.reg.Push(LString(key))
- ls.Call(2, 1)
- return ls.reg.Pop()
- } else {
- curobj = metaindex
- }
- }
- ls.RaiseError("too many recursions in gettable")
- return nil
-}
-
-func (ls *LState) setField(obj LValue, key LValue, value LValue) {
- curobj := obj
- for i := 0; i < MaxTableGetLoop; i++ {
- tb, istable := curobj.(*LTable)
- if istable {
- if tb.RawGet(key) != LNil {
- ls.RawSet(tb, key, value)
- return
- }
- }
- metaindex := ls.metaOp1(curobj, "__newindex")
- if metaindex == LNil {
- if !istable {
- ls.RaiseError("attempt to index a non-table object(%v) with key '%s'", curobj.Type().String(), key.String())
- }
- ls.RawSet(tb, key, value)
- return
- }
- if metaindex.Type() == LTFunction {
- ls.reg.Push(metaindex)
- ls.reg.Push(curobj)
- ls.reg.Push(key)
- ls.reg.Push(value)
- ls.Call(3, 0)
- return
- } else {
- curobj = metaindex
- }
- }
- ls.RaiseError("too many recursions in settable")
-}
-
-func (ls *LState) setFieldString(obj LValue, key string, value LValue) {
- curobj := obj
- for i := 0; i < MaxTableGetLoop; i++ {
- tb, istable := curobj.(*LTable)
- if istable {
- if tb.RawGetString(key) != LNil {
- tb.RawSetString(key, value)
- return
- }
- }
- metaindex := ls.metaOp1(curobj, "__newindex")
- if metaindex == LNil {
- if !istable {
- ls.RaiseError("attempt to index a non-table object(%v) with key '%s'", curobj.Type().String(), key)
- }
- tb.RawSetString(key, value)
- return
- }
- if metaindex.Type() == LTFunction {
- ls.reg.Push(metaindex)
- ls.reg.Push(curobj)
- ls.reg.Push(LString(key))
- ls.reg.Push(value)
- ls.Call(3, 0)
- return
- } else {
- curobj = metaindex
- }
- }
- ls.RaiseError("too many recursions in settable")
-}
-
-/* }}} */
-
-/* api methods {{{ */
-
-func NewState(opts ...Options) *LState {
- var ls *LState
- if len(opts) == 0 {
- ls = newLState(Options{
- CallStackSize: CallStackSize,
- RegistrySize: RegistrySize,
- })
- ls.OpenLibs()
- } else {
- if opts[0].CallStackSize < 1 {
- opts[0].CallStackSize = CallStackSize
- }
- if opts[0].RegistrySize < 128 {
- opts[0].RegistrySize = RegistrySize
- }
- if opts[0].RegistryMaxSize < opts[0].RegistrySize {
- opts[0].RegistryMaxSize = 0 // disable growth if max size is smaller than initial size
- } else {
- // if growth enabled, grow step is set
- if opts[0].RegistryGrowStep < 1 {
- opts[0].RegistryGrowStep = RegistryGrowStep
- }
- }
- ls = newLState(opts[0])
- if !opts[0].SkipOpenLibs {
- ls.OpenLibs()
- }
- }
- return ls
-}
-
-func (ls *LState) IsClosed() bool {
- return ls.stack == nil
-}
-
-func (ls *LState) Close() {
- atomic.AddInt32(&ls.stop, 1)
- for _, file := range ls.G.tempFiles {
- // ignore errors in these operations
- file.Close()
- os.Remove(file.Name())
- }
- ls.stack.FreeAll()
- ls.stack = nil
-}
-
-/* registry operations {{{ */
-
-func (ls *LState) GetTop() int {
- return ls.reg.Top() - ls.currentLocalBase()
-}
-
-func (ls *LState) SetTop(idx int) {
- base := ls.currentLocalBase()
- newtop := ls.indexToReg(idx) + 1
- if newtop < base {
- ls.reg.SetTop(base)
- } else {
- ls.reg.SetTop(newtop)
- }
-}
-
-func (ls *LState) Replace(idx int, value LValue) {
- base := ls.currentLocalBase()
- if idx > 0 {
- reg := base + idx - 1
- if reg < ls.reg.Top() {
- ls.reg.Set(reg, value)
- }
- } else if idx == 0 {
- } else if idx > RegistryIndex {
- if tidx := ls.reg.Top() + idx; tidx >= base {
- ls.reg.Set(tidx, value)
- }
- } else {
- switch idx {
- case RegistryIndex:
- if tb, ok := value.(*LTable); ok {
- ls.G.Registry = tb
- } else {
- ls.RaiseError("registry must be a table(%v)", value.Type().String())
- }
- case EnvironIndex:
- if ls.currentFrame == nil {
- ls.RaiseError("no calling environment")
- }
- if tb, ok := value.(*LTable); ok {
- ls.currentFrame.Fn.Env = tb
- } else {
- ls.RaiseError("environment must be a table(%v)", value.Type().String())
- }
- case GlobalsIndex:
- if tb, ok := value.(*LTable); ok {
- ls.G.Global = tb
- } else {
- ls.RaiseError("_G must be a table(%v)", value.Type().String())
- }
- default:
- fn := ls.currentFrame.Fn
- index := GlobalsIndex - idx - 1
- if index < len(fn.Upvalues) {
- fn.Upvalues[index].SetValue(value)
- }
- }
- }
-}
-
-func (ls *LState) Get(idx int) LValue {
- base := ls.currentLocalBase()
- if idx > 0 {
- reg := base + idx - 1
- if reg < ls.reg.Top() {
- return ls.reg.Get(reg)
- }
- return LNil
- } else if idx == 0 {
- return LNil
- } else if idx > RegistryIndex {
- tidx := ls.reg.Top() + idx
- if tidx < base {
- return LNil
- }
- return ls.reg.Get(tidx)
- } else {
- switch idx {
- case RegistryIndex:
- return ls.G.Registry
- case EnvironIndex:
- if ls.currentFrame == nil {
- return ls.Env
- }
- return ls.currentFrame.Fn.Env
- case GlobalsIndex:
- return ls.G.Global
- default:
- fn := ls.currentFrame.Fn
- index := GlobalsIndex - idx - 1
- if index < len(fn.Upvalues) {
- return fn.Upvalues[index].Value()
- }
- return LNil
- }
- }
- return LNil
-}
-
-func (ls *LState) Push(value LValue) {
- ls.reg.Push(value)
-}
-
-func (ls *LState) Pop(n int) {
- for i := 0; i < n; i++ {
- if ls.GetTop() == 0 {
- ls.RaiseError("register underflow")
- }
- ls.reg.Pop()
- }
-}
-
-func (ls *LState) Insert(value LValue, index int) {
- reg := ls.indexToReg(index)
- top := ls.reg.Top()
- if reg >= top {
- ls.reg.Set(reg, value)
- return
- }
- if reg <= ls.currentLocalBase() {
- reg = ls.currentLocalBase()
- }
- top--
- for ; top >= reg; top-- {
- ls.reg.Set(top+1, ls.reg.Get(top))
- }
- ls.reg.Set(reg, value)
-}
-
-func (ls *LState) Remove(index int) {
- reg := ls.indexToReg(index)
- top := ls.reg.Top()
- switch {
- case reg >= top:
- return
- case reg < ls.currentLocalBase():
- return
- case reg == top-1:
- ls.Pop(1)
- return
- }
- for i := reg; i < top-1; i++ {
- ls.reg.Set(i, ls.reg.Get(i+1))
- }
- ls.reg.SetTop(top - 1)
-}
-
-/* }}} */
-
-/* object allocation {{{ */
-
-func (ls *LState) NewTable() *LTable {
- return newLTable(defaultArrayCap, defaultHashCap)
-}
-
-func (ls *LState) CreateTable(acap, hcap int) *LTable {
- return newLTable(acap, hcap)
-}
-
-// NewThread returns a new LState that shares with the original state all global objects.
-// If the original state has context.Context, the new state has a new child context of the original state and this function returns its cancel function.
-func (ls *LState) NewThread() (*LState, context.CancelFunc) {
- thread := newLState(ls.Options)
- thread.G = ls.G
- thread.Env = ls.Env
- var f context.CancelFunc = nil
- if ls.ctx != nil {
- thread.mainLoop = mainLoopWithContext
- thread.ctx, f = context.WithCancel(ls.ctx)
- }
- return thread, f
-}
-
-func (ls *LState) NewFunctionFromProto(proto *FunctionProto) *LFunction {
- return newLFunctionL(proto, ls.Env, int(proto.NumUpvalues))
-}
-
-func (ls *LState) NewUserData() *LUserData {
- return &LUserData{
- Env: ls.currentEnv(),
- Metatable: LNil,
- }
-}
-
-func (ls *LState) NewFunction(fn LGFunction) *LFunction {
- return newLFunctionG(fn, ls.currentEnv(), 0)
-}
-
-func (ls *LState) NewClosure(fn LGFunction, upvalues ...LValue) *LFunction {
- cl := newLFunctionG(fn, ls.currentEnv(), len(upvalues))
- for i, lv := range upvalues {
- cl.Upvalues[i] = &Upvalue{}
- cl.Upvalues[i].Close()
- cl.Upvalues[i].SetValue(lv)
- }
- return cl
-}
-
-/* }}} */
-
-/* toType {{{ */
-
-func (ls *LState) ToBool(n int) bool {
- return LVAsBool(ls.Get(n))
-}
-
-func (ls *LState) ToInt(n int) int {
- if lv, ok := ls.Get(n).(LNumber); ok {
- return int(lv)
- }
- if lv, ok := ls.Get(n).(LString); ok {
- if num, err := parseNumber(string(lv)); err == nil {
- return int(num)
- }
- }
- return 0
-}
-
-func (ls *LState) ToInt64(n int) int64 {
- if lv, ok := ls.Get(n).(LNumber); ok {
- return int64(lv)
- }
- if lv, ok := ls.Get(n).(LString); ok {
- if num, err := parseNumber(string(lv)); err == nil {
- return int64(num)
- }
- }
- return 0
-}
-
-func (ls *LState) ToNumber(n int) LNumber {
- return LVAsNumber(ls.Get(n))
-}
-
-func (ls *LState) ToString(n int) string {
- return LVAsString(ls.Get(n))
-}
-
-func (ls *LState) ToTable(n int) *LTable {
- if lv, ok := ls.Get(n).(*LTable); ok {
- return lv
- }
- return nil
-}
-
-func (ls *LState) ToFunction(n int) *LFunction {
- if lv, ok := ls.Get(n).(*LFunction); ok {
- return lv
- }
- return nil
-}
-
-func (ls *LState) ToUserData(n int) *LUserData {
- if lv, ok := ls.Get(n).(*LUserData); ok {
- return lv
- }
- return nil
-}
-
-func (ls *LState) ToThread(n int) *LState {
- if lv, ok := ls.Get(n).(*LState); ok {
- return lv
- }
- return nil
-}
-
-/* }}} */
-
-/* error & debug operations {{{ */
-
-func (ls *LState) registryOverflow() {
- ls.RaiseError("registry overflow")
-}
-
-// This function is equivalent to luaL_error( http://www.lua.org/manual/5.1/manual.html#luaL_error ).
-func (ls *LState) RaiseError(format string, args ...interface{}) {
- ls.raiseError(1, format, args...)
-}
-
-// This function is equivalent to lua_error( http://www.lua.org/manual/5.1/manual.html#lua_error ).
-func (ls *LState) Error(lv LValue, level int) {
- if str, ok := lv.(LString); ok {
- ls.raiseError(level, string(str))
- } else {
- if !ls.hasErrorFunc {
- ls.closeAllUpvalues()
- }
- ls.Push(lv)
- ls.Panic(ls)
- }
-}
-
-func (ls *LState) GetInfo(what string, dbg *Debug, fn LValue) (LValue, error) {
- if !strings.HasPrefix(what, ">") {
- fn = dbg.frame.Fn
- } else {
- what = what[1:]
- }
- f, ok := fn.(*LFunction)
- if !ok {
- return LNil, newApiErrorS(ApiErrorRun, "can not get debug info(an object in not a function)")
- }
-
- retfn := false
- for _, c := range what {
- switch c {
- case 'f':
- retfn = true
- case 'S':
- if dbg.frame != nil && dbg.frame.Parent == nil {
- dbg.What = "main"
- } else if f.IsG {
- dbg.What = "G"
- } else if dbg.frame != nil && dbg.frame.TailCall > 0 {
- dbg.What = "tail"
- } else {
- dbg.What = "Lua"
- }
- if !f.IsG {
- dbg.Source = f.Proto.SourceName
- dbg.LineDefined = f.Proto.LineDefined
- dbg.LastLineDefined = f.Proto.LastLineDefined
- }
- case 'l':
- if !f.IsG && dbg.frame != nil {
- if dbg.frame.Pc > 0 {
- dbg.CurrentLine = f.Proto.DbgSourcePositions[dbg.frame.Pc-1]
- }
- } else {
- dbg.CurrentLine = -1
- }
- case 'u':
- dbg.NUpvalues = len(f.Upvalues)
- case 'n':
- if dbg.frame != nil {
- dbg.Name = ls.rawFrameFuncName(dbg.frame)
- }
- default:
- return LNil, newApiErrorS(ApiErrorRun, "invalid what: "+string(c))
- }
- }
-
- if retfn {
- return f, nil
- }
- return LNil, nil
-
-}
-
-func (ls *LState) GetStack(level int) (*Debug, bool) {
- frame := ls.currentFrame
- for ; level > 0 && frame != nil; frame = frame.Parent {
- level--
- if !frame.Fn.IsG {
- level -= frame.TailCall
- }
- }
-
- if level == 0 && frame != nil {
- return &Debug{frame: frame}, true
- } else if level < 0 && ls.stack.Sp() > 0 {
- return &Debug{frame: ls.stack.At(0)}, true
- }
- return &Debug{}, false
-}
-
-func (ls *LState) GetLocal(dbg *Debug, no int) (string, LValue) {
- frame := dbg.frame
- if name := ls.findLocal(frame, no); len(name) > 0 {
- return name, ls.reg.Get(frame.LocalBase + no - 1)
- }
- return "", LNil
-}
-
-func (ls *LState) SetLocal(dbg *Debug, no int, lv LValue) string {
- frame := dbg.frame
- if name := ls.findLocal(frame, no); len(name) > 0 {
- ls.reg.Set(frame.LocalBase+no-1, lv)
- return name
- }
- return ""
-}
-
-func (ls *LState) GetUpvalue(fn *LFunction, no int) (string, LValue) {
- if fn.IsG {
- return "", LNil
- }
-
- no--
- if no >= 0 && no < len(fn.Upvalues) {
- return fn.Proto.DbgUpvalues[no], fn.Upvalues[no].Value()
- }
- return "", LNil
-}
-
-func (ls *LState) SetUpvalue(fn *LFunction, no int, lv LValue) string {
- if fn.IsG {
- return ""
- }
-
- no--
- if no >= 0 && no < len(fn.Upvalues) {
- fn.Upvalues[no].SetValue(lv)
- return fn.Proto.DbgUpvalues[no]
- }
- return ""
-}
-
-/* }}} */
-
-/* env operations {{{ */
-
-func (ls *LState) GetFEnv(obj LValue) LValue {
- switch lv := obj.(type) {
- case *LFunction:
- return lv.Env
- case *LUserData:
- return lv.Env
- case *LState:
- return lv.Env
- }
- return LNil
-}
-
-func (ls *LState) SetFEnv(obj LValue, env LValue) {
- tb, ok := env.(*LTable)
- if !ok {
- ls.RaiseError("cannot use %v as an environment", env.Type().String())
- }
-
- switch lv := obj.(type) {
- case *LFunction:
- lv.Env = tb
- case *LUserData:
- lv.Env = tb
- case *LState:
- lv.Env = tb
- }
- /* do nothing */
-}
-
-/* }}} */
-
-/* table operations {{{ */
-
-func (ls *LState) RawGet(tb *LTable, key LValue) LValue {
- return tb.RawGet(key)
-}
-
-func (ls *LState) RawGetInt(tb *LTable, key int) LValue {
- return tb.RawGetInt(key)
-}
-
-func (ls *LState) GetField(obj LValue, skey string) LValue {
- return ls.getFieldString(obj, skey)
-}
-
-func (ls *LState) GetTable(obj LValue, key LValue) LValue {
- return ls.getField(obj, key)
-}
-
-func (ls *LState) RawSet(tb *LTable, key LValue, value LValue) {
- if n, ok := key.(LNumber); ok && math.IsNaN(float64(n)) {
- ls.RaiseError("table index is NaN")
- } else if key == LNil {
- ls.RaiseError("table index is nil")
- }
- tb.RawSet(key, value)
-}
-
-func (ls *LState) RawSetInt(tb *LTable, key int, value LValue) {
- tb.RawSetInt(key, value)
-}
-
-func (ls *LState) SetField(obj LValue, key string, value LValue) {
- ls.setFieldString(obj, key, value)
-}
-
-func (ls *LState) SetTable(obj LValue, key LValue, value LValue) {
- ls.setField(obj, key, value)
-}
-
-func (ls *LState) ForEach(tb *LTable, cb func(LValue, LValue)) {
- tb.ForEach(cb)
-}
-
-func (ls *LState) GetGlobal(name string) LValue {
- return ls.GetField(ls.Get(GlobalsIndex), name)
-}
-
-func (ls *LState) SetGlobal(name string, value LValue) {
- ls.SetField(ls.Get(GlobalsIndex), name, value)
-}
-
-func (ls *LState) Next(tb *LTable, key LValue) (LValue, LValue) {
- return tb.Next(key)
-}
-
-/* }}} */
-
-/* unary operations {{{ */
-
-func (ls *LState) ObjLen(v1 LValue) int {
- if v1.Type() == LTString {
- return len(string(v1.(LString)))
- }
- op := ls.metaOp1(v1, "__len")
- if op.Type() == LTFunction {
- ls.Push(op)
- ls.Push(v1)
- ls.Call(1, 1)
- ret := ls.reg.Pop()
- if ret.Type() == LTNumber {
- return int(ret.(LNumber))
- }
- } else if v1.Type() == LTTable {
- return v1.(*LTable).Len()
- }
- return 0
-}
-
-/* }}} */
-
-/* binary operations {{{ */
-
-func (ls *LState) Concat(values ...LValue) string {
- top := ls.reg.Top()
- for _, value := range values {
- ls.reg.Push(value)
- }
- ret := stringConcat(ls, len(values), ls.reg.Top()-1)
- ls.reg.SetTop(top)
- return LVAsString(ret)
-}
-
-func (ls *LState) LessThan(lhs, rhs LValue) bool {
- return lessThan(ls, lhs, rhs)
-}
-
-func (ls *LState) Equal(lhs, rhs LValue) bool {
- return equals(ls, lhs, rhs, false)
-}
-
-func (ls *LState) RawEqual(lhs, rhs LValue) bool {
- return equals(ls, lhs, rhs, true)
-}
-
-/* }}} */
-
-/* register operations {{{ */
-
-func (ls *LState) Register(name string, fn LGFunction) {
- ls.SetGlobal(name, ls.NewFunction(fn))
-}
-
-/* }}} */
-
-/* load and function call operations {{{ */
-
-func (ls *LState) Load(reader io.Reader, name string) (*LFunction, error) {
- chunk, err := parse.Parse(reader, name)
- if err != nil {
- return nil, newApiErrorE(ApiErrorSyntax, err)
- }
- proto, err := Compile(chunk, name)
- if err != nil {
- return nil, newApiErrorE(ApiErrorSyntax, err)
- }
- return newLFunctionL(proto, ls.currentEnv(), 0), nil
-}
-
-func (ls *LState) Call(nargs, nret int) {
- ls.callR(nargs, nret, -1)
-}
-
-func (ls *LState) PCall(nargs, nret int, errfunc *LFunction) (err error) {
- err = nil
- sp := ls.stack.Sp()
- base := ls.reg.Top() - nargs - 1
- oldpanic := ls.Panic
- ls.Panic = panicWithoutTraceback
- if errfunc != nil {
- ls.hasErrorFunc = true
- }
- defer func() {
- ls.Panic = oldpanic
- ls.hasErrorFunc = false
- rcv := recover()
- if rcv != nil {
- if _, ok := rcv.(*ApiError); !ok {
- err = newApiErrorS(ApiErrorPanic, fmt.Sprint(rcv))
- if ls.Options.IncludeGoStackTrace {
- buf := make([]byte, 4096)
- runtime.Stack(buf, false)
- err.(*ApiError).StackTrace = strings.Trim(string(buf), "\000") + "\n" + ls.stackTrace(0)
- }
- } else {
- err = rcv.(*ApiError)
- }
- if errfunc != nil {
- ls.Push(errfunc)
- ls.Push(err.(*ApiError).Object)
- ls.Panic = panicWithoutTraceback
- defer func() {
- ls.Panic = oldpanic
- rcv := recover()
- if rcv != nil {
- if _, ok := rcv.(*ApiError); !ok {
- err = newApiErrorS(ApiErrorPanic, fmt.Sprint(rcv))
- if ls.Options.IncludeGoStackTrace {
- buf := make([]byte, 4096)
- runtime.Stack(buf, false)
- err.(*ApiError).StackTrace = strings.Trim(string(buf), "\000") + ls.stackTrace(0)
- }
- } else {
- err = rcv.(*ApiError)
- err.(*ApiError).StackTrace = ls.stackTrace(0)
- }
- }
- }()
- ls.Call(1, 1)
- err = newApiError(ApiErrorError, ls.Get(-1))
- } else if len(err.(*ApiError).StackTrace) == 0 {
- err.(*ApiError).StackTrace = ls.stackTrace(0)
- }
- ls.stack.SetSp(sp)
- ls.currentFrame = ls.stack.Last()
- ls.reg.SetTop(base)
- }
- ls.stack.SetSp(sp)
- if sp == 0 {
- ls.currentFrame = nil
- }
- }()
-
- ls.Call(nargs, nret)
-
- return
-}
-
-func (ls *LState) GPCall(fn LGFunction, data LValue) error {
- ls.Push(newLFunctionG(fn, ls.currentEnv(), 0))
- ls.Push(data)
- return ls.PCall(1, MultRet, nil)
-}
-
-func (ls *LState) CallByParam(cp P, args ...LValue) error {
- ls.Push(cp.Fn)
- for _, arg := range args {
- ls.Push(arg)
- }
-
- if cp.Protect {
- return ls.PCall(len(args), cp.NRet, cp.Handler)
- }
- ls.Call(len(args), cp.NRet)
- return nil
-}
-
-/* }}} */
-
-/* metatable operations {{{ */
-
-func (ls *LState) GetMetatable(obj LValue) LValue {
- return ls.metatable(obj, false)
-}
-
-func (ls *LState) SetMetatable(obj LValue, mt LValue) {
- switch mt.(type) {
- case *LNilType, *LTable:
- default:
- ls.RaiseError("metatable must be a table or nil, but got %v", mt.Type().String())
- }
-
- switch v := obj.(type) {
- case *LTable:
- v.Metatable = mt
- case *LUserData:
- v.Metatable = mt
- default:
- ls.G.builtinMts[int(obj.Type())] = mt
- }
-}
-
-/* }}} */
-
-/* coroutine operations {{{ */
-
-func (ls *LState) Status(th *LState) string {
- status := "suspended"
- if th.Dead {
- status = "dead"
- } else if ls.G.CurrentThread == th {
- status = "running"
- } else if ls.Parent == th {
- status = "normal"
- }
- return status
-}
-
-func (ls *LState) Resume(th *LState, fn *LFunction, args ...LValue) (ResumeState, error, []LValue) {
- isstarted := th.isStarted()
- if !isstarted {
- base := 0
- th.stack.Push(callFrame{
- Fn: fn,
- Pc: 0,
- Base: base,
- LocalBase: base + 1,
- ReturnBase: base,
- NArgs: 0,
- NRet: MultRet,
- Parent: nil,
- TailCall: 0,
- })
- }
-
- if ls.G.CurrentThread == th {
- return ResumeError, newApiErrorS(ApiErrorRun, "can not resume a running thread"), nil
- }
- if th.Dead {
- return ResumeError, newApiErrorS(ApiErrorRun, "can not resume a dead thread"), nil
- }
- th.Parent = ls
- ls.G.CurrentThread = th
- if !isstarted {
- cf := th.stack.Last()
- th.currentFrame = cf
- th.SetTop(0)
- for _, arg := range args {
- th.Push(arg)
- }
- cf.NArgs = len(args)
- th.initCallFrame(cf)
- th.Panic = panicWithoutTraceback
- } else {
- for _, arg := range args {
- th.Push(arg)
- }
- }
- top := ls.GetTop()
- threadRun(th)
- haserror := LVIsFalse(ls.Get(top + 1))
- ret := make([]LValue, 0, ls.GetTop())
- for idx := top + 2; idx <= ls.GetTop(); idx++ {
- ret = append(ret, ls.Get(idx))
- }
- if len(ret) == 0 {
- ret = append(ret, LNil)
- }
- ls.SetTop(top)
-
- if haserror {
- return ResumeError, newApiError(ApiErrorRun, ret[0]), nil
- } else if th.stack.IsEmpty() {
- return ResumeOK, nil, ret
- }
- return ResumeYield, nil, ret
-}
-
-func (ls *LState) Yield(values ...LValue) int {
- ls.SetTop(0)
- for _, lv := range values {
- ls.Push(lv)
- }
- return -1
-}
-
-func (ls *LState) XMoveTo(other *LState, n int) {
- if ls == other {
- return
- }
- top := ls.GetTop()
- n = intMin(n, top)
- for i := n; i > 0; i-- {
- other.Push(ls.Get(top - i + 1))
- }
- ls.SetTop(top - n)
-}
-
-/* }}} */
-
-/* GopherLua original APIs {{{ */
-
-// Set maximum memory size. This function can only be called from the main thread.
-func (ls *LState) SetMx(mx int) {
- if ls.Parent != nil {
- ls.RaiseError("sub threads are not allowed to set a memory limit")
- }
- go func() {
- limit := uint64(mx * 1024 * 1024) //MB
- var s runtime.MemStats
- for atomic.LoadInt32(&ls.stop) == 0 {
- runtime.ReadMemStats(&s)
- if s.Alloc >= limit {
- fmt.Println("out of memory")
- os.Exit(3)
- }
- time.Sleep(100 * time.Millisecond)
- }
- }()
-}
-
-// SetContext set a context ctx to this LState. The provided ctx must be non-nil.
-func (ls *LState) SetContext(ctx context.Context) {
- ls.mainLoop = mainLoopWithContext
- ls.ctx = ctx
-}
-
-// Context returns the LState's context. To change the context, use WithContext.
-func (ls *LState) Context() context.Context {
- return ls.ctx
-}
-
-// RemoveContext removes the context associated with this LState and returns this context.
-func (ls *LState) RemoveContext() context.Context {
- oldctx := ls.ctx
- ls.mainLoop = mainLoop
- ls.ctx = nil
- return oldctx
-}
-
-// Converts the Lua value at the given acceptable index to the chan LValue.
-func (ls *LState) ToChannel(n int) chan LValue {
- if lv, ok := ls.Get(n).(LChannel); ok {
- return (chan LValue)(lv)
- }
- return nil
-}
-
-// RemoveCallerFrame removes the stack frame above the current stack frame. This is useful in tail calls. It returns
-// the new current frame.
-func (ls *LState) RemoveCallerFrame() *callFrame {
- cs := ls.stack
- sp := cs.Sp()
- parentFrame := cs.At(sp - 2)
- currentFrame := cs.At(sp - 1)
- parentsParentFrame := parentFrame.Parent
- *parentFrame = *currentFrame
- parentFrame.Parent = parentsParentFrame
- parentFrame.Idx = sp - 2
- cs.Pop()
- return parentFrame
-}
-
-/* }}} */
-
-/* }}} */
-
-//
diff --git a/vendor/github.com/yuin/gopher-lua/stringlib.go b/vendor/github.com/yuin/gopher-lua/stringlib.go
deleted file mode 100644
index f484c2b3..00000000
--- a/vendor/github.com/yuin/gopher-lua/stringlib.go
+++ /dev/null
@@ -1,448 +0,0 @@
-package lua
-
-import (
- "fmt"
- "strings"
-
- "github.com/yuin/gopher-lua/pm"
-)
-
-const emptyLString LString = LString("")
-
-func OpenString(L *LState) int {
- var mod *LTable
- //_, ok := L.G.builtinMts[int(LTString)]
- //if !ok {
- mod = L.RegisterModule(StringLibName, strFuncs).(*LTable)
- gmatch := L.NewClosure(strGmatch, L.NewFunction(strGmatchIter))
- mod.RawSetString("gmatch", gmatch)
- mod.RawSetString("gfind", gmatch)
- mod.RawSetString("__index", mod)
- L.G.builtinMts[int(LTString)] = mod
- //}
- L.Push(mod)
- return 1
-}
-
-var strFuncs = map[string]LGFunction{
- "byte": strByte,
- "char": strChar,
- "dump": strDump,
- "find": strFind,
- "format": strFormat,
- "gsub": strGsub,
- "len": strLen,
- "lower": strLower,
- "match": strMatch,
- "rep": strRep,
- "reverse": strReverse,
- "sub": strSub,
- "upper": strUpper,
-}
-
-func strByte(L *LState) int {
- str := L.CheckString(1)
- start := L.OptInt(2, 1) - 1
- end := L.OptInt(3, -1)
- l := len(str)
- if start < 0 {
- start = l + start + 1
- }
- if end < 0 {
- end = l + end + 1
- }
-
- if L.GetTop() == 2 {
- if start < 0 || start >= l {
- return 0
- }
- L.Push(LNumber(str[start]))
- return 1
- }
-
- start = intMax(start, 0)
- end = intMin(end, l)
- if end < 0 || end <= start || start >= l {
- return 0
- }
-
- for i := start; i < end; i++ {
- L.Push(LNumber(str[i]))
- }
- return end - start
-}
-
-func strChar(L *LState) int {
- top := L.GetTop()
- bytes := make([]byte, L.GetTop())
- for i := 1; i <= top; i++ {
- bytes[i-1] = uint8(L.CheckInt(i))
- }
- L.Push(LString(string(bytes)))
- return 1
-}
-
-func strDump(L *LState) int {
- L.RaiseError("GopherLua does not support the string.dump")
- return 0
-}
-
-func strFind(L *LState) int {
- str := L.CheckString(1)
- pattern := L.CheckString(2)
- if len(pattern) == 0 {
- L.Push(LNumber(1))
- L.Push(LNumber(0))
- return 2
- }
- init := luaIndex2StringIndex(str, L.OptInt(3, 1), true)
- plain := false
- if L.GetTop() == 4 {
- plain = LVAsBool(L.Get(4))
- }
-
- if plain {
- pos := strings.Index(str[init:], pattern)
- if pos < 0 {
- L.Push(LNil)
- return 1
- }
- L.Push(LNumber(init+pos) + 1)
- L.Push(LNumber(init + pos + len(pattern)))
- return 2
- }
-
- mds, err := pm.Find(pattern, unsafeFastStringToReadOnlyBytes(str), init, 1)
- if err != nil {
- L.RaiseError(err.Error())
- }
- if len(mds) == 0 {
- L.Push(LNil)
- return 1
- }
- md := mds[0]
- L.Push(LNumber(md.Capture(0) + 1))
- L.Push(LNumber(md.Capture(1)))
- for i := 2; i < md.CaptureLength(); i += 2 {
- if md.IsPosCapture(i) {
- L.Push(LNumber(md.Capture(i)))
- } else {
- L.Push(LString(str[md.Capture(i):md.Capture(i+1)]))
- }
- }
- return md.CaptureLength()/2 + 1
-}
-
-func strFormat(L *LState) int {
- str := L.CheckString(1)
- args := make([]interface{}, L.GetTop()-1)
- top := L.GetTop()
- for i := 2; i <= top; i++ {
- args[i-2] = L.Get(i)
- }
- npat := strings.Count(str, "%") - strings.Count(str, "%%")
- L.Push(LString(fmt.Sprintf(str, args[:intMin(npat, len(args))]...)))
- return 1
-}
-
-func strGsub(L *LState) int {
- str := L.CheckString(1)
- pat := L.CheckString(2)
- L.CheckTypes(3, LTString, LTTable, LTFunction)
- repl := L.CheckAny(3)
- limit := L.OptInt(4, -1)
-
- mds, err := pm.Find(pat, unsafeFastStringToReadOnlyBytes(str), 0, limit)
- if err != nil {
- L.RaiseError(err.Error())
- }
- if len(mds) == 0 {
- L.SetTop(1)
- L.Push(LNumber(0))
- return 2
- }
- switch lv := repl.(type) {
- case LString:
- L.Push(LString(strGsubStr(L, str, string(lv), mds)))
- case *LTable:
- L.Push(LString(strGsubTable(L, str, lv, mds)))
- case *LFunction:
- L.Push(LString(strGsubFunc(L, str, lv, mds)))
- }
- L.Push(LNumber(len(mds)))
- return 2
-}
-
-type replaceInfo struct {
- Indicies []int
- String string
-}
-
-func checkCaptureIndex(L *LState, m *pm.MatchData, idx int) {
- if idx <= 2 {
- return
- }
- if idx >= m.CaptureLength() {
- L.RaiseError("invalid capture index")
- }
-}
-
-func capturedString(L *LState, m *pm.MatchData, str string, idx int) string {
- checkCaptureIndex(L, m, idx)
- if idx >= m.CaptureLength() && idx == 2 {
- idx = 0
- }
- if m.IsPosCapture(idx) {
- return fmt.Sprint(m.Capture(idx))
- } else {
- return str[m.Capture(idx):m.Capture(idx+1)]
- }
-
-}
-
-func strGsubDoReplace(str string, info []replaceInfo) string {
- offset := 0
- buf := []byte(str)
- for _, replace := range info {
- oldlen := len(buf)
- b1 := append([]byte(""), buf[0:offset+replace.Indicies[0]]...)
- b2 := []byte("")
- index2 := offset + replace.Indicies[1]
- if index2 <= len(buf) {
- b2 = append(b2, buf[index2:len(buf)]...)
- }
- buf = append(b1, replace.String...)
- buf = append(buf, b2...)
- offset += len(buf) - oldlen
- }
- return string(buf)
-}
-
-func strGsubStr(L *LState, str string, repl string, matches []*pm.MatchData) string {
- infoList := make([]replaceInfo, 0, len(matches))
- for _, match := range matches {
- start, end := match.Capture(0), match.Capture(1)
- sc := newFlagScanner('%', "", "", repl)
- for c, eos := sc.Next(); !eos; c, eos = sc.Next() {
- if !sc.ChangeFlag {
- if sc.HasFlag {
- if c >= '0' && c <= '9' {
- sc.AppendString(capturedString(L, match, str, 2*(int(c)-48)))
- } else {
- sc.AppendChar('%')
- sc.AppendChar(c)
- }
- sc.HasFlag = false
- } else {
- sc.AppendChar(c)
- }
- }
- }
- infoList = append(infoList, replaceInfo{[]int{start, end}, sc.String()})
- }
-
- return strGsubDoReplace(str, infoList)
-}
-
-func strGsubTable(L *LState, str string, repl *LTable, matches []*pm.MatchData) string {
- infoList := make([]replaceInfo, 0, len(matches))
- for _, match := range matches {
- idx := 0
- if match.CaptureLength() > 2 { // has captures
- idx = 2
- }
- var value LValue
- if match.IsPosCapture(idx) {
- value = L.GetTable(repl, LNumber(match.Capture(idx)))
- } else {
- value = L.GetField(repl, str[match.Capture(idx):match.Capture(idx+1)])
- }
- if !LVIsFalse(value) {
- infoList = append(infoList, replaceInfo{[]int{match.Capture(0), match.Capture(1)}, LVAsString(value)})
- }
- }
- return strGsubDoReplace(str, infoList)
-}
-
-func strGsubFunc(L *LState, str string, repl *LFunction, matches []*pm.MatchData) string {
- infoList := make([]replaceInfo, 0, len(matches))
- for _, match := range matches {
- start, end := match.Capture(0), match.Capture(1)
- L.Push(repl)
- nargs := 0
- if match.CaptureLength() > 2 { // has captures
- for i := 2; i < match.CaptureLength(); i += 2 {
- if match.IsPosCapture(i) {
- L.Push(LNumber(match.Capture(i)))
- } else {
- L.Push(LString(capturedString(L, match, str, i)))
- }
- nargs++
- }
- } else {
- L.Push(LString(capturedString(L, match, str, 0)))
- nargs++
- }
- L.Call(nargs, 1)
- ret := L.reg.Pop()
- if !LVIsFalse(ret) {
- infoList = append(infoList, replaceInfo{[]int{start, end}, LVAsString(ret)})
- }
- }
- return strGsubDoReplace(str, infoList)
-}
-
-type strMatchData struct {
- str string
- pos int
- matches []*pm.MatchData
-}
-
-func strGmatchIter(L *LState) int {
- md := L.CheckUserData(1).Value.(*strMatchData)
- str := md.str
- matches := md.matches
- idx := md.pos
- md.pos += 1
- if idx == len(matches) {
- return 0
- }
- L.Push(L.Get(1))
- match := matches[idx]
- if match.CaptureLength() == 2 {
- L.Push(LString(str[match.Capture(0):match.Capture(1)]))
- return 1
- }
-
- for i := 2; i < match.CaptureLength(); i += 2 {
- if match.IsPosCapture(i) {
- L.Push(LNumber(match.Capture(i)))
- } else {
- L.Push(LString(str[match.Capture(i):match.Capture(i+1)]))
- }
- }
- return match.CaptureLength()/2 - 1
-}
-
-func strGmatch(L *LState) int {
- str := L.CheckString(1)
- pattern := L.CheckString(2)
- mds, err := pm.Find(pattern, []byte(str), 0, -1)
- if err != nil {
- L.RaiseError(err.Error())
- }
- L.Push(L.Get(UpvalueIndex(1)))
- ud := L.NewUserData()
- ud.Value = &strMatchData{str, 0, mds}
- L.Push(ud)
- return 2
-}
-
-func strLen(L *LState) int {
- str := L.CheckString(1)
- L.Push(LNumber(len(str)))
- return 1
-}
-
-func strLower(L *LState) int {
- str := L.CheckString(1)
- L.Push(LString(strings.ToLower(str)))
- return 1
-}
-
-func strMatch(L *LState) int {
- str := L.CheckString(1)
- pattern := L.CheckString(2)
- offset := L.OptInt(3, 1)
- l := len(str)
- if offset < 0 {
- offset = l + offset + 1
- }
- offset--
- if offset < 0 {
- offset = 0
- }
-
- mds, err := pm.Find(pattern, unsafeFastStringToReadOnlyBytes(str), offset, 1)
- if err != nil {
- L.RaiseError(err.Error())
- }
- if len(mds) == 0 {
- L.Push(LNil)
- return 0
- }
- md := mds[0]
- nsubs := md.CaptureLength() / 2
- switch nsubs {
- case 1:
- L.Push(LString(str[md.Capture(0):md.Capture(1)]))
- return 1
- default:
- for i := 2; i < md.CaptureLength(); i += 2 {
- if md.IsPosCapture(i) {
- L.Push(LNumber(md.Capture(i)))
- } else {
- L.Push(LString(str[md.Capture(i):md.Capture(i+1)]))
- }
- }
- return nsubs - 1
- }
-}
-
-func strRep(L *LState) int {
- str := L.CheckString(1)
- n := L.CheckInt(2)
- if n < 0 {
- L.Push(emptyLString)
- } else {
- L.Push(LString(strings.Repeat(str, n)))
- }
- return 1
-}
-
-func strReverse(L *LState) int {
- str := L.CheckString(1)
- bts := []byte(str)
- out := make([]byte, len(bts))
- for i, j := 0, len(bts)-1; j >= 0; i, j = i+1, j-1 {
- out[i] = bts[j]
- }
- L.Push(LString(string(out)))
- return 1
-}
-
-func strSub(L *LState) int {
- str := L.CheckString(1)
- start := luaIndex2StringIndex(str, L.CheckInt(2), true)
- end := luaIndex2StringIndex(str, L.OptInt(3, -1), false)
- l := len(str)
- if start >= l || end < start {
- L.Push(emptyLString)
- } else {
- L.Push(LString(str[start:end]))
- }
- return 1
-}
-
-func strUpper(L *LState) int {
- str := L.CheckString(1)
- L.Push(LString(strings.ToUpper(str)))
- return 1
-}
-
-func luaIndex2StringIndex(str string, i int, start bool) int {
- if start && i != 0 {
- i -= 1
- }
- l := len(str)
- if i < 0 {
- i = l + i + 1
- }
- i = intMax(0, i)
- if !start && i > l {
- i = l
- }
- return i
-}
-
-//
diff --git a/vendor/github.com/yuin/gopher-lua/table.go b/vendor/github.com/yuin/gopher-lua/table.go
deleted file mode 100644
index e220bd9c..00000000
--- a/vendor/github.com/yuin/gopher-lua/table.go
+++ /dev/null
@@ -1,387 +0,0 @@
-package lua
-
-const defaultArrayCap = 32
-const defaultHashCap = 32
-
-type lValueArraySorter struct {
- L *LState
- Fn *LFunction
- Values []LValue
-}
-
-func (lv lValueArraySorter) Len() int {
- return len(lv.Values)
-}
-
-func (lv lValueArraySorter) Swap(i, j int) {
- lv.Values[i], lv.Values[j] = lv.Values[j], lv.Values[i]
-}
-
-func (lv lValueArraySorter) Less(i, j int) bool {
- if lv.Fn != nil {
- lv.L.Push(lv.Fn)
- lv.L.Push(lv.Values[i])
- lv.L.Push(lv.Values[j])
- lv.L.Call(2, 1)
- return LVAsBool(lv.L.reg.Pop())
- }
- return lessThan(lv.L, lv.Values[i], lv.Values[j])
-}
-
-func newLTable(acap int, hcap int) *LTable {
- if acap < 0 {
- acap = 0
- }
- if hcap < 0 {
- hcap = 0
- }
- tb := <able{}
- tb.Metatable = LNil
- if acap != 0 {
- tb.array = make([]LValue, 0, acap)
- }
- if hcap != 0 {
- tb.strdict = make(map[string]LValue, hcap)
- }
- return tb
-}
-
-// Len returns length of this LTable.
-func (tb *LTable) Len() int {
- if tb.array == nil {
- return 0
- }
- var prev LValue = LNil
- for i := len(tb.array) - 1; i >= 0; i-- {
- v := tb.array[i]
- if prev == LNil && v != LNil {
- return i + 1
- }
- prev = v
- }
- return 0
-}
-
-// Append appends a given LValue to this LTable.
-func (tb *LTable) Append(value LValue) {
- if value == LNil {
- return
- }
- if tb.array == nil {
- tb.array = make([]LValue, 0, defaultArrayCap)
- }
- if len(tb.array) == 0 || tb.array[len(tb.array)-1] != LNil {
- tb.array = append(tb.array, value)
- } else {
- i := len(tb.array) - 2
- for ; i >= 0; i-- {
- if tb.array[i] != LNil {
- break
- }
- }
- tb.array[i+1] = value
- }
-}
-
-// Insert inserts a given LValue at position `i` in this table.
-func (tb *LTable) Insert(i int, value LValue) {
- if tb.array == nil {
- tb.array = make([]LValue, 0, defaultArrayCap)
- }
- if i > len(tb.array) {
- tb.RawSetInt(i, value)
- return
- }
- if i <= 0 {
- tb.RawSet(LNumber(i), value)
- return
- }
- i -= 1
- tb.array = append(tb.array, LNil)
- copy(tb.array[i+1:], tb.array[i:])
- tb.array[i] = value
-}
-
-// MaxN returns a maximum number key that nil value does not exist before it.
-func (tb *LTable) MaxN() int {
- if tb.array == nil {
- return 0
- }
- for i := len(tb.array) - 1; i >= 0; i-- {
- if tb.array[i] != LNil {
- return i + 1
- }
- }
- return 0
-}
-
-// Remove removes from this table the element at a given position.
-func (tb *LTable) Remove(pos int) LValue {
- if tb.array == nil {
- return LNil
- }
- larray := len(tb.array)
- if larray == 0 {
- return LNil
- }
- i := pos - 1
- oldval := LNil
- switch {
- case i >= larray:
- // nothing to do
- case i == larray-1 || i < 0:
- oldval = tb.array[larray-1]
- tb.array = tb.array[:larray-1]
- default:
- oldval = tb.array[i]
- copy(tb.array[i:], tb.array[i+1:])
- tb.array[larray-1] = nil
- tb.array = tb.array[:larray-1]
- }
- return oldval
-}
-
-// RawSet sets a given LValue to a given index without the __newindex metamethod.
-// It is recommended to use `RawSetString` or `RawSetInt` for performance
-// if you already know the given LValue is a string or number.
-func (tb *LTable) RawSet(key LValue, value LValue) {
- switch v := key.(type) {
- case LNumber:
- if isArrayKey(v) {
- if tb.array == nil {
- tb.array = make([]LValue, 0, defaultArrayCap)
- }
- index := int(v) - 1
- alen := len(tb.array)
- switch {
- case index == alen:
- tb.array = append(tb.array, value)
- case index > alen:
- for i := 0; i < (index - alen); i++ {
- tb.array = append(tb.array, LNil)
- }
- tb.array = append(tb.array, value)
- case index < alen:
- tb.array[index] = value
- }
- return
- }
- case LString:
- tb.RawSetString(string(v), value)
- return
- }
-
- tb.RawSetH(key, value)
-}
-
-// RawSetInt sets a given LValue at a position `key` without the __newindex metamethod.
-func (tb *LTable) RawSetInt(key int, value LValue) {
- if key < 1 || key >= MaxArrayIndex {
- tb.RawSetH(LNumber(key), value)
- return
- }
- if tb.array == nil {
- tb.array = make([]LValue, 0, 32)
- }
- index := key - 1
- alen := len(tb.array)
- switch {
- case index == alen:
- tb.array = append(tb.array, value)
- case index > alen:
- for i := 0; i < (index - alen); i++ {
- tb.array = append(tb.array, LNil)
- }
- tb.array = append(tb.array, value)
- case index < alen:
- tb.array[index] = value
- }
-}
-
-// RawSetString sets a given LValue to a given string index without the __newindex metamethod.
-func (tb *LTable) RawSetString(key string, value LValue) {
- if tb.strdict == nil {
- tb.strdict = make(map[string]LValue, defaultHashCap)
- }
- if tb.keys == nil {
- tb.keys = []LValue{}
- tb.k2i = map[LValue]int{}
- }
-
- if value == LNil {
- // TODO tb.keys and tb.k2i should also be removed
- delete(tb.strdict, key)
- } else {
- tb.strdict[key] = value
- lkey := LString(key)
- if _, ok := tb.k2i[lkey]; !ok {
- tb.k2i[lkey] = len(tb.keys)
- tb.keys = append(tb.keys, lkey)
- }
- }
-}
-
-// RawSetH sets a given LValue to a given index without the __newindex metamethod.
-func (tb *LTable) RawSetH(key LValue, value LValue) {
- if s, ok := key.(LString); ok {
- tb.RawSetString(string(s), value)
- return
- }
- if tb.dict == nil {
- tb.dict = make(map[LValue]LValue, len(tb.strdict))
- }
- if tb.keys == nil {
- tb.keys = []LValue{}
- tb.k2i = map[LValue]int{}
- }
-
- if value == LNil {
- // TODO tb.keys and tb.k2i should also be removed
- delete(tb.dict, key)
- } else {
- tb.dict[key] = value
- if _, ok := tb.k2i[key]; !ok {
- tb.k2i[key] = len(tb.keys)
- tb.keys = append(tb.keys, key)
- }
- }
-}
-
-// RawGet returns an LValue associated with a given key without __index metamethod.
-func (tb *LTable) RawGet(key LValue) LValue {
- switch v := key.(type) {
- case LNumber:
- if isArrayKey(v) {
- if tb.array == nil {
- return LNil
- }
- index := int(v) - 1
- if index >= len(tb.array) {
- return LNil
- }
- return tb.array[index]
- }
- case LString:
- if tb.strdict == nil {
- return LNil
- }
- if ret, ok := tb.strdict[string(v)]; ok {
- return ret
- }
- return LNil
- }
- if tb.dict == nil {
- return LNil
- }
- if v, ok := tb.dict[key]; ok {
- return v
- }
- return LNil
-}
-
-// RawGetInt returns an LValue at position `key` without __index metamethod.
-func (tb *LTable) RawGetInt(key int) LValue {
- if tb.array == nil {
- return LNil
- }
- index := int(key) - 1
- if index >= len(tb.array) || index < 0 {
- return LNil
- }
- return tb.array[index]
-}
-
-// RawGet returns an LValue associated with a given key without __index metamethod.
-func (tb *LTable) RawGetH(key LValue) LValue {
- if s, sok := key.(LString); sok {
- if tb.strdict == nil {
- return LNil
- }
- if v, vok := tb.strdict[string(s)]; vok {
- return v
- }
- return LNil
- }
- if tb.dict == nil {
- return LNil
- }
- if v, ok := tb.dict[key]; ok {
- return v
- }
- return LNil
-}
-
-// RawGetString returns an LValue associated with a given key without __index metamethod.
-func (tb *LTable) RawGetString(key string) LValue {
- if tb.strdict == nil {
- return LNil
- }
- if v, vok := tb.strdict[string(key)]; vok {
- return v
- }
- return LNil
-}
-
-// ForEach iterates over this table of elements, yielding each in turn to a given function.
-func (tb *LTable) ForEach(cb func(LValue, LValue)) {
- if tb.array != nil {
- for i, v := range tb.array {
- if v != LNil {
- cb(LNumber(i+1), v)
- }
- }
- }
- if tb.strdict != nil {
- for k, v := range tb.strdict {
- if v != LNil {
- cb(LString(k), v)
- }
- }
- }
- if tb.dict != nil {
- for k, v := range tb.dict {
- if v != LNil {
- cb(k, v)
- }
- }
- }
-}
-
-// This function is equivalent to lua_next ( http://www.lua.org/manual/5.1/manual.html#lua_next ).
-func (tb *LTable) Next(key LValue) (LValue, LValue) {
- init := false
- if key == LNil {
- key = LNumber(0)
- init = true
- }
-
- if init || key != LNumber(0) {
- if kv, ok := key.(LNumber); ok && isInteger(kv) && int(kv) >= 0 && kv < LNumber(MaxArrayIndex) {
- index := int(kv)
- if tb.array != nil {
- for ; index < len(tb.array); index++ {
- if v := tb.array[index]; v != LNil {
- return LNumber(index + 1), v
- }
- }
- }
- if tb.array == nil || index == len(tb.array) {
- if (tb.dict == nil || len(tb.dict) == 0) && (tb.strdict == nil || len(tb.strdict) == 0) {
- return LNil, LNil
- }
- key = tb.keys[0]
- if v := tb.RawGetH(key); v != LNil {
- return key, v
- }
- }
- }
- }
-
- for i := tb.k2i[key] + 1; i < len(tb.keys); i++ {
- key := tb.keys[i]
- if v := tb.RawGetH(key); v != LNil {
- return key, v
- }
- }
- return LNil, LNil
-}
diff --git a/vendor/github.com/yuin/gopher-lua/tablelib.go b/vendor/github.com/yuin/gopher-lua/tablelib.go
deleted file mode 100644
index f3f46070..00000000
--- a/vendor/github.com/yuin/gopher-lua/tablelib.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package lua
-
-import (
- "sort"
-)
-
-func OpenTable(L *LState) int {
- tabmod := L.RegisterModule(TabLibName, tableFuncs)
- L.Push(tabmod)
- return 1
-}
-
-var tableFuncs = map[string]LGFunction{
- "getn": tableGetN,
- "concat": tableConcat,
- "insert": tableInsert,
- "maxn": tableMaxN,
- "remove": tableRemove,
- "sort": tableSort,
-}
-
-func tableSort(L *LState) int {
- tbl := L.CheckTable(1)
- sorter := lValueArraySorter{L, nil, tbl.array}
- if L.GetTop() != 1 {
- sorter.Fn = L.CheckFunction(2)
- }
- sort.Sort(sorter)
- return 0
-}
-
-func tableGetN(L *LState) int {
- L.Push(LNumber(L.CheckTable(1).Len()))
- return 1
-}
-
-func tableMaxN(L *LState) int {
- L.Push(LNumber(L.CheckTable(1).MaxN()))
- return 1
-}
-
-func tableRemove(L *LState) int {
- tbl := L.CheckTable(1)
- if L.GetTop() == 1 {
- L.Push(tbl.Remove(-1))
- } else {
- L.Push(tbl.Remove(L.CheckInt(2)))
- }
- return 1
-}
-
-func tableConcat(L *LState) int {
- tbl := L.CheckTable(1)
- sep := LString(L.OptString(2, ""))
- i := L.OptInt(3, 1)
- j := L.OptInt(4, tbl.Len())
- if L.GetTop() == 3 {
- if i > tbl.Len() || i < 1 {
- L.Push(emptyLString)
- return 1
- }
- }
- i = intMax(intMin(i, tbl.Len()), 1)
- j = intMin(intMin(j, tbl.Len()), tbl.Len())
- if i > j {
- L.Push(emptyLString)
- return 1
- }
- //TODO should flushing?
- retbottom := L.GetTop()
- for ; i <= j; i++ {
- v := tbl.RawGetInt(i)
- if !LVCanConvToString(v) {
- L.RaiseError("invalid value (%s) at index %d in table for concat", v.Type().String(), i)
- }
- L.Push(v)
- if i != j {
- L.Push(sep)
- }
- }
- L.Push(stringConcat(L, L.GetTop()-retbottom, L.reg.Top()-1))
- return 1
-}
-
-func tableInsert(L *LState) int {
- tbl := L.CheckTable(1)
- nargs := L.GetTop()
- if nargs == 1 {
- L.RaiseError("wrong number of arguments")
- }
-
- if L.GetTop() == 2 {
- tbl.Append(L.Get(2))
- return 0
- }
- tbl.Insert(int(L.CheckInt(2)), L.CheckAny(3))
- return 0
-}
-
-//
diff --git a/vendor/github.com/yuin/gopher-lua/utils.go b/vendor/github.com/yuin/gopher-lua/utils.go
deleted file mode 100644
index 2df68dc7..00000000
--- a/vendor/github.com/yuin/gopher-lua/utils.go
+++ /dev/null
@@ -1,265 +0,0 @@
-package lua
-
-import (
- "bufio"
- "fmt"
- "io"
- "reflect"
- "strconv"
- "strings"
- "time"
- "unsafe"
-)
-
-func intMin(a, b int) int {
- if a < b {
- return a
- } else {
- return b
- }
-}
-
-func intMax(a, b int) int {
- if a > b {
- return a
- } else {
- return b
- }
-}
-
-func defaultFormat(v interface{}, f fmt.State, c rune) {
- buf := make([]string, 0, 10)
- buf = append(buf, "%")
- for i := 0; i < 128; i++ {
- if f.Flag(i) {
- buf = append(buf, string(rune(i)))
- }
- }
-
- if w, ok := f.Width(); ok {
- buf = append(buf, strconv.Itoa(w))
- }
- if p, ok := f.Precision(); ok {
- buf = append(buf, "."+strconv.Itoa(p))
- }
- buf = append(buf, string(c))
- format := strings.Join(buf, "")
- fmt.Fprintf(f, format, v)
-}
-
-type flagScanner struct {
- flag byte
- start string
- end string
- buf []byte
- str string
- Length int
- Pos int
- HasFlag bool
- ChangeFlag bool
-}
-
-func newFlagScanner(flag byte, start, end, str string) *flagScanner {
- return &flagScanner{flag, start, end, make([]byte, 0, len(str)), str, len(str), 0, false, false}
-}
-
-func (fs *flagScanner) AppendString(str string) { fs.buf = append(fs.buf, str...) }
-
-func (fs *flagScanner) AppendChar(ch byte) { fs.buf = append(fs.buf, ch) }
-
-func (fs *flagScanner) String() string { return string(fs.buf) }
-
-func (fs *flagScanner) Next() (byte, bool) {
- c := byte('\000')
- fs.ChangeFlag = false
- if fs.Pos == fs.Length {
- if fs.HasFlag {
- fs.AppendString(fs.end)
- }
- return c, true
- } else {
- c = fs.str[fs.Pos]
- if c == fs.flag {
- if fs.Pos < (fs.Length-1) && fs.str[fs.Pos+1] == fs.flag {
- fs.HasFlag = false
- fs.AppendChar(fs.flag)
- fs.Pos += 2
- return fs.Next()
- } else if fs.Pos != fs.Length-1 {
- if fs.HasFlag {
- fs.AppendString(fs.end)
- }
- fs.AppendString(fs.start)
- fs.ChangeFlag = true
- fs.HasFlag = true
- }
- }
- }
- fs.Pos++
- return c, false
-}
-
-var cDateFlagToGo = map[byte]string{
- 'a': "mon", 'A': "Monday", 'b': "Jan", 'B': "January", 'c': "02 Jan 06 15:04 MST", 'd': "02",
- 'F': "2006-01-02", 'H': "15", 'I': "03", 'm': "01", 'M': "04", 'p': "PM", 'P': "pm", 'S': "05",
- 'x': "15/04/05", 'X': "15:04:05", 'y': "06", 'Y': "2006", 'z': "-0700", 'Z': "MST"}
-
-func strftime(t time.Time, cfmt string) string {
- sc := newFlagScanner('%', "", "", cfmt)
- for c, eos := sc.Next(); !eos; c, eos = sc.Next() {
- if !sc.ChangeFlag {
- if sc.HasFlag {
- if v, ok := cDateFlagToGo[c]; ok {
- sc.AppendString(t.Format(v))
- } else {
- switch c {
- case 'w':
- sc.AppendString(fmt.Sprint(int(t.Weekday())))
- default:
- sc.AppendChar('%')
- sc.AppendChar(c)
- }
- }
- sc.HasFlag = false
- } else {
- sc.AppendChar(c)
- }
- }
- }
-
- return sc.String()
-}
-
-func isInteger(v LNumber) bool {
- return float64(v) == float64(int64(v))
- //_, frac := math.Modf(float64(v))
- //return frac == 0.0
-}
-
-func isArrayKey(v LNumber) bool {
- return isInteger(v) && v < LNumber(int((^uint(0))>>1)) && v > LNumber(0) && v < LNumber(MaxArrayIndex)
-}
-
-func parseNumber(number string) (LNumber, error) {
- var value LNumber
- number = strings.Trim(number, " \t\n")
- if v, err := strconv.ParseInt(number, 0, LNumberBit); err != nil {
- if v2, err2 := strconv.ParseFloat(number, LNumberBit); err2 != nil {
- return LNumber(0), err2
- } else {
- value = LNumber(v2)
- }
- } else {
- value = LNumber(v)
- }
- return value, nil
-}
-
-func popenArgs(arg string) (string, []string) {
- cmd := "/bin/sh"
- args := []string{"-c"}
- if LuaOS == "windows" {
- cmd = "C:\\Windows\\system32\\cmd.exe"
- args = []string{"/c"}
- }
- args = append(args, arg)
- return cmd, args
-}
-
-func isGoroutineSafe(lv LValue) bool {
- switch v := lv.(type) {
- case *LFunction, *LUserData, *LState:
- return false
- case *LTable:
- return v.Metatable == LNil
- default:
- return true
- }
-}
-
-func readBufioSize(reader *bufio.Reader, size int64) ([]byte, error, bool) {
- result := []byte{}
- read := int64(0)
- var err error
- var n int
- for read != size {
- buf := make([]byte, size-read)
- n, err = reader.Read(buf)
- if err != nil {
- break
- }
- read += int64(n)
- result = append(result, buf[:n]...)
- }
- e := err
- if e != nil && e == io.EOF {
- e = nil
- }
-
- return result, e, len(result) == 0 && err == io.EOF
-}
-
-func readBufioLine(reader *bufio.Reader) ([]byte, error, bool) {
- result := []byte{}
- var buf []byte
- var err error
- var isprefix bool = true
- for isprefix {
- buf, isprefix, err = reader.ReadLine()
- if err != nil {
- break
- }
- result = append(result, buf...)
- }
- e := err
- if e != nil && e == io.EOF {
- e = nil
- }
-
- return result, e, len(result) == 0 && err == io.EOF
-}
-
-func int2Fb(val int) int {
- e := 0
- x := val
- for x >= 16 {
- x = (x + 1) >> 1
- e++
- }
- if x < 8 {
- return x
- }
- return ((e + 1) << 3) | (x - 8)
-}
-
-func strCmp(s1, s2 string) int {
- len1 := len(s1)
- len2 := len(s2)
- for i := 0; ; i++ {
- c1 := -1
- if i < len1 {
- c1 = int(s1[i])
- }
- c2 := -1
- if i != len2 {
- c2 = int(s2[i])
- }
- switch {
- case c1 < c2:
- return -1
- case c1 > c2:
- return +1
- case c1 < 0:
- return 0
- }
- }
-}
-
-func unsafeFastStringToReadOnlyBytes(s string) (bs []byte) {
- sh := (*reflect.StringHeader)(unsafe.Pointer(&s))
- bh := (*reflect.SliceHeader)(unsafe.Pointer(&bs))
- bh.Data = sh.Data
- bh.Cap = sh.Len
- bh.Len = sh.Len
- return
-}
diff --git a/vendor/github.com/yuin/gopher-lua/value.go b/vendor/github.com/yuin/gopher-lua/value.go
deleted file mode 100644
index 0d4af808..00000000
--- a/vendor/github.com/yuin/gopher-lua/value.go
+++ /dev/null
@@ -1,247 +0,0 @@
-package lua
-
-import (
- "context"
- "fmt"
- "os"
-)
-
-type LValueType int
-
-const (
- LTNil LValueType = iota
- LTBool
- LTNumber
- LTString
- LTFunction
- LTUserData
- LTThread
- LTTable
- LTChannel
-)
-
-var lValueNames = [9]string{"nil", "boolean", "number", "string", "function", "userdata", "thread", "table", "channel"}
-
-func (vt LValueType) String() string {
- return lValueNames[int(vt)]
-}
-
-type LValue interface {
- String() string
- Type() LValueType
- // to reduce `runtime.assertI2T2` costs, this method should be used instead of the type assertion in heavy paths(typically inside the VM).
- assertFloat64() (float64, bool)
- // to reduce `runtime.assertI2T2` costs, this method should be used instead of the type assertion in heavy paths(typically inside the VM).
- assertString() (string, bool)
- // to reduce `runtime.assertI2T2` costs, this method should be used instead of the type assertion in heavy paths(typically inside the VM).
- assertFunction() (*LFunction, bool)
-}
-
-// LVIsFalse returns true if a given LValue is a nil or false otherwise false.
-func LVIsFalse(v LValue) bool { return v == LNil || v == LFalse }
-
-// LVIsFalse returns false if a given LValue is a nil or false otherwise true.
-func LVAsBool(v LValue) bool { return v != LNil && v != LFalse }
-
-// LVAsString returns string representation of a given LValue
-// if the LValue is a string or number, otherwise an empty string.
-func LVAsString(v LValue) string {
- switch sn := v.(type) {
- case LString, LNumber:
- return sn.String()
- default:
- return ""
- }
-}
-
-// LVCanConvToString returns true if a given LValue is a string or number
-// otherwise false.
-func LVCanConvToString(v LValue) bool {
- switch v.(type) {
- case LString, LNumber:
- return true
- default:
- return false
- }
-}
-
-// LVAsNumber tries to convert a given LValue to a number.
-func LVAsNumber(v LValue) LNumber {
- switch lv := v.(type) {
- case LNumber:
- return lv
- case LString:
- if num, err := parseNumber(string(lv)); err == nil {
- return num
- }
- }
- return LNumber(0)
-}
-
-type LNilType struct{}
-
-func (nl *LNilType) String() string { return "nil" }
-func (nl *LNilType) Type() LValueType { return LTNil }
-func (nl *LNilType) assertFloat64() (float64, bool) { return 0, false }
-func (nl *LNilType) assertString() (string, bool) { return "", false }
-func (nl *LNilType) assertFunction() (*LFunction, bool) { return nil, false }
-
-var LNil = LValue(&LNilType{})
-
-type LBool bool
-
-func (bl LBool) String() string {
- if bool(bl) {
- return "true"
- }
- return "false"
-}
-func (bl LBool) Type() LValueType { return LTBool }
-func (bl LBool) assertFloat64() (float64, bool) { return 0, false }
-func (bl LBool) assertString() (string, bool) { return "", false }
-func (bl LBool) assertFunction() (*LFunction, bool) { return nil, false }
-
-var LTrue = LBool(true)
-var LFalse = LBool(false)
-
-type LString string
-
-func (st LString) String() string { return string(st) }
-func (st LString) Type() LValueType { return LTString }
-func (st LString) assertFloat64() (float64, bool) { return 0, false }
-func (st LString) assertString() (string, bool) { return string(st), true }
-func (st LString) assertFunction() (*LFunction, bool) { return nil, false }
-
-// fmt.Formatter interface
-func (st LString) Format(f fmt.State, c rune) {
- switch c {
- case 'd', 'i':
- if nm, err := parseNumber(string(st)); err != nil {
- defaultFormat(nm, f, 'd')
- } else {
- defaultFormat(string(st), f, 's')
- }
- default:
- defaultFormat(string(st), f, c)
- }
-}
-
-func (nm LNumber) String() string {
- if isInteger(nm) {
- return fmt.Sprint(int64(nm))
- }
- return fmt.Sprint(float64(nm))
-}
-
-func (nm LNumber) Type() LValueType { return LTNumber }
-func (nm LNumber) assertFloat64() (float64, bool) { return float64(nm), true }
-func (nm LNumber) assertString() (string, bool) { return "", false }
-func (nm LNumber) assertFunction() (*LFunction, bool) { return nil, false }
-
-// fmt.Formatter interface
-func (nm LNumber) Format(f fmt.State, c rune) {
- switch c {
- case 'q', 's':
- defaultFormat(nm.String(), f, c)
- case 'b', 'c', 'd', 'o', 'x', 'X', 'U':
- defaultFormat(int64(nm), f, c)
- case 'e', 'E', 'f', 'F', 'g', 'G':
- defaultFormat(float64(nm), f, c)
- case 'i':
- defaultFormat(int64(nm), f, 'd')
- default:
- if isInteger(nm) {
- defaultFormat(int64(nm), f, c)
- } else {
- defaultFormat(float64(nm), f, c)
- }
- }
-}
-
-type LTable struct {
- Metatable LValue
-
- array []LValue
- dict map[LValue]LValue
- strdict map[string]LValue
- keys []LValue
- k2i map[LValue]int
-}
-
-func (tb *LTable) String() string { return fmt.Sprintf("table: %p", tb) }
-func (tb *LTable) Type() LValueType { return LTTable }
-func (tb *LTable) assertFloat64() (float64, bool) { return 0, false }
-func (tb *LTable) assertString() (string, bool) { return "", false }
-func (tb *LTable) assertFunction() (*LFunction, bool) { return nil, false }
-
-type LFunction struct {
- IsG bool
- Env *LTable
- Proto *FunctionProto
- GFunction LGFunction
- Upvalues []*Upvalue
-}
-type LGFunction func(*LState) int
-
-func (fn *LFunction) String() string { return fmt.Sprintf("function: %p", fn) }
-func (fn *LFunction) Type() LValueType { return LTFunction }
-func (fn *LFunction) assertFloat64() (float64, bool) { return 0, false }
-func (fn *LFunction) assertString() (string, bool) { return "", false }
-func (fn *LFunction) assertFunction() (*LFunction, bool) { return fn, true }
-
-type Global struct {
- MainThread *LState
- CurrentThread *LState
- Registry *LTable
- Global *LTable
-
- builtinMts map[int]LValue
- tempFiles []*os.File
- gccount int32
-}
-
-type LState struct {
- G *Global
- Parent *LState
- Env *LTable
- Panic func(*LState)
- Dead bool
- Options Options
-
- stop int32
- reg *registry
- stack callFrameStack
- alloc *allocator
- currentFrame *callFrame
- wrapped bool
- uvcache *Upvalue
- hasErrorFunc bool
- mainLoop func(*LState, *callFrame)
- ctx context.Context
-}
-
-func (ls *LState) String() string { return fmt.Sprintf("thread: %p", ls) }
-func (ls *LState) Type() LValueType { return LTThread }
-func (ls *LState) assertFloat64() (float64, bool) { return 0, false }
-func (ls *LState) assertString() (string, bool) { return "", false }
-func (ls *LState) assertFunction() (*LFunction, bool) { return nil, false }
-
-type LUserData struct {
- Value interface{}
- Env *LTable
- Metatable LValue
-}
-
-func (ud *LUserData) String() string { return fmt.Sprintf("userdata: %p", ud) }
-func (ud *LUserData) Type() LValueType { return LTUserData }
-func (ud *LUserData) assertFloat64() (float64, bool) { return 0, false }
-func (ud *LUserData) assertString() (string, bool) { return "", false }
-func (ud *LUserData) assertFunction() (*LFunction, bool) { return nil, false }
-
-type LChannel chan LValue
-
-func (ch LChannel) String() string { return fmt.Sprintf("channel: %p", ch) }
-func (ch LChannel) Type() LValueType { return LTChannel }
-func (ch LChannel) assertFloat64() (float64, bool) { return 0, false }
-func (ch LChannel) assertString() (string, bool) { return "", false }
-func (ch LChannel) assertFunction() (*LFunction, bool) { return nil, false }
diff --git a/vendor/github.com/yuin/gopher-lua/vm.go b/vendor/github.com/yuin/gopher-lua/vm.go
deleted file mode 100644
index f3733f13..00000000
--- a/vendor/github.com/yuin/gopher-lua/vm.go
+++ /dev/null
@@ -1,1726 +0,0 @@
-package lua
-
-////////////////////////////////////////////////////////
-// This file was generated by go-inline. DO NOT EDIT. //
-////////////////////////////////////////////////////////
-
-import (
- "fmt"
- "math"
- "strings"
-)
-
-func mainLoop(L *LState, baseframe *callFrame) {
- var inst uint32
- var cf *callFrame
-
- if L.stack.IsEmpty() {
- return
- }
-
- L.currentFrame = L.stack.Last()
- if L.currentFrame.Fn.IsG {
- callGFunction(L, false)
- return
- }
-
- for {
- cf = L.currentFrame
- inst = cf.Fn.Proto.Code[cf.Pc]
- cf.Pc++
- if jumpTable[int(inst>>26)](L, inst, baseframe) == 1 {
- return
- }
- }
-}
-
-func mainLoopWithContext(L *LState, baseframe *callFrame) {
- var inst uint32
- var cf *callFrame
-
- if L.stack.IsEmpty() {
- return
- }
-
- L.currentFrame = L.stack.Last()
- if L.currentFrame.Fn.IsG {
- callGFunction(L, false)
- return
- }
-
- for {
- cf = L.currentFrame
- inst = cf.Fn.Proto.Code[cf.Pc]
- cf.Pc++
- select {
- case <-L.ctx.Done():
- L.RaiseError(L.ctx.Err().Error())
- return
- default:
- if jumpTable[int(inst>>26)](L, inst, baseframe) == 1 {
- return
- }
- }
- }
-}
-
-// regv is the first target register to copy the return values to.
-// It can be reg.top, indicating that the copied values are going into new registers, or it can be below reg.top
-// Indicating that the values should be within the existing registers.
-// b is the available number of return values + 1.
-// n is the desired number of return values.
-// If n more than the available return values then the extra values are set to nil.
-// When this function returns the top of the registry will be set to regv+n.
-func copyReturnValues(L *LState, regv, start, n, b int) { // +inline-start
- if b == 1 {
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) FillNil(regm, n int) ' in '_state.go'
- {
- rg := L.reg
- regm := regv
- newSize := regm + n
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go'
- {
- requiredSize := newSize
- if requiredSize > cap(rg.array) {
- rg.resize(requiredSize)
- }
- }
- for i := 0; i < n; i++ {
- rg.array[regm+i] = LNil
- }
- // values beyond top don't need to be valid LValues, so setting them to nil is fine
- // setting them to nil rather than LNil lets us invoke the golang memclr opto
- oldtop := rg.top
- rg.top = regm + n
- if rg.top < oldtop {
- nilRange := rg.array[rg.top:oldtop]
- for i := range nilRange {
- nilRange[i] = nil
- }
- }
- }
- } else {
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) CopyRange(regv, start, limit, n int) ' in '_state.go'
- {
- rg := L.reg
- limit := -1
- newSize := regv + n
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go'
- {
- requiredSize := newSize
- if requiredSize > cap(rg.array) {
- rg.resize(requiredSize)
- }
- }
- if limit == -1 || limit > rg.top {
- limit = rg.top
- }
- for i := 0; i < n; i++ {
- srcIdx := start + i
- if srcIdx >= limit || srcIdx < 0 {
- rg.array[regv+i] = LNil
- } else {
- rg.array[regv+i] = rg.array[srcIdx]
- }
- }
-
- // values beyond top don't need to be valid LValues, so setting them to nil is fine
- // setting them to nil rather than LNil lets us invoke the golang memclr opto
- oldtop := rg.top
- rg.top = regv + n
- if rg.top < oldtop {
- nilRange := rg.array[rg.top:oldtop]
- for i := range nilRange {
- nilRange[i] = nil
- }
- }
- }
- if b > 1 && n > (b-1) {
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) FillNil(regm, n int) ' in '_state.go'
- {
- rg := L.reg
- regm := regv + b - 1
- n := n - (b - 1)
- newSize := regm + n
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go'
- {
- requiredSize := newSize
- if requiredSize > cap(rg.array) {
- rg.resize(requiredSize)
- }
- }
- for i := 0; i < n; i++ {
- rg.array[regm+i] = LNil
- }
- // values beyond top don't need to be valid LValues, so setting them to nil is fine
- // setting them to nil rather than LNil lets us invoke the golang memclr opto
- oldtop := rg.top
- rg.top = regm + n
- if rg.top < oldtop {
- nilRange := rg.array[rg.top:oldtop]
- for i := range nilRange {
- nilRange[i] = nil
- }
- }
- }
- }
- }
-} // +inline-end
-
-func switchToParentThread(L *LState, nargs int, haserror bool, kill bool) {
- parent := L.Parent
- if parent == nil {
- L.RaiseError("can not yield from outside of a coroutine")
- }
- L.G.CurrentThread = parent
- L.Parent = nil
- if !L.wrapped {
- if haserror {
- parent.Push(LFalse)
- } else {
- parent.Push(LTrue)
- }
- }
- L.XMoveTo(parent, nargs)
- L.stack.Pop()
- offset := L.currentFrame.LocalBase - L.currentFrame.ReturnBase
- L.currentFrame = L.stack.Last()
- L.reg.SetTop(L.reg.Top() - offset) // remove 'yield' function(including tailcalled functions)
- if kill {
- L.kill()
- }
-}
-
-func callGFunction(L *LState, tailcall bool) bool {
- frame := L.currentFrame
- gfnret := frame.Fn.GFunction(L)
- if tailcall {
- L.currentFrame = L.RemoveCallerFrame()
- }
-
- if gfnret < 0 {
- switchToParentThread(L, L.GetTop(), false, false)
- return true
- }
-
- wantret := frame.NRet
- if wantret == MultRet {
- wantret = gfnret
- }
-
- if tailcall && L.Parent != nil && L.stack.Sp() == 1 {
- switchToParentThread(L, wantret, false, true)
- return true
- }
-
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) CopyRange(regv, start, limit, n int) ' in '_state.go'
- {
- rg := L.reg
- regv := frame.ReturnBase
- start := L.reg.Top() - gfnret
- limit := -1
- n := wantret
- newSize := regv + n
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go'
- {
- requiredSize := newSize
- if requiredSize > cap(rg.array) {
- rg.resize(requiredSize)
- }
- }
- if limit == -1 || limit > rg.top {
- limit = rg.top
- }
- for i := 0; i < n; i++ {
- srcIdx := start + i
- if srcIdx >= limit || srcIdx < 0 {
- rg.array[regv+i] = LNil
- } else {
- rg.array[regv+i] = rg.array[srcIdx]
- }
- }
-
- // values beyond top don't need to be valid LValues, so setting them to nil is fine
- // setting them to nil rather than LNil lets us invoke the golang memclr opto
- oldtop := rg.top
- rg.top = regv + n
- if rg.top < oldtop {
- nilRange := rg.array[rg.top:oldtop]
- for i := range nilRange {
- nilRange[i] = nil
- }
- }
- }
- L.stack.Pop()
- L.currentFrame = L.stack.Last()
- return false
-}
-
-func threadRun(L *LState) {
- if L.stack.IsEmpty() {
- return
- }
-
- defer func() {
- if rcv := recover(); rcv != nil {
- var lv LValue
- if v, ok := rcv.(*ApiError); ok {
- lv = v.Object
- } else {
- lv = LString(fmt.Sprint(rcv))
- }
- if parent := L.Parent; parent != nil {
- if L.wrapped {
- L.Push(lv)
- parent.Panic(L)
- } else {
- L.SetTop(0)
- L.Push(lv)
- switchToParentThread(L, 1, true, true)
- }
- } else {
- panic(rcv)
- }
- }
- }()
- L.mainLoop(L, nil)
-}
-
-type instFunc func(*LState, uint32, *callFrame) int
-
-var jumpTable [opCodeMax + 1]instFunc
-
-func init() {
- jumpTable = [opCodeMax + 1]instFunc{
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_MOVE
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- reg.Set(RA, reg.Get(lbase+B))
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_MOVEN
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- B := int(inst & 0x1ff) //GETB
- C := int(inst>>9) & 0x1ff //GETC
- reg.Set(lbase+A, reg.Get(lbase+B))
- code := cf.Fn.Proto.Code
- pc := cf.Pc
- for i := 0; i < C; i++ {
- inst = code[pc]
- pc++
- A = int(inst>>18) & 0xff //GETA
- B = int(inst & 0x1ff) //GETB
- reg.Set(lbase+A, reg.Get(lbase+B))
- }
- cf.Pc = pc
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_LOADK
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- Bx := int(inst & 0x3ffff) //GETBX
- reg.Set(RA, cf.Fn.Proto.Constants[Bx])
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_LOADBOOL
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- C := int(inst>>9) & 0x1ff //GETC
- if B != 0 {
- reg.Set(RA, LTrue)
- } else {
- reg.Set(RA, LFalse)
- }
- if C != 0 {
- cf.Pc++
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_LOADNIL
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- for i := RA; i <= lbase+B; i++ {
- reg.Set(i, LNil)
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_GETUPVAL
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- reg.Set(RA, cf.Fn.Upvalues[B].Value())
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_GETGLOBAL
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- Bx := int(inst & 0x3ffff) //GETBX
- //reg.Set(RA, L.getField(cf.Fn.Env, cf.Fn.Proto.Constants[Bx]))
- reg.Set(RA, L.getFieldString(cf.Fn.Env, cf.Fn.Proto.stringConstants[Bx]))
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_GETTABLE
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- C := int(inst>>9) & 0x1ff //GETC
- reg.Set(RA, L.getField(reg.Get(lbase+B), L.rkValue(C)))
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_GETTABLEKS
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- C := int(inst>>9) & 0x1ff //GETC
- reg.Set(RA, L.getFieldString(reg.Get(lbase+B), L.rkString(C)))
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_SETGLOBAL
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- Bx := int(inst & 0x3ffff) //GETBX
- //L.setField(cf.Fn.Env, cf.Fn.Proto.Constants[Bx], reg.Get(RA))
- L.setFieldString(cf.Fn.Env, cf.Fn.Proto.stringConstants[Bx], reg.Get(RA))
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_SETUPVAL
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- cf.Fn.Upvalues[B].SetValue(reg.Get(RA))
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_SETTABLE
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- C := int(inst>>9) & 0x1ff //GETC
- L.setField(reg.Get(RA), L.rkValue(B), L.rkValue(C))
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_SETTABLEKS
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- C := int(inst>>9) & 0x1ff //GETC
- L.setFieldString(reg.Get(RA), L.rkString(B), L.rkValue(C))
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_NEWTABLE
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- C := int(inst>>9) & 0x1ff //GETC
- reg.Set(RA, newLTable(B, C))
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_SELF
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- C := int(inst>>9) & 0x1ff //GETC
- selfobj := reg.Get(lbase + B)
- reg.Set(RA, L.getFieldString(selfobj, L.rkString(C)))
- reg.Set(RA+1, selfobj)
- return 0
- },
- opArith, // OP_ADD
- opArith, // OP_SUB
- opArith, // OP_MUL
- opArith, // OP_DIV
- opArith, // OP_MOD
- opArith, // OP_POW
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_UNM
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- unaryv := L.rkValue(B)
- if nm, ok := unaryv.(LNumber); ok {
- reg.SetNumber(RA, -nm)
- } else {
- op := L.metaOp1(unaryv, "__unm")
- if op.Type() == LTFunction {
- reg.Push(op)
- reg.Push(unaryv)
- L.Call(1, 1)
- reg.Set(RA, reg.Pop())
- } else if str, ok1 := unaryv.(LString); ok1 {
- if num, err := parseNumber(string(str)); err == nil {
- reg.Set(RA, -num)
- } else {
- L.RaiseError("__unm undefined")
- }
- } else {
- L.RaiseError("__unm undefined")
- }
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_NOT
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- if LVIsFalse(reg.Get(lbase + B)) {
- reg.Set(RA, LTrue)
- } else {
- reg.Set(RA, LFalse)
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_LEN
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- switch lv := L.rkValue(B).(type) {
- case LString:
- reg.SetNumber(RA, LNumber(len(lv)))
- default:
- op := L.metaOp1(lv, "__len")
- if op.Type() == LTFunction {
- reg.Push(op)
- reg.Push(lv)
- L.Call(1, 1)
- ret := reg.Pop()
- if ret.Type() == LTNumber {
- reg.SetNumber(RA, ret.(LNumber))
- } else {
- reg.SetNumber(RA, LNumber(0))
- }
- } else if lv.Type() == LTTable {
- reg.SetNumber(RA, LNumber(lv.(*LTable).Len()))
- } else {
- L.RaiseError("__len undefined")
- }
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_CONCAT
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- C := int(inst>>9) & 0x1ff //GETC
- RC := lbase + C
- RB := lbase + B
- reg.Set(RA, stringConcat(L, RC-RB+1, RC))
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_JMP
- cf := L.currentFrame
- Sbx := int(inst&0x3ffff) - opMaxArgSbx //GETSBX
- cf.Pc += Sbx
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_EQ
- cf := L.currentFrame
- A := int(inst>>18) & 0xff //GETA
- B := int(inst & 0x1ff) //GETB
- C := int(inst>>9) & 0x1ff //GETC
- ret := equals(L, L.rkValue(B), L.rkValue(C), false)
- v := 1
- if ret {
- v = 0
- }
- if v == A {
- cf.Pc++
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_LT
- cf := L.currentFrame
- A := int(inst>>18) & 0xff //GETA
- B := int(inst & 0x1ff) //GETB
- C := int(inst>>9) & 0x1ff //GETC
- ret := lessThan(L, L.rkValue(B), L.rkValue(C))
- v := 1
- if ret {
- v = 0
- }
- if v == A {
- cf.Pc++
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_LE
- cf := L.currentFrame
- A := int(inst>>18) & 0xff //GETA
- B := int(inst & 0x1ff) //GETB
- C := int(inst>>9) & 0x1ff //GETC
- lhs := L.rkValue(B)
- rhs := L.rkValue(C)
- ret := false
-
- if v1, ok1 := lhs.assertFloat64(); ok1 {
- if v2, ok2 := rhs.assertFloat64(); ok2 {
- ret = v1 <= v2
- } else {
- L.RaiseError("attempt to compare %v with %v", lhs.Type().String(), rhs.Type().String())
- }
- } else {
- if lhs.Type() != rhs.Type() {
- L.RaiseError("attempt to compare %v with %v", lhs.Type().String(), rhs.Type().String())
- }
- switch lhs.Type() {
- case LTString:
- ret = strCmp(string(lhs.(LString)), string(rhs.(LString))) <= 0
- default:
- switch objectRational(L, lhs, rhs, "__le") {
- case 1:
- ret = true
- case 0:
- ret = false
- default:
- ret = !objectRationalWithError(L, rhs, lhs, "__lt")
- }
- }
- }
-
- v := 1
- if ret {
- v = 0
- }
- if v == A {
- cf.Pc++
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_TEST
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- C := int(inst>>9) & 0x1ff //GETC
- if LVAsBool(reg.Get(RA)) == (C == 0) {
- cf.Pc++
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_TESTSET
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- C := int(inst>>9) & 0x1ff //GETC
- if value := reg.Get(lbase + B); LVAsBool(value) != (C == 0) {
- reg.Set(RA, value)
- } else {
- cf.Pc++
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_CALL
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- C := int(inst>>9) & 0x1ff //GETC
- nargs := B - 1
- if B == 0 {
- nargs = reg.Top() - (RA + 1)
- }
- lv := reg.Get(RA)
- nret := C - 1
- var callable *LFunction
- var meta bool
- if fn, ok := lv.assertFunction(); ok {
- callable = fn
- meta = false
- } else {
- callable, meta = L.metaCall(lv)
- }
- // this section is inlined by go-inline
- // source function is 'func (ls *LState) pushCallFrame(cf callFrame, fn LValue, meta bool) ' in '_state.go'
- {
- ls := L
- cf := callFrame{Fn: callable, Pc: 0, Base: RA, LocalBase: RA + 1, ReturnBase: RA, NArgs: nargs, NRet: nret, Parent: cf, TailCall: 0}
- fn := lv
- if meta {
- cf.NArgs++
- ls.reg.Insert(fn, cf.LocalBase)
- }
- if cf.Fn == nil {
- ls.RaiseError("attempt to call a non-function object")
- }
- if ls.stack.IsFull() {
- ls.RaiseError("stack overflow")
- }
- ls.stack.Push(cf)
- newcf := ls.stack.Last()
- // this section is inlined by go-inline
- // source function is 'func (ls *LState) initCallFrame(cf *callFrame) ' in '_state.go'
- {
- cf := newcf
- if cf.Fn.IsG {
- ls.reg.SetTop(cf.LocalBase + cf.NArgs)
- } else {
- proto := cf.Fn.Proto
- nargs := cf.NArgs
- np := int(proto.NumParameters)
- if nargs < np {
- // default any missing arguments to nil
- newSize := cf.LocalBase + np
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go'
- {
- rg := ls.reg
- requiredSize := newSize
- if requiredSize > cap(rg.array) {
- rg.resize(requiredSize)
- }
- }
- for i := nargs; i < np; i++ {
- ls.reg.array[cf.LocalBase+i] = LNil
- }
- nargs = np
- ls.reg.top = newSize
- }
-
- if (proto.IsVarArg & VarArgIsVarArg) == 0 {
- if nargs < int(proto.NumUsedRegisters) {
- nargs = int(proto.NumUsedRegisters)
- }
- newSize := cf.LocalBase + nargs
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go'
- {
- rg := ls.reg
- requiredSize := newSize
- if requiredSize > cap(rg.array) {
- rg.resize(requiredSize)
- }
- }
- for i := np; i < nargs; i++ {
- ls.reg.array[cf.LocalBase+i] = LNil
- }
- ls.reg.top = cf.LocalBase + int(proto.NumUsedRegisters)
- } else {
- /* swap vararg positions:
- closure
- namedparam1 <- lbase
- namedparam2
- vararg1
- vararg2
-
- TO
-
- closure
- nil
- nil
- vararg1
- vararg2
- namedparam1 <- lbase
- namedparam2
- */
- nvarargs := nargs - np
- if nvarargs < 0 {
- nvarargs = 0
- }
-
- ls.reg.SetTop(cf.LocalBase + nargs + np)
- for i := 0; i < np; i++ {
- //ls.reg.Set(cf.LocalBase+nargs+i, ls.reg.Get(cf.LocalBase+i))
- ls.reg.array[cf.LocalBase+nargs+i] = ls.reg.array[cf.LocalBase+i]
- //ls.reg.Set(cf.LocalBase+i, LNil)
- ls.reg.array[cf.LocalBase+i] = LNil
- }
-
- if CompatVarArg {
- ls.reg.SetTop(cf.LocalBase + nargs + np + 1)
- if (proto.IsVarArg & VarArgNeedsArg) != 0 {
- argtb := newLTable(nvarargs, 0)
- for i := 0; i < nvarargs; i++ {
- argtb.RawSetInt(i+1, ls.reg.Get(cf.LocalBase+np+i))
- }
- argtb.RawSetString("n", LNumber(nvarargs))
- //ls.reg.Set(cf.LocalBase+nargs+np, argtb)
- ls.reg.array[cf.LocalBase+nargs+np] = argtb
- } else {
- ls.reg.array[cf.LocalBase+nargs+np] = LNil
- }
- }
- cf.LocalBase += nargs
- maxreg := cf.LocalBase + int(proto.NumUsedRegisters)
- ls.reg.SetTop(maxreg)
- }
- }
- }
- ls.currentFrame = newcf
- }
- if callable.IsG && callGFunction(L, false) {
- return 1
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_TAILCALL
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- nargs := B - 1
- if B == 0 {
- nargs = reg.Top() - (RA + 1)
- }
- lv := reg.Get(RA)
- var callable *LFunction
- var meta bool
- if fn, ok := lv.assertFunction(); ok {
- callable = fn
- meta = false
- } else {
- callable, meta = L.metaCall(lv)
- }
- if callable == nil {
- L.RaiseError("attempt to call a non-function object")
- }
- // this section is inlined by go-inline
- // source function is 'func (ls *LState) closeUpvalues(idx int) ' in '_state.go'
- {
- ls := L
- idx := lbase
- if ls.uvcache != nil {
- var prev *Upvalue
- for uv := ls.uvcache; uv != nil; uv = uv.next {
- if uv.index >= idx {
- if prev != nil {
- prev.next = nil
- } else {
- ls.uvcache = nil
- }
- uv.Close()
- }
- prev = uv
- }
- }
- }
- if callable.IsG {
- luaframe := cf
- L.pushCallFrame(callFrame{
- Fn: callable,
- Pc: 0,
- Base: RA,
- LocalBase: RA + 1,
- ReturnBase: cf.ReturnBase,
- NArgs: nargs,
- NRet: cf.NRet,
- Parent: cf,
- TailCall: 0,
- }, lv, meta)
- if callGFunction(L, true) {
- return 1
- }
- if L.currentFrame == nil || L.currentFrame.Fn.IsG || luaframe == baseframe {
- return 1
- }
- } else {
- base := cf.Base
- cf.Fn = callable
- cf.Pc = 0
- cf.Base = RA
- cf.LocalBase = RA + 1
- cf.ReturnBase = cf.ReturnBase
- cf.NArgs = nargs
- cf.NRet = cf.NRet
- cf.TailCall++
- lbase := cf.LocalBase
- if meta {
- cf.NArgs++
- L.reg.Insert(lv, cf.LocalBase)
- }
- // this section is inlined by go-inline
- // source function is 'func (ls *LState) initCallFrame(cf *callFrame) ' in '_state.go'
- {
- ls := L
- if cf.Fn.IsG {
- ls.reg.SetTop(cf.LocalBase + cf.NArgs)
- } else {
- proto := cf.Fn.Proto
- nargs := cf.NArgs
- np := int(proto.NumParameters)
- if nargs < np {
- // default any missing arguments to nil
- newSize := cf.LocalBase + np
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go'
- {
- rg := ls.reg
- requiredSize := newSize
- if requiredSize > cap(rg.array) {
- rg.resize(requiredSize)
- }
- }
- for i := nargs; i < np; i++ {
- ls.reg.array[cf.LocalBase+i] = LNil
- }
- nargs = np
- ls.reg.top = newSize
- }
-
- if (proto.IsVarArg & VarArgIsVarArg) == 0 {
- if nargs < int(proto.NumUsedRegisters) {
- nargs = int(proto.NumUsedRegisters)
- }
- newSize := cf.LocalBase + nargs
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go'
- {
- rg := ls.reg
- requiredSize := newSize
- if requiredSize > cap(rg.array) {
- rg.resize(requiredSize)
- }
- }
- for i := np; i < nargs; i++ {
- ls.reg.array[cf.LocalBase+i] = LNil
- }
- ls.reg.top = cf.LocalBase + int(proto.NumUsedRegisters)
- } else {
- /* swap vararg positions:
- closure
- namedparam1 <- lbase
- namedparam2
- vararg1
- vararg2
-
- TO
-
- closure
- nil
- nil
- vararg1
- vararg2
- namedparam1 <- lbase
- namedparam2
- */
- nvarargs := nargs - np
- if nvarargs < 0 {
- nvarargs = 0
- }
-
- ls.reg.SetTop(cf.LocalBase + nargs + np)
- for i := 0; i < np; i++ {
- //ls.reg.Set(cf.LocalBase+nargs+i, ls.reg.Get(cf.LocalBase+i))
- ls.reg.array[cf.LocalBase+nargs+i] = ls.reg.array[cf.LocalBase+i]
- //ls.reg.Set(cf.LocalBase+i, LNil)
- ls.reg.array[cf.LocalBase+i] = LNil
- }
-
- if CompatVarArg {
- ls.reg.SetTop(cf.LocalBase + nargs + np + 1)
- if (proto.IsVarArg & VarArgNeedsArg) != 0 {
- argtb := newLTable(nvarargs, 0)
- for i := 0; i < nvarargs; i++ {
- argtb.RawSetInt(i+1, ls.reg.Get(cf.LocalBase+np+i))
- }
- argtb.RawSetString("n", LNumber(nvarargs))
- //ls.reg.Set(cf.LocalBase+nargs+np, argtb)
- ls.reg.array[cf.LocalBase+nargs+np] = argtb
- } else {
- ls.reg.array[cf.LocalBase+nargs+np] = LNil
- }
- }
- cf.LocalBase += nargs
- maxreg := cf.LocalBase + int(proto.NumUsedRegisters)
- ls.reg.SetTop(maxreg)
- }
- }
- }
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) CopyRange(regv, start, limit, n int) ' in '_state.go'
- {
- rg := L.reg
- regv := base
- start := RA
- limit := -1
- n := reg.Top() - RA - 1
- newSize := regv + n
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go'
- {
- requiredSize := newSize
- if requiredSize > cap(rg.array) {
- rg.resize(requiredSize)
- }
- }
- if limit == -1 || limit > rg.top {
- limit = rg.top
- }
- for i := 0; i < n; i++ {
- srcIdx := start + i
- if srcIdx >= limit || srcIdx < 0 {
- rg.array[regv+i] = LNil
- } else {
- rg.array[regv+i] = rg.array[srcIdx]
- }
- }
-
- // values beyond top don't need to be valid LValues, so setting them to nil is fine
- // setting them to nil rather than LNil lets us invoke the golang memclr opto
- oldtop := rg.top
- rg.top = regv + n
- if rg.top < oldtop {
- nilRange := rg.array[rg.top:oldtop]
- for i := range nilRange {
- nilRange[i] = nil
- }
- }
- }
- cf.Base = base
- cf.LocalBase = base + (cf.LocalBase - lbase + 1)
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_RETURN
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- // this section is inlined by go-inline
- // source function is 'func (ls *LState) closeUpvalues(idx int) ' in '_state.go'
- {
- ls := L
- idx := lbase
- if ls.uvcache != nil {
- var prev *Upvalue
- for uv := ls.uvcache; uv != nil; uv = uv.next {
- if uv.index >= idx {
- if prev != nil {
- prev.next = nil
- } else {
- ls.uvcache = nil
- }
- uv.Close()
- }
- prev = uv
- }
- }
- }
- nret := B - 1
- if B == 0 {
- nret = reg.Top() - RA
- }
- n := cf.NRet
- if cf.NRet == MultRet {
- n = nret
- }
-
- if L.Parent != nil && L.stack.Sp() == 1 {
- // this section is inlined by go-inline
- // source function is 'func copyReturnValues(L *LState, regv, start, n, b int) ' in '_vm.go'
- {
- regv := reg.Top()
- start := RA
- b := B
- if b == 1 {
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) FillNil(regm, n int) ' in '_state.go'
- {
- rg := L.reg
- regm := regv
- newSize := regm + n
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go'
- {
- requiredSize := newSize
- if requiredSize > cap(rg.array) {
- rg.resize(requiredSize)
- }
- }
- for i := 0; i < n; i++ {
- rg.array[regm+i] = LNil
- }
- // values beyond top don't need to be valid LValues, so setting them to nil is fine
- // setting them to nil rather than LNil lets us invoke the golang memclr opto
- oldtop := rg.top
- rg.top = regm + n
- if rg.top < oldtop {
- nilRange := rg.array[rg.top:oldtop]
- for i := range nilRange {
- nilRange[i] = nil
- }
- }
- }
- } else {
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) CopyRange(regv, start, limit, n int) ' in '_state.go'
- {
- rg := L.reg
- limit := -1
- newSize := regv + n
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go'
- {
- requiredSize := newSize
- if requiredSize > cap(rg.array) {
- rg.resize(requiredSize)
- }
- }
- if limit == -1 || limit > rg.top {
- limit = rg.top
- }
- for i := 0; i < n; i++ {
- srcIdx := start + i
- if srcIdx >= limit || srcIdx < 0 {
- rg.array[regv+i] = LNil
- } else {
- rg.array[regv+i] = rg.array[srcIdx]
- }
- }
-
- // values beyond top don't need to be valid LValues, so setting them to nil is fine
- // setting them to nil rather than LNil lets us invoke the golang memclr opto
- oldtop := rg.top
- rg.top = regv + n
- if rg.top < oldtop {
- nilRange := rg.array[rg.top:oldtop]
- for i := range nilRange {
- nilRange[i] = nil
- }
- }
- }
- if b > 1 && n > (b-1) {
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) FillNil(regm, n int) ' in '_state.go'
- {
- rg := L.reg
- regm := regv + b - 1
- n := n - (b - 1)
- newSize := regm + n
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go'
- {
- requiredSize := newSize
- if requiredSize > cap(rg.array) {
- rg.resize(requiredSize)
- }
- }
- for i := 0; i < n; i++ {
- rg.array[regm+i] = LNil
- }
- // values beyond top don't need to be valid LValues, so setting them to nil is fine
- // setting them to nil rather than LNil lets us invoke the golang memclr opto
- oldtop := rg.top
- rg.top = regm + n
- if rg.top < oldtop {
- nilRange := rg.array[rg.top:oldtop]
- for i := range nilRange {
- nilRange[i] = nil
- }
- }
- }
- }
- }
- }
- switchToParentThread(L, n, false, true)
- return 1
- }
- islast := baseframe == L.stack.Pop() || L.stack.IsEmpty()
- // this section is inlined by go-inline
- // source function is 'func copyReturnValues(L *LState, regv, start, n, b int) ' in '_vm.go'
- {
- regv := cf.ReturnBase
- start := RA
- b := B
- if b == 1 {
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) FillNil(regm, n int) ' in '_state.go'
- {
- rg := L.reg
- regm := regv
- newSize := regm + n
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go'
- {
- requiredSize := newSize
- if requiredSize > cap(rg.array) {
- rg.resize(requiredSize)
- }
- }
- for i := 0; i < n; i++ {
- rg.array[regm+i] = LNil
- }
- // values beyond top don't need to be valid LValues, so setting them to nil is fine
- // setting them to nil rather than LNil lets us invoke the golang memclr opto
- oldtop := rg.top
- rg.top = regm + n
- if rg.top < oldtop {
- nilRange := rg.array[rg.top:oldtop]
- for i := range nilRange {
- nilRange[i] = nil
- }
- }
- }
- } else {
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) CopyRange(regv, start, limit, n int) ' in '_state.go'
- {
- rg := L.reg
- limit := -1
- newSize := regv + n
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go'
- {
- requiredSize := newSize
- if requiredSize > cap(rg.array) {
- rg.resize(requiredSize)
- }
- }
- if limit == -1 || limit > rg.top {
- limit = rg.top
- }
- for i := 0; i < n; i++ {
- srcIdx := start + i
- if srcIdx >= limit || srcIdx < 0 {
- rg.array[regv+i] = LNil
- } else {
- rg.array[regv+i] = rg.array[srcIdx]
- }
- }
-
- // values beyond top don't need to be valid LValues, so setting them to nil is fine
- // setting them to nil rather than LNil lets us invoke the golang memclr opto
- oldtop := rg.top
- rg.top = regv + n
- if rg.top < oldtop {
- nilRange := rg.array[rg.top:oldtop]
- for i := range nilRange {
- nilRange[i] = nil
- }
- }
- }
- if b > 1 && n > (b-1) {
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) FillNil(regm, n int) ' in '_state.go'
- {
- rg := L.reg
- regm := regv + b - 1
- n := n - (b - 1)
- newSize := regm + n
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go'
- {
- requiredSize := newSize
- if requiredSize > cap(rg.array) {
- rg.resize(requiredSize)
- }
- }
- for i := 0; i < n; i++ {
- rg.array[regm+i] = LNil
- }
- // values beyond top don't need to be valid LValues, so setting them to nil is fine
- // setting them to nil rather than LNil lets us invoke the golang memclr opto
- oldtop := rg.top
- rg.top = regm + n
- if rg.top < oldtop {
- nilRange := rg.array[rg.top:oldtop]
- for i := range nilRange {
- nilRange[i] = nil
- }
- }
- }
- }
- }
- }
- L.currentFrame = L.stack.Last()
- if islast || L.currentFrame == nil || L.currentFrame.Fn.IsG {
- return 1
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_FORLOOP
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- if init, ok1 := reg.Get(RA).assertFloat64(); ok1 {
- if limit, ok2 := reg.Get(RA + 1).assertFloat64(); ok2 {
- if step, ok3 := reg.Get(RA + 2).assertFloat64(); ok3 {
- init += step
- reg.SetNumber(RA, LNumber(init))
- if (step > 0 && init <= limit) || (step <= 0 && init >= limit) {
- Sbx := int(inst&0x3ffff) - opMaxArgSbx //GETSBX
- cf.Pc += Sbx
- reg.SetNumber(RA+3, LNumber(init))
- } else {
- reg.SetTop(RA + 1)
- }
- } else {
- L.RaiseError("for statement step must be a number")
- }
- } else {
- L.RaiseError("for statement limit must be a number")
- }
- } else {
- L.RaiseError("for statement init must be a number")
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_FORPREP
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- Sbx := int(inst&0x3ffff) - opMaxArgSbx //GETSBX
- if init, ok1 := reg.Get(RA).assertFloat64(); ok1 {
- if step, ok2 := reg.Get(RA + 2).assertFloat64(); ok2 {
- reg.SetNumber(RA, LNumber(init-step))
- } else {
- L.RaiseError("for statement step must be a number")
- }
- } else {
- L.RaiseError("for statement init must be a number")
- }
- cf.Pc += Sbx
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_TFORLOOP
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- C := int(inst>>9) & 0x1ff //GETC
- nret := C
- reg.SetTop(RA + 3 + 2)
- reg.Set(RA+3+2, reg.Get(RA+2))
- reg.Set(RA+3+1, reg.Get(RA+1))
- reg.Set(RA+3, reg.Get(RA))
- L.callR(2, nret, RA+3)
- if value := reg.Get(RA + 3); value != LNil {
- reg.Set(RA+2, value)
- pc := cf.Fn.Proto.Code[cf.Pc]
- cf.Pc += int(pc&0x3ffff) - opMaxArgSbx
- }
- cf.Pc++
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_SETLIST
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- C := int(inst>>9) & 0x1ff //GETC
- if C == 0 {
- C = int(cf.Fn.Proto.Code[cf.Pc])
- cf.Pc++
- }
- offset := (C - 1) * FieldsPerFlush
- table := reg.Get(RA).(*LTable)
- nelem := B
- if B == 0 {
- nelem = reg.Top() - RA - 1
- }
- for i := 1; i <= nelem; i++ {
- table.RawSetInt(offset+i, reg.Get(RA+i))
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_CLOSE
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- // this section is inlined by go-inline
- // source function is 'func (ls *LState) closeUpvalues(idx int) ' in '_state.go'
- {
- ls := L
- idx := RA
- if ls.uvcache != nil {
- var prev *Upvalue
- for uv := ls.uvcache; uv != nil; uv = uv.next {
- if uv.index >= idx {
- if prev != nil {
- prev.next = nil
- } else {
- ls.uvcache = nil
- }
- uv.Close()
- }
- prev = uv
- }
- }
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_CLOSURE
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- Bx := int(inst & 0x3ffff) //GETBX
- proto := cf.Fn.Proto.FunctionPrototypes[Bx]
- closure := newLFunctionL(proto, cf.Fn.Env, int(proto.NumUpvalues))
- reg.Set(RA, closure)
- for i := 0; i < int(proto.NumUpvalues); i++ {
- inst = cf.Fn.Proto.Code[cf.Pc]
- cf.Pc++
- B := opGetArgB(inst)
- switch opGetOpCode(inst) {
- case OP_MOVE:
- closure.Upvalues[i] = L.findUpvalue(lbase + B)
- case OP_GETUPVAL:
- closure.Upvalues[i] = cf.Fn.Upvalues[B]
- }
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_VARARG
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- B := int(inst & 0x1ff) //GETB
- nparams := int(cf.Fn.Proto.NumParameters)
- nvarargs := cf.NArgs - nparams
- if nvarargs < 0 {
- nvarargs = 0
- }
- nwant := B - 1
- if B == 0 {
- nwant = nvarargs
- }
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) CopyRange(regv, start, limit, n int) ' in '_state.go'
- {
- rg := reg
- regv := RA
- start := cf.Base + nparams + 1
- limit := cf.LocalBase
- n := nwant
- newSize := regv + n
- // this section is inlined by go-inline
- // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go'
- {
- requiredSize := newSize
- if requiredSize > cap(rg.array) {
- rg.resize(requiredSize)
- }
- }
- if limit == -1 || limit > rg.top {
- limit = rg.top
- }
- for i := 0; i < n; i++ {
- srcIdx := start + i
- if srcIdx >= limit || srcIdx < 0 {
- rg.array[regv+i] = LNil
- } else {
- rg.array[regv+i] = rg.array[srcIdx]
- }
- }
-
- // values beyond top don't need to be valid LValues, so setting them to nil is fine
- // setting them to nil rather than LNil lets us invoke the golang memclr opto
- oldtop := rg.top
- rg.top = regv + n
- if rg.top < oldtop {
- nilRange := rg.array[rg.top:oldtop]
- for i := range nilRange {
- nilRange[i] = nil
- }
- }
- }
- return 0
- },
- func(L *LState, inst uint32, baseframe *callFrame) int { //OP_NOP
- return 0
- },
- }
-}
-
-func opArith(L *LState, inst uint32, baseframe *callFrame) int { //OP_ADD, OP_SUB, OP_MUL, OP_DIV, OP_MOD, OP_POW
- reg := L.reg
- cf := L.currentFrame
- lbase := cf.LocalBase
- A := int(inst>>18) & 0xff //GETA
- RA := lbase + A
- opcode := int(inst >> 26) //GETOPCODE
- B := int(inst & 0x1ff) //GETB
- C := int(inst>>9) & 0x1ff //GETC
- lhs := L.rkValue(B)
- rhs := L.rkValue(C)
- v1, ok1 := lhs.assertFloat64()
- v2, ok2 := rhs.assertFloat64()
- if ok1 && ok2 {
- reg.SetNumber(RA, numberArith(L, opcode, LNumber(v1), LNumber(v2)))
- } else {
- reg.Set(RA, objectArith(L, opcode, lhs, rhs))
- }
- return 0
-}
-
-func luaModulo(lhs, rhs LNumber) LNumber {
- flhs := float64(lhs)
- frhs := float64(rhs)
- v := math.Mod(flhs, frhs)
- if flhs < 0 || frhs < 0 && !(flhs < 0 && frhs < 0) {
- v += frhs
- }
- return LNumber(v)
-}
-
-func numberArith(L *LState, opcode int, lhs, rhs LNumber) LNumber {
- switch opcode {
- case OP_ADD:
- return lhs + rhs
- case OP_SUB:
- return lhs - rhs
- case OP_MUL:
- return lhs * rhs
- case OP_DIV:
- return lhs / rhs
- case OP_MOD:
- return luaModulo(lhs, rhs)
- case OP_POW:
- flhs := float64(lhs)
- frhs := float64(rhs)
- return LNumber(math.Pow(flhs, frhs))
- }
- panic("should not reach here")
- return LNumber(0)
-}
-
-func objectArith(L *LState, opcode int, lhs, rhs LValue) LValue {
- event := ""
- switch opcode {
- case OP_ADD:
- event = "__add"
- case OP_SUB:
- event = "__sub"
- case OP_MUL:
- event = "__mul"
- case OP_DIV:
- event = "__div"
- case OP_MOD:
- event = "__mod"
- case OP_POW:
- event = "__pow"
- }
- op := L.metaOp2(lhs, rhs, event)
- if op.Type() == LTFunction {
- L.reg.Push(op)
- L.reg.Push(lhs)
- L.reg.Push(rhs)
- L.Call(2, 1)
- return L.reg.Pop()
- }
- if str, ok := lhs.(LString); ok {
- if lnum, err := parseNumber(string(str)); err == nil {
- lhs = lnum
- }
- }
- if str, ok := rhs.(LString); ok {
- if rnum, err := parseNumber(string(str)); err == nil {
- rhs = rnum
- }
- }
- if v1, ok1 := lhs.assertFloat64(); ok1 {
- if v2, ok2 := rhs.assertFloat64(); ok2 {
- return numberArith(L, opcode, LNumber(v1), LNumber(v2))
- }
- }
- L.RaiseError(fmt.Sprintf("cannot perform %v operation between %v and %v",
- strings.TrimLeft(event, "_"), lhs.Type().String(), rhs.Type().String()))
-
- return LNil
-}
-
-func stringConcat(L *LState, total, last int) LValue {
- rhs := L.reg.Get(last)
- total--
- for i := last - 1; total > 0; {
- lhs := L.reg.Get(i)
- if !(LVCanConvToString(lhs) && LVCanConvToString(rhs)) {
- op := L.metaOp2(lhs, rhs, "__concat")
- if op.Type() == LTFunction {
- L.reg.Push(op)
- L.reg.Push(lhs)
- L.reg.Push(rhs)
- L.Call(2, 1)
- rhs = L.reg.Pop()
- total--
- i--
- } else {
- L.RaiseError("cannot perform concat operation between %v and %v", lhs.Type().String(), rhs.Type().String())
- return LNil
- }
- } else {
- buf := make([]string, total+1)
- buf[total] = LVAsString(rhs)
- for total > 0 {
- lhs = L.reg.Get(i)
- if !LVCanConvToString(lhs) {
- break
- }
- buf[total-1] = LVAsString(lhs)
- i--
- total--
- }
- rhs = LString(strings.Join(buf, ""))
- }
- }
- return rhs
-}
-
-func lessThan(L *LState, lhs, rhs LValue) bool {
- // optimization for numbers
- if v1, ok1 := lhs.assertFloat64(); ok1 {
- if v2, ok2 := rhs.assertFloat64(); ok2 {
- return v1 < v2
- }
- L.RaiseError("attempt to compare %v with %v", lhs.Type().String(), rhs.Type().String())
- }
- if lhs.Type() != rhs.Type() {
- L.RaiseError("attempt to compare %v with %v", lhs.Type().String(), rhs.Type().String())
- return false
- }
- ret := false
- switch lhs.Type() {
- case LTString:
- ret = strCmp(string(lhs.(LString)), string(rhs.(LString))) < 0
- default:
- ret = objectRationalWithError(L, lhs, rhs, "__lt")
- }
- return ret
-}
-
-func equals(L *LState, lhs, rhs LValue, raw bool) bool {
- if lhs.Type() != rhs.Type() {
- return false
- }
-
- ret := false
- switch lhs.Type() {
- case LTNil:
- ret = true
- case LTNumber:
- v1, _ := lhs.assertFloat64()
- v2, _ := rhs.assertFloat64()
- ret = v1 == v2
- case LTBool:
- ret = bool(lhs.(LBool)) == bool(rhs.(LBool))
- case LTString:
- ret = string(lhs.(LString)) == string(rhs.(LString))
- case LTUserData, LTTable:
- if lhs == rhs {
- ret = true
- } else if !raw {
- switch objectRational(L, lhs, rhs, "__eq") {
- case 1:
- ret = true
- default:
- ret = false
- }
- }
- default:
- ret = lhs == rhs
- }
- return ret
-}
-
-func objectRationalWithError(L *LState, lhs, rhs LValue, event string) bool {
- switch objectRational(L, lhs, rhs, event) {
- case 1:
- return true
- case 0:
- return false
- }
- L.RaiseError("attempt to compare %v with %v", lhs.Type().String(), rhs.Type().String())
- return false
-}
-
-func objectRational(L *LState, lhs, rhs LValue, event string) int {
- m1 := L.metaOp1(lhs, event)
- m2 := L.metaOp1(rhs, event)
- if m1.Type() == LTFunction && m1 == m2 {
- L.reg.Push(m1)
- L.reg.Push(lhs)
- L.reg.Push(rhs)
- L.Call(2, 1)
- if LVAsBool(L.reg.Pop()) {
- return 1
- }
- return 0
- }
- return -1
-}
diff --git a/vendor/go.etcd.io/bbolt/.gitignore b/vendor/go.etcd.io/bbolt/.gitignore
deleted file mode 100644
index 3bcd8cba..00000000
--- a/vendor/go.etcd.io/bbolt/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-*.prof
-*.test
-*.swp
-/bin/
-cover.out
diff --git a/vendor/go.etcd.io/bbolt/.travis.yml b/vendor/go.etcd.io/bbolt/.travis.yml
deleted file mode 100644
index 257dfdfe..00000000
--- a/vendor/go.etcd.io/bbolt/.travis.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-language: go
-go_import_path: go.etcd.io/bbolt
-
-sudo: false
-
-go:
-- 1.12
-
-before_install:
-- go get -v honnef.co/go/tools/...
-- go get -v github.com/kisielk/errcheck
-
-script:
-- make fmt
-- make test
-- make race
-# - make errcheck
diff --git a/vendor/go.etcd.io/bbolt/LICENSE b/vendor/go.etcd.io/bbolt/LICENSE
deleted file mode 100644
index 004e77fe..00000000
--- a/vendor/go.etcd.io/bbolt/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013 Ben Johnson
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/go.etcd.io/bbolt/Makefile b/vendor/go.etcd.io/bbolt/Makefile
deleted file mode 100644
index 2968aaa6..00000000
--- a/vendor/go.etcd.io/bbolt/Makefile
+++ /dev/null
@@ -1,38 +0,0 @@
-BRANCH=`git rev-parse --abbrev-ref HEAD`
-COMMIT=`git rev-parse --short HEAD`
-GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
-
-default: build
-
-race:
- @TEST_FREELIST_TYPE=hashmap go test -v -race -test.run="TestSimulate_(100op|1000op)"
- @echo "array freelist test"
- @TEST_FREELIST_TYPE=array go test -v -race -test.run="TestSimulate_(100op|1000op)"
-
-fmt:
- !(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]')
-
-# go get honnef.co/go/tools/simple
-gosimple:
- gosimple ./...
-
-# go get honnef.co/go/tools/unused
-unused:
- unused ./...
-
-# go get github.com/kisielk/errcheck
-errcheck:
- @errcheck -ignorepkg=bytes -ignore=os:Remove go.etcd.io/bbolt
-
-test:
- TEST_FREELIST_TYPE=hashmap go test -timeout 20m -v -coverprofile cover.out -covermode atomic
- # Note: gets "program not an importable package" in out of path builds
- TEST_FREELIST_TYPE=hashmap go test -v ./cmd/bbolt
-
- @echo "array freelist test"
-
- @TEST_FREELIST_TYPE=array go test -timeout 20m -v -coverprofile cover.out -covermode atomic
- # Note: gets "program not an importable package" in out of path builds
- @TEST_FREELIST_TYPE=array go test -v ./cmd/bbolt
-
-.PHONY: race fmt errcheck test gosimple unused
diff --git a/vendor/go.etcd.io/bbolt/README.md b/vendor/go.etcd.io/bbolt/README.md
deleted file mode 100644
index c9e64b1a..00000000
--- a/vendor/go.etcd.io/bbolt/README.md
+++ /dev/null
@@ -1,957 +0,0 @@
-bbolt
-=====
-
-[![Go Report Card](https://goreportcard.com/badge/github.com/etcd-io/bbolt?style=flat-square)](https://goreportcard.com/report/github.com/etcd-io/bbolt)
-[![Coverage](https://codecov.io/gh/etcd-io/bbolt/branch/master/graph/badge.svg)](https://codecov.io/gh/etcd-io/bbolt)
-[![Build Status Travis](https://img.shields.io/travis/etcd-io/bboltlabs.svg?style=flat-square&&branch=master)](https://travis-ci.com/etcd-io/bbolt)
-[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/etcd-io/bbolt)
-[![Releases](https://img.shields.io/github/release/etcd-io/bbolt/all.svg?style=flat-square)](https://github.com/etcd-io/bbolt/releases)
-[![LICENSE](https://img.shields.io/github/license/etcd-io/bbolt.svg?style=flat-square)](https://github.com/etcd-io/bbolt/blob/master/LICENSE)
-
-bbolt is a fork of [Ben Johnson's][gh_ben] [Bolt][bolt] key/value
-store. The purpose of this fork is to provide the Go community with an active
-maintenance and development target for Bolt; the goal is improved reliability
-and stability. bbolt includes bug fixes, performance enhancements, and features
-not found in Bolt while preserving backwards compatibility with the Bolt API.
-
-Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
-[LMDB project][lmdb]. The goal of the project is to provide a simple,
-fast, and reliable database for projects that don't require a full database
-server such as Postgres or MySQL.
-
-Since Bolt is meant to be used as such a low-level piece of functionality,
-simplicity is key. The API will be small and only focus on getting values
-and setting values. That's it.
-
-[gh_ben]: https://github.com/benbjohnson
-[bolt]: https://github.com/boltdb/bolt
-[hyc_symas]: https://twitter.com/hyc_symas
-[lmdb]: http://symas.com/mdb/
-
-## Project Status
-
-Bolt is stable, the API is fixed, and the file format is fixed. Full unit
-test coverage and randomized black box testing are used to ensure database
-consistency and thread safety. Bolt is currently used in high-load production
-environments serving databases as large as 1TB. Many companies such as
-Shopify and Heroku use Bolt-backed services every day.
-
-## Project versioning
-
-bbolt uses [semantic versioning](http://semver.org).
-API should not change between patch and minor releases.
-New minor versions may add additional features to the API.
-
-## Table of Contents
-
- - [Getting Started](#getting-started)
- - [Installing](#installing)
- - [Opening a database](#opening-a-database)
- - [Transactions](#transactions)
- - [Read-write transactions](#read-write-transactions)
- - [Read-only transactions](#read-only-transactions)
- - [Batch read-write transactions](#batch-read-write-transactions)
- - [Managing transactions manually](#managing-transactions-manually)
- - [Using buckets](#using-buckets)
- - [Using key/value pairs](#using-keyvalue-pairs)
- - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
- - [Iterating over keys](#iterating-over-keys)
- - [Prefix scans](#prefix-scans)
- - [Range scans](#range-scans)
- - [ForEach()](#foreach)
- - [Nested buckets](#nested-buckets)
- - [Database backups](#database-backups)
- - [Statistics](#statistics)
- - [Read-Only Mode](#read-only-mode)
- - [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
- - [Resources](#resources)
- - [Comparison with other databases](#comparison-with-other-databases)
- - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
- - [LevelDB, RocksDB](#leveldb-rocksdb)
- - [LMDB](#lmdb)
- - [Caveats & Limitations](#caveats--limitations)
- - [Reading the Source](#reading-the-source)
- - [Other Projects Using Bolt](#other-projects-using-bolt)
-
-## Getting Started
-
-### Installing
-
-To start using Bolt, install Go and run `go get`:
-
-```sh
-$ go get go.etcd.io/bbolt/...
-```
-
-This will retrieve the library and install the `bolt` command line utility into
-your `$GOBIN` path.
-
-
-### Importing bbolt
-
-To use bbolt as an embedded key-value store, import as:
-
-```go
-import bolt "go.etcd.io/bbolt"
-
-db, err := bolt.Open(path, 0666, nil)
-if err != nil {
- return err
-}
-defer db.Close()
-```
-
-
-### Opening a database
-
-The top-level object in Bolt is a `DB`. It is represented as a single file on
-your disk and represents a consistent snapshot of your data.
-
-To open your database, simply use the `bolt.Open()` function:
-
-```go
-package main
-
-import (
- "log"
-
- bolt "go.etcd.io/bbolt"
-)
-
-func main() {
- // Open the my.db data file in your current directory.
- // It will be created if it doesn't exist.
- db, err := bolt.Open("my.db", 0600, nil)
- if err != nil {
- log.Fatal(err)
- }
- defer db.Close()
-
- ...
-}
-```
-
-Please note that Bolt obtains a file lock on the data file so multiple processes
-cannot open the same database at the same time. Opening an already open Bolt
-database will cause it to hang until the other process closes it. To prevent
-an indefinite wait you can pass a timeout option to the `Open()` function:
-
-```go
-db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
-```
-
-
-### Transactions
-
-Bolt allows only one read-write transaction at a time but allows as many
-read-only transactions as you want at a time. Each transaction has a consistent
-view of the data as it existed when the transaction started.
-
-Individual transactions and all objects created from them (e.g. buckets, keys)
-are not thread safe. To work with data in multiple goroutines you must start
-a transaction for each one or use locking to ensure only one goroutine accesses
-a transaction at a time. Creating transaction from the `DB` is thread safe.
-
-Transactions should not depend on one another and generally shouldn't be opened
-simultaneously in the same goroutine. This can cause a deadlock as the read-write
-transaction needs to periodically re-map the data file but it cannot do so while
-any read-only transaction is open. Even a nested read-only transaction can cause
-a deadlock, as the child transaction can block the parent transaction from releasing
-its resources.
-
-#### Read-write transactions
-
-To start a read-write transaction, you can use the `DB.Update()` function:
-
-```go
-err := db.Update(func(tx *bolt.Tx) error {
- ...
- return nil
-})
-```
-
-Inside the closure, you have a consistent view of the database. You commit the
-transaction by returning `nil` at the end. You can also rollback the transaction
-at any point by returning an error. All database operations are allowed inside
-a read-write transaction.
-
-Always check the return error as it will report any disk failures that can cause
-your transaction to not complete. If you return an error within your closure
-it will be passed through.
-
-
-#### Read-only transactions
-
-To start a read-only transaction, you can use the `DB.View()` function:
-
-```go
-err := db.View(func(tx *bolt.Tx) error {
- ...
- return nil
-})
-```
-
-You also get a consistent view of the database within this closure, however,
-no mutating operations are allowed within a read-only transaction. You can only
-retrieve buckets, retrieve values, and copy the database within a read-only
-transaction.
-
-
-#### Batch read-write transactions
-
-Each `DB.Update()` waits for disk to commit the writes. This overhead
-can be minimized by combining multiple updates with the `DB.Batch()`
-function:
-
-```go
-err := db.Batch(func(tx *bolt.Tx) error {
- ...
- return nil
-})
-```
-
-Concurrent Batch calls are opportunistically combined into larger
-transactions. Batch is only useful when there are multiple goroutines
-calling it.
-
-The trade-off is that `Batch` can call the given
-function multiple times, if parts of the transaction fail. The
-function must be idempotent and side effects must take effect only
-after a successful return from `DB.Batch()`.
-
-For example: don't display messages from inside the function, instead
-set variables in the enclosing scope:
-
-```go
-var id uint64
-err := db.Batch(func(tx *bolt.Tx) error {
- // Find last key in bucket, decode as bigendian uint64, increment
- // by one, encode back to []byte, and add new key.
- ...
- id = newValue
- return nil
-})
-if err != nil {
- return ...
-}
-fmt.Println("Allocated ID %d", id)
-```
-
-
-#### Managing transactions manually
-
-The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()`
-function. These helper functions will start the transaction, execute a function,
-and then safely close your transaction if an error is returned. This is the
-recommended way to use Bolt transactions.
-
-However, sometimes you may want to manually start and end your transactions.
-You can use the `DB.Begin()` function directly but **please** be sure to close
-the transaction.
-
-```go
-// Start a writable transaction.
-tx, err := db.Begin(true)
-if err != nil {
- return err
-}
-defer tx.Rollback()
-
-// Use the transaction...
-_, err := tx.CreateBucket([]byte("MyBucket"))
-if err != nil {
- return err
-}
-
-// Commit the transaction and check for error.
-if err := tx.Commit(); err != nil {
- return err
-}
-```
-
-The first argument to `DB.Begin()` is a boolean stating if the transaction
-should be writable.
-
-
-### Using buckets
-
-Buckets are collections of key/value pairs within the database. All keys in a
-bucket must be unique. You can create a bucket using the `Tx.CreateBucket()`
-function:
-
-```go
-db.Update(func(tx *bolt.Tx) error {
- b, err := tx.CreateBucket([]byte("MyBucket"))
- if err != nil {
- return fmt.Errorf("create bucket: %s", err)
- }
- return nil
-})
-```
-
-You can also create a bucket only if it doesn't exist by using the
-`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this
-function for all your top-level buckets after you open your database so you can
-guarantee that they exist for future transactions.
-
-To delete a bucket, simply call the `Tx.DeleteBucket()` function.
-
-
-### Using key/value pairs
-
-To save a key/value pair to a bucket, use the `Bucket.Put()` function:
-
-```go
-db.Update(func(tx *bolt.Tx) error {
- b := tx.Bucket([]byte("MyBucket"))
- err := b.Put([]byte("answer"), []byte("42"))
- return err
-})
-```
-
-This will set the value of the `"answer"` key to `"42"` in the `MyBucket`
-bucket. To retrieve this value, we can use the `Bucket.Get()` function:
-
-```go
-db.View(func(tx *bolt.Tx) error {
- b := tx.Bucket([]byte("MyBucket"))
- v := b.Get([]byte("answer"))
- fmt.Printf("The answer is: %s\n", v)
- return nil
-})
-```
-
-The `Get()` function does not return an error because its operation is
-guaranteed to work (unless there is some kind of system failure). If the key
-exists then it will return its byte slice value. If it doesn't exist then it
-will return `nil`. It's important to note that you can have a zero-length value
-set to a key which is different than the key not existing.
-
-Use the `Bucket.Delete()` function to delete a key from the bucket.
-
-Please note that values returned from `Get()` are only valid while the
-transaction is open. If you need to use a value outside of the transaction
-then you must use `copy()` to copy it to another byte slice.
-
-
-### Autoincrementing integer for the bucket
-By using the `NextSequence()` function, you can let Bolt determine a sequence
-which can be used as the unique identifier for your key/value pairs. See the
-example below.
-
-```go
-// CreateUser saves u to the store. The new user ID is set on u once the data is persisted.
-func (s *Store) CreateUser(u *User) error {
- return s.db.Update(func(tx *bolt.Tx) error {
- // Retrieve the users bucket.
- // This should be created when the DB is first opened.
- b := tx.Bucket([]byte("users"))
-
- // Generate ID for the user.
- // This returns an error only if the Tx is closed or not writeable.
- // That can't happen in an Update() call so I ignore the error check.
- id, _ := b.NextSequence()
- u.ID = int(id)
-
- // Marshal user data into bytes.
- buf, err := json.Marshal(u)
- if err != nil {
- return err
- }
-
- // Persist bytes to users bucket.
- return b.Put(itob(u.ID), buf)
- })
-}
-
-// itob returns an 8-byte big endian representation of v.
-func itob(v int) []byte {
- b := make([]byte, 8)
- binary.BigEndian.PutUint64(b, uint64(v))
- return b
-}
-
-type User struct {
- ID int
- ...
-}
-```
-
-### Iterating over keys
-
-Bolt stores its keys in byte-sorted order within a bucket. This makes sequential
-iteration over these keys extremely fast. To iterate over keys we'll use a
-`Cursor`:
-
-```go
-db.View(func(tx *bolt.Tx) error {
- // Assume bucket exists and has keys
- b := tx.Bucket([]byte("MyBucket"))
-
- c := b.Cursor()
-
- for k, v := c.First(); k != nil; k, v = c.Next() {
- fmt.Printf("key=%s, value=%s\n", k, v)
- }
-
- return nil
-})
-```
-
-The cursor allows you to move to a specific point in the list of keys and move
-forward or backward through the keys one at a time.
-
-The following functions are available on the cursor:
-
-```
-First() Move to the first key.
-Last() Move to the last key.
-Seek() Move to a specific key.
-Next() Move to the next key.
-Prev() Move to the previous key.
-```
-
-Each of those functions has a return signature of `(key []byte, value []byte)`.
-When you have iterated to the end of the cursor then `Next()` will return a
-`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()`
-before calling `Next()` or `Prev()`. If you do not seek to a position then
-these functions will return a `nil` key.
-
-During iteration, if the key is non-`nil` but the value is `nil`, that means
-the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to
-access the sub-bucket.
-
-
-#### Prefix scans
-
-To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`:
-
-```go
-db.View(func(tx *bolt.Tx) error {
- // Assume bucket exists and has keys
- c := tx.Bucket([]byte("MyBucket")).Cursor()
-
- prefix := []byte("1234")
- for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
- fmt.Printf("key=%s, value=%s\n", k, v)
- }
-
- return nil
-})
-```
-
-#### Range scans
-
-Another common use case is scanning over a range such as a time range. If you
-use a sortable time encoding such as RFC3339 then you can query a specific
-date range like this:
-
-```go
-db.View(func(tx *bolt.Tx) error {
- // Assume our events bucket exists and has RFC3339 encoded time keys.
- c := tx.Bucket([]byte("Events")).Cursor()
-
- // Our time range spans the 90's decade.
- min := []byte("1990-01-01T00:00:00Z")
- max := []byte("2000-01-01T00:00:00Z")
-
- // Iterate over the 90's.
- for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() {
- fmt.Printf("%s: %s\n", k, v)
- }
-
- return nil
-})
-```
-
-Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable.
-
-
-#### ForEach()
-
-You can also use the function `ForEach()` if you know you'll be iterating over
-all the keys in a bucket:
-
-```go
-db.View(func(tx *bolt.Tx) error {
- // Assume bucket exists and has keys
- b := tx.Bucket([]byte("MyBucket"))
-
- b.ForEach(func(k, v []byte) error {
- fmt.Printf("key=%s, value=%s\n", k, v)
- return nil
- })
- return nil
-})
-```
-
-Please note that keys and values in `ForEach()` are only valid while
-the transaction is open. If you need to use a key or value outside of
-the transaction, you must use `copy()` to copy it to another byte
-slice.
-
-### Nested buckets
-
-You can also store a bucket in a key to create nested buckets. The API is the
-same as the bucket management API on the `DB` object:
-
-```go
-func (*Bucket) CreateBucket(key []byte) (*Bucket, error)
-func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error)
-func (*Bucket) DeleteBucket(key []byte) error
-```
-
-Say you had a multi-tenant application where the root level bucket was the account bucket. Inside of this bucket was a sequence of accounts which themselves are buckets. And inside the sequence bucket you could have many buckets pertaining to the Account itself (Users, Notes, etc) isolating the information into logical groupings.
-
-```go
-
-// createUser creates a new user in the given account.
-func createUser(accountID int, u *User) error {
- // Start the transaction.
- tx, err := db.Begin(true)
- if err != nil {
- return err
- }
- defer tx.Rollback()
-
- // Retrieve the root bucket for the account.
- // Assume this has already been created when the account was set up.
- root := tx.Bucket([]byte(strconv.FormatUint(accountID, 10)))
-
- // Setup the users bucket.
- bkt, err := root.CreateBucketIfNotExists([]byte("USERS"))
- if err != nil {
- return err
- }
-
- // Generate an ID for the new user.
- userID, err := bkt.NextSequence()
- if err != nil {
- return err
- }
- u.ID = userID
-
- // Marshal and save the encoded user.
- if buf, err := json.Marshal(u); err != nil {
- return err
- } else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil {
- return err
- }
-
- // Commit the transaction.
- if err := tx.Commit(); err != nil {
- return err
- }
-
- return nil
-}
-
-```
-
-
-
-
-### Database backups
-
-Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()`
-function to write a consistent view of the database to a writer. If you call
-this from a read-only transaction, it will perform a hot backup and not block
-your other database reads and writes.
-
-By default, it will use a regular file handle which will utilize the operating
-system's page cache. See the [`Tx`](https://godoc.org/go.etcd.io/bbolt#Tx)
-documentation for information about optimizing for larger-than-RAM datasets.
-
-One common use case is to backup over HTTP so you can use tools like `cURL` to
-do database backups:
-
-```go
-func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
- err := db.View(func(tx *bolt.Tx) error {
- w.Header().Set("Content-Type", "application/octet-stream")
- w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
- w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
- _, err := tx.WriteTo(w)
- return err
- })
- if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- }
-}
-```
-
-Then you can backup using this command:
-
-```sh
-$ curl http://localhost/backup > my.db
-```
-
-Or you can open your browser to `http://localhost/backup` and it will download
-automatically.
-
-If you want to backup to another file you can use the `Tx.CopyFile()` helper
-function.
-
-
-### Statistics
-
-The database keeps a running count of many of the internal operations it
-performs so you can better understand what's going on. By grabbing a snapshot
-of these stats at two points in time we can see what operations were performed
-in that time range.
-
-For example, we could start a goroutine to log stats every 10 seconds:
-
-```go
-go func() {
- // Grab the initial stats.
- prev := db.Stats()
-
- for {
- // Wait for 10s.
- time.Sleep(10 * time.Second)
-
- // Grab the current stats and diff them.
- stats := db.Stats()
- diff := stats.Sub(&prev)
-
- // Encode stats to JSON and print to STDERR.
- json.NewEncoder(os.Stderr).Encode(diff)
-
- // Save stats for the next loop.
- prev = stats
- }
-}()
-```
-
-It's also useful to pipe these stats to a service such as statsd for monitoring
-or to provide an HTTP endpoint that will perform a fixed-length sample.
-
-
-### Read-Only Mode
-
-Sometimes it is useful to create a shared, read-only Bolt database. To this,
-set the `Options.ReadOnly` flag when opening your database. Read-only mode
-uses a shared lock to allow multiple processes to read from the database but
-it will block any processes from opening the database in read-write mode.
-
-```go
-db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true})
-if err != nil {
- log.Fatal(err)
-}
-```
-
-### Mobile Use (iOS/Android)
-
-Bolt is able to run on mobile devices by leveraging the binding feature of the
-[gomobile](https://github.com/golang/mobile) tool. Create a struct that will
-contain your database logic and a reference to a `*bolt.DB` with a initializing
-constructor that takes in a filepath where the database file will be stored.
-Neither Android nor iOS require extra permissions or cleanup from using this method.
-
-```go
-func NewBoltDB(filepath string) *BoltDB {
- db, err := bolt.Open(filepath+"/demo.db", 0600, nil)
- if err != nil {
- log.Fatal(err)
- }
-
- return &BoltDB{db}
-}
-
-type BoltDB struct {
- db *bolt.DB
- ...
-}
-
-func (b *BoltDB) Path() string {
- return b.db.Path()
-}
-
-func (b *BoltDB) Close() {
- b.db.Close()
-}
-```
-
-Database logic should be defined as methods on this wrapper struct.
-
-To initialize this struct from the native language (both platforms now sync
-their local storage to the cloud. These snippets disable that functionality for the
-database file):
-
-#### Android
-
-```java
-String path;
-if (android.os.Build.VERSION.SDK_INT >=android.os.Build.VERSION_CODES.LOLLIPOP){
- path = getNoBackupFilesDir().getAbsolutePath();
-} else{
- path = getFilesDir().getAbsolutePath();
-}
-Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path)
-```
-
-#### iOS
-
-```objc
-- (void)demo {
- NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory,
- NSUserDomainMask,
- YES) objectAtIndex:0];
- GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path);
- [self addSkipBackupAttributeToItemAtPath:demo.path];
- //Some DB Logic would go here
- [demo close];
-}
-
-- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString
-{
- NSURL* URL= [NSURL fileURLWithPath: filePathString];
- assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]);
-
- NSError *error = nil;
- BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES]
- forKey: NSURLIsExcludedFromBackupKey error: &error];
- if(!success){
- NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error);
- }
- return success;
-}
-
-```
-
-## Resources
-
-For more information on getting started with Bolt, check out the following articles:
-
-* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch).
-* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville
-
-
-## Comparison with other databases
-
-### Postgres, MySQL, & other relational databases
-
-Relational databases structure data into rows and are only accessible through
-the use of SQL. This approach provides flexibility in how you store and query
-your data but also incurs overhead in parsing and planning SQL statements. Bolt
-accesses all data by a byte slice key. This makes Bolt fast to read and write
-data by key but provides no built-in support for joining values together.
-
-Most relational databases (with the exception of SQLite) are standalone servers
-that run separately from your application. This gives your systems
-flexibility to connect multiple application servers to a single database
-server but also adds overhead in serializing and transporting data over the
-network. Bolt runs as a library included in your application so all data access
-has to go through your application's process. This brings data closer to your
-application but limits multi-process access to the data.
-
-
-### LevelDB, RocksDB
-
-LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that
-they are libraries bundled into the application, however, their underlying
-structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
-random writes by using a write ahead log and multi-tiered, sorted files called
-SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
-have trade-offs.
-
-If you require a high random write throughput (>10,000 w/sec) or you need to use
-spinning disks then LevelDB could be a good choice. If your application is
-read-heavy or does a lot of range scans then Bolt could be a good choice.
-
-One other important consideration is that LevelDB does not have transactions.
-It supports batch writing of key/values pairs and it supports read snapshots
-but it will not give you the ability to do a compare-and-swap operation safely.
-Bolt supports fully serializable ACID transactions.
-
-
-### LMDB
-
-Bolt was originally a port of LMDB so it is architecturally similar. Both use
-a B+tree, have ACID semantics with fully serializable transactions, and support
-lock-free MVCC using a single writer and multiple readers.
-
-The two projects have somewhat diverged. LMDB heavily focuses on raw performance
-while Bolt has focused on simplicity and ease of use. For example, LMDB allows
-several unsafe actions such as direct writes for the sake of performance. Bolt
-opts to disallow actions which can leave the database in a corrupted state. The
-only exception to this in Bolt is `DB.NoSync`.
-
-There are also a few differences in API. LMDB requires a maximum mmap size when
-opening an `mdb_env` whereas Bolt will handle incremental mmap resizing
-automatically. LMDB overloads the getter and setter functions with multiple
-flags whereas Bolt splits these specialized cases into their own functions.
-
-
-## Caveats & Limitations
-
-It's important to pick the right tool for the job and Bolt is no exception.
-Here are a few things to note when evaluating and using Bolt:
-
-* Bolt is good for read intensive workloads. Sequential write performance is
- also fast but random writes can be slow. You can use `DB.Batch()` or add a
- write-ahead log to help mitigate this issue.
-
-* Bolt uses a B+tree internally so there can be a lot of random page access.
- SSDs provide a significant performance boost over spinning disks.
-
-* Try to avoid long running read transactions. Bolt uses copy-on-write so
- old pages cannot be reclaimed while an old transaction is using them.
-
-* Byte slices returned from Bolt are only valid during a transaction. Once the
- transaction has been committed or rolled back then the memory they point to
- can be reused by a new page or can be unmapped from virtual memory and you'll
- see an `unexpected fault address` panic when accessing it.
-
-* Bolt uses an exclusive write lock on the database file so it cannot be
- shared by multiple processes.
-
-* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
- buckets that have random inserts will cause your database to have very poor
- page utilization.
-
-* Use larger buckets in general. Smaller buckets causes poor page utilization
- once they become larger than the page size (typically 4KB).
-
-* Bulk loading a lot of random writes into a new bucket can be slow as the
- page will not split until the transaction is committed. Randomly inserting
- more than 100,000 key/value pairs into a single new bucket in a single
- transaction is not advised.
-
-* Bolt uses a memory-mapped file so the underlying operating system handles the
- caching of the data. Typically, the OS will cache as much of the file as it
- can in memory and will release memory as needed to other processes. This means
- that Bolt can show very high memory usage when working with large databases.
- However, this is expected and the OS will release memory as needed. Bolt can
- handle databases much larger than the available physical RAM, provided its
- memory-map fits in the process virtual address space. It may be problematic
- on 32-bits systems.
-
-* The data structures in the Bolt database are memory mapped so the data file
- will be endian specific. This means that you cannot copy a Bolt file from a
- little endian machine to a big endian machine and have it work. For most
- users this is not a concern since most modern CPUs are little endian.
-
-* Because of the way pages are laid out on disk, Bolt cannot truncate data files
- and return free pages back to the disk. Instead, Bolt maintains a free list
- of unused pages within its data file. These free pages can be reused by later
- transactions. This works well for many use cases as databases generally tend
- to grow. However, it's important to note that deleting large chunks of data
- will not allow you to reclaim that space on disk.
-
- For more information on page allocation, [see this comment][page-allocation].
-
-[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
-
-
-## Reading the Source
-
-Bolt is a relatively small code base (<5KLOC) for an embedded, serializable,
-transactional key/value database so it can be a good starting point for people
-interested in how databases work.
-
-The best places to start are the main entry points into Bolt:
-
-- `Open()` - Initializes the reference to the database. It's responsible for
- creating the database if it doesn't exist, obtaining an exclusive lock on the
- file, reading the meta pages, & memory-mapping the file.
-
-- `DB.Begin()` - Starts a read-only or read-write transaction depending on the
- value of the `writable` argument. This requires briefly obtaining the "meta"
- lock to keep track of open transactions. Only one read-write transaction can
- exist at a time so the "rwlock" is acquired during the life of a read-write
- transaction.
-
-- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the
- arguments, a cursor is used to traverse the B+tree to the page and position
- where they key & value will be written. Once the position is found, the bucket
- materializes the underlying page and the page's parent pages into memory as
- "nodes". These nodes are where mutations occur during read-write transactions.
- These changes get flushed to disk during commit.
-
-- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor
- to move to the page & position of a key/value pair. During a read-only
- transaction, the key and value data is returned as a direct reference to the
- underlying mmap file so there's no allocation overhead. For read-write
- transactions, this data may reference the mmap file or one of the in-memory
- node values.
-
-- `Cursor` - This object is simply for traversing the B+tree of on-disk pages
- or in-memory nodes. It can seek to a specific key, move to the first or last
- value, or it can move forward or backward. The cursor handles the movement up
- and down the B+tree transparently to the end user.
-
-- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages
- into pages to be written to disk. Writing to disk then occurs in two phases.
- First, the dirty pages are written to disk and an `fsync()` occurs. Second, a
- new meta page with an incremented transaction ID is written and another
- `fsync()` occurs. This two phase write ensures that partially written data
- pages are ignored in the event of a crash since the meta page pointing to them
- is never written. Partially written meta pages are invalidated because they
- are written with a checksum.
-
-If you have additional notes that could be helpful for others, please submit
-them via pull request.
-
-
-## Other Projects Using Bolt
-
-Below is a list of public, open source projects that use Bolt:
-
-* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
-* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
-* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal.
-* [boltcli](https://github.com/spacewander/boltcli) - the redis-cli for boltdb with Lua script support.
-* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB
-* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
-* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
-* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
-* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
-* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
-* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
- simple tx and key scans.
-* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
-* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
-* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
-* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
-* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency.
-* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
-* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
-* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
-* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
-* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
-* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performent HTTPRouter.
-* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains
-* [gokv](https://github.com/philippgille/gokv) - Simple key-value store abstraction and implementations for Go (Redis, Consul, etcd, bbolt, BadgerDB, LevelDB, Memcached, DynamoDB, S3, PostgreSQL, MongoDB, CockroachDB and many more)
-* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
-* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
-* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
-* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
-* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build - > test -> release) tool, with no external dependencies
-* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs.
-* [Key Value Access Langusge (KVAL)](https://github.com/kval-access-language) - A proposed grammar for key-value datastores offering a bbolt binding.
-* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
-* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
-* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
-* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
-* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
-* [NATS](https://github.com/nats-io/nats-streaming-server) - NATS Streaming uses bbolt for message and metadata storage.
-* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
-* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
-* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
-* [reef-pi](https://github.com/reef-pi/reef-pi) - reef-pi is an award winning, modular, DIY reef tank controller using easy to learn electronics based on a Raspberry Pi.
-* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
-* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
-* [stow](https://github.com/djherbis/stow) - a persistence manager for objects
- backed by boltdb.
-* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
-* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
-* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
-* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
-* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
-* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development.
-* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
-
-If you are using Bolt in a project please send a pull request to add it to the list.
diff --git a/vendor/go.etcd.io/bbolt/bolt_386.go b/vendor/go.etcd.io/bbolt/bolt_386.go
deleted file mode 100644
index aee25960..00000000
--- a/vendor/go.etcd.io/bbolt/bolt_386.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package bbolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0x7FFFFFFF // 2GB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0xFFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/bolt_amd64.go b/vendor/go.etcd.io/bbolt/bolt_amd64.go
deleted file mode 100644
index 5dd8f3f2..00000000
--- a/vendor/go.etcd.io/bbolt/bolt_amd64.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package bbolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0xFFFFFFFFFFFF // 256TB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0x7FFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/bolt_arm.go b/vendor/go.etcd.io/bbolt/bolt_arm.go
deleted file mode 100644
index aee25960..00000000
--- a/vendor/go.etcd.io/bbolt/bolt_arm.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package bbolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0x7FFFFFFF // 2GB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0xFFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/bolt_arm64.go b/vendor/go.etcd.io/bbolt/bolt_arm64.go
deleted file mode 100644
index 810dfd55..00000000
--- a/vendor/go.etcd.io/bbolt/bolt_arm64.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build arm64
-
-package bbolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0xFFFFFFFFFFFF // 256TB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0x7FFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/bolt_linux.go b/vendor/go.etcd.io/bbolt/bolt_linux.go
deleted file mode 100644
index 7707bcac..00000000
--- a/vendor/go.etcd.io/bbolt/bolt_linux.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package bbolt
-
-import (
- "syscall"
-)
-
-// fdatasync flushes written data to a file descriptor.
-func fdatasync(db *DB) error {
- return syscall.Fdatasync(int(db.file.Fd()))
-}
diff --git a/vendor/go.etcd.io/bbolt/bolt_mips64x.go b/vendor/go.etcd.io/bbolt/bolt_mips64x.go
deleted file mode 100644
index dd8ffe12..00000000
--- a/vendor/go.etcd.io/bbolt/bolt_mips64x.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build mips64 mips64le
-
-package bbolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0x8000000000 // 512GB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0x7FFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/bolt_mipsx.go b/vendor/go.etcd.io/bbolt/bolt_mipsx.go
deleted file mode 100644
index a669703a..00000000
--- a/vendor/go.etcd.io/bbolt/bolt_mipsx.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build mips mipsle
-
-package bbolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0x40000000 // 1GB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0xFFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/bolt_openbsd.go b/vendor/go.etcd.io/bbolt/bolt_openbsd.go
deleted file mode 100644
index d7f50358..00000000
--- a/vendor/go.etcd.io/bbolt/bolt_openbsd.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package bbolt
-
-import (
- "syscall"
- "unsafe"
-)
-
-const (
- msAsync = 1 << iota // perform asynchronous writes
- msSync // perform synchronous writes
- msInvalidate // invalidate cached data
-)
-
-func msync(db *DB) error {
- _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate)
- if errno != 0 {
- return errno
- }
- return nil
-}
-
-func fdatasync(db *DB) error {
- if db.data != nil {
- return msync(db)
- }
- return db.file.Sync()
-}
diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc.go b/vendor/go.etcd.io/bbolt/bolt_ppc.go
deleted file mode 100644
index 84e545ef..00000000
--- a/vendor/go.etcd.io/bbolt/bolt_ppc.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build ppc
-
-package bbolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0x7FFFFFFF // 2GB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0xFFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc64.go b/vendor/go.etcd.io/bbolt/bolt_ppc64.go
deleted file mode 100644
index a7612090..00000000
--- a/vendor/go.etcd.io/bbolt/bolt_ppc64.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build ppc64
-
-package bbolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0xFFFFFFFFFFFF // 256TB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0x7FFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc64le.go b/vendor/go.etcd.io/bbolt/bolt_ppc64le.go
deleted file mode 100644
index c830f2fc..00000000
--- a/vendor/go.etcd.io/bbolt/bolt_ppc64le.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build ppc64le
-
-package bbolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0xFFFFFFFFFFFF // 256TB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0x7FFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/bolt_riscv64.go b/vendor/go.etcd.io/bbolt/bolt_riscv64.go
deleted file mode 100644
index c967613b..00000000
--- a/vendor/go.etcd.io/bbolt/bolt_riscv64.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build riscv64
-
-package bbolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0xFFFFFFFFFFFF // 256TB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0x7FFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/bolt_s390x.go b/vendor/go.etcd.io/bbolt/bolt_s390x.go
deleted file mode 100644
index ff2a5609..00000000
--- a/vendor/go.etcd.io/bbolt/bolt_s390x.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build s390x
-
-package bbolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0xFFFFFFFFFFFF // 256TB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0x7FFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/bolt_unix.go b/vendor/go.etcd.io/bbolt/bolt_unix.go
deleted file mode 100644
index 2938fed5..00000000
--- a/vendor/go.etcd.io/bbolt/bolt_unix.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// +build !windows,!plan9,!solaris,!aix
-
-package bbolt
-
-import (
- "fmt"
- "syscall"
- "time"
- "unsafe"
-)
-
-// flock acquires an advisory lock on a file descriptor.
-func flock(db *DB, exclusive bool, timeout time.Duration) error {
- var t time.Time
- if timeout != 0 {
- t = time.Now()
- }
- fd := db.file.Fd()
- flag := syscall.LOCK_NB
- if exclusive {
- flag |= syscall.LOCK_EX
- } else {
- flag |= syscall.LOCK_SH
- }
- for {
- // Attempt to obtain an exclusive lock.
- err := syscall.Flock(int(fd), flag)
- if err == nil {
- return nil
- } else if err != syscall.EWOULDBLOCK {
- return err
- }
-
- // If we timed out then return an error.
- if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
- return ErrTimeout
- }
-
- // Wait for a bit and try again.
- time.Sleep(flockRetryTimeout)
- }
-}
-
-// funlock releases an advisory lock on a file descriptor.
-func funlock(db *DB) error {
- return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN)
-}
-
-// mmap memory maps a DB's data file.
-func mmap(db *DB, sz int) error {
- // Map the data file to memory.
- b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
- if err != nil {
- return err
- }
-
- // Advise the kernel that the mmap is accessed randomly.
- err = madvise(b, syscall.MADV_RANDOM)
- if err != nil && err != syscall.ENOSYS {
- // Ignore not implemented error in kernel because it still works.
- return fmt.Errorf("madvise: %s", err)
- }
-
- // Save the original byte slice and convert to a byte array pointer.
- db.dataref = b
- db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
- db.datasz = sz
- return nil
-}
-
-// munmap unmaps a DB's data file from memory.
-func munmap(db *DB) error {
- // Ignore the unmap if we have no mapped data.
- if db.dataref == nil {
- return nil
- }
-
- // Unmap using the original byte slice.
- err := syscall.Munmap(db.dataref)
- db.dataref = nil
- db.data = nil
- db.datasz = 0
- return err
-}
-
-// NOTE: This function is copied from stdlib because it is not available on darwin.
-func madvise(b []byte, advice int) (err error) {
- _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice))
- if e1 != 0 {
- err = e1
- }
- return
-}
diff --git a/vendor/go.etcd.io/bbolt/bolt_unix_aix.go b/vendor/go.etcd.io/bbolt/bolt_unix_aix.go
deleted file mode 100644
index a64c16f5..00000000
--- a/vendor/go.etcd.io/bbolt/bolt_unix_aix.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// +build aix
-
-package bbolt
-
-import (
- "fmt"
- "syscall"
- "time"
- "unsafe"
-
- "golang.org/x/sys/unix"
-)
-
-// flock acquires an advisory lock on a file descriptor.
-func flock(db *DB, exclusive bool, timeout time.Duration) error {
- var t time.Time
- if timeout != 0 {
- t = time.Now()
- }
- fd := db.file.Fd()
- var lockType int16
- if exclusive {
- lockType = syscall.F_WRLCK
- } else {
- lockType = syscall.F_RDLCK
- }
- for {
- // Attempt to obtain an exclusive lock.
- lock := syscall.Flock_t{Type: lockType}
- err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock)
- if err == nil {
- return nil
- } else if err != syscall.EAGAIN {
- return err
- }
-
- // If we timed out then return an error.
- if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
- return ErrTimeout
- }
-
- // Wait for a bit and try again.
- time.Sleep(flockRetryTimeout)
- }
-}
-
-// funlock releases an advisory lock on a file descriptor.
-func funlock(db *DB) error {
- var lock syscall.Flock_t
- lock.Start = 0
- lock.Len = 0
- lock.Type = syscall.F_UNLCK
- lock.Whence = 0
- return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
-}
-
-// mmap memory maps a DB's data file.
-func mmap(db *DB, sz int) error {
- // Map the data file to memory.
- b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
- if err != nil {
- return err
- }
-
- // Advise the kernel that the mmap is accessed randomly.
- if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil {
- return fmt.Errorf("madvise: %s", err)
- }
-
- // Save the original byte slice and convert to a byte array pointer.
- db.dataref = b
- db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
- db.datasz = sz
- return nil
-}
-
-// munmap unmaps a DB's data file from memory.
-func munmap(db *DB) error {
- // Ignore the unmap if we have no mapped data.
- if db.dataref == nil {
- return nil
- }
-
- // Unmap using the original byte slice.
- err := unix.Munmap(db.dataref)
- db.dataref = nil
- db.data = nil
- db.datasz = 0
- return err
-}
diff --git a/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go b/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go
deleted file mode 100644
index babad657..00000000
--- a/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package bbolt
-
-import (
- "fmt"
- "syscall"
- "time"
- "unsafe"
-
- "golang.org/x/sys/unix"
-)
-
-// flock acquires an advisory lock on a file descriptor.
-func flock(db *DB, exclusive bool, timeout time.Duration) error {
- var t time.Time
- if timeout != 0 {
- t = time.Now()
- }
- fd := db.file.Fd()
- var lockType int16
- if exclusive {
- lockType = syscall.F_WRLCK
- } else {
- lockType = syscall.F_RDLCK
- }
- for {
- // Attempt to obtain an exclusive lock.
- lock := syscall.Flock_t{Type: lockType}
- err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock)
- if err == nil {
- return nil
- } else if err != syscall.EAGAIN {
- return err
- }
-
- // If we timed out then return an error.
- if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
- return ErrTimeout
- }
-
- // Wait for a bit and try again.
- time.Sleep(flockRetryTimeout)
- }
-}
-
-// funlock releases an advisory lock on a file descriptor.
-func funlock(db *DB) error {
- var lock syscall.Flock_t
- lock.Start = 0
- lock.Len = 0
- lock.Type = syscall.F_UNLCK
- lock.Whence = 0
- return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
-}
-
-// mmap memory maps a DB's data file.
-func mmap(db *DB, sz int) error {
- // Map the data file to memory.
- b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
- if err != nil {
- return err
- }
-
- // Advise the kernel that the mmap is accessed randomly.
- if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil {
- return fmt.Errorf("madvise: %s", err)
- }
-
- // Save the original byte slice and convert to a byte array pointer.
- db.dataref = b
- db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
- db.datasz = sz
- return nil
-}
-
-// munmap unmaps a DB's data file from memory.
-func munmap(db *DB) error {
- // Ignore the unmap if we have no mapped data.
- if db.dataref == nil {
- return nil
- }
-
- // Unmap using the original byte slice.
- err := unix.Munmap(db.dataref)
- db.dataref = nil
- db.data = nil
- db.datasz = 0
- return err
-}
diff --git a/vendor/go.etcd.io/bbolt/bolt_windows.go b/vendor/go.etcd.io/bbolt/bolt_windows.go
deleted file mode 100644
index fca178bd..00000000
--- a/vendor/go.etcd.io/bbolt/bolt_windows.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package bbolt
-
-import (
- "fmt"
- "os"
- "syscall"
- "time"
- "unsafe"
-)
-
-// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1
-var (
- modkernel32 = syscall.NewLazyDLL("kernel32.dll")
- procLockFileEx = modkernel32.NewProc("LockFileEx")
- procUnlockFileEx = modkernel32.NewProc("UnlockFileEx")
-)
-
-const (
- // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
- flagLockExclusive = 2
- flagLockFailImmediately = 1
-
- // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
- errLockViolation syscall.Errno = 0x21
-)
-
-func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
- r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
- if r == 0 {
- return err
- }
- return nil
-}
-
-func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
- r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0)
- if r == 0 {
- return err
- }
- return nil
-}
-
-// fdatasync flushes written data to a file descriptor.
-func fdatasync(db *DB) error {
- return db.file.Sync()
-}
-
-// flock acquires an advisory lock on a file descriptor.
-func flock(db *DB, exclusive bool, timeout time.Duration) error {
- var t time.Time
- if timeout != 0 {
- t = time.Now()
- }
- var flag uint32 = flagLockFailImmediately
- if exclusive {
- flag |= flagLockExclusive
- }
- for {
- // Fix for https://github.com/etcd-io/bbolt/issues/121. Use byte-range
- // -1..0 as the lock on the database file.
- var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
- err := lockFileEx(syscall.Handle(db.file.Fd()), flag, 0, 1, 0, &syscall.Overlapped{
- Offset: m1,
- OffsetHigh: m1,
- })
-
- if err == nil {
- return nil
- } else if err != errLockViolation {
- return err
- }
-
-	// If we timed out then return an error.
- if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
- return ErrTimeout
- }
-
- // Wait for a bit and try again.
- time.Sleep(flockRetryTimeout)
- }
-}
-
-// funlock releases an advisory lock on a file descriptor.
-func funlock(db *DB) error {
- var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
- err := unlockFileEx(syscall.Handle(db.file.Fd()), 0, 1, 0, &syscall.Overlapped{
- Offset: m1,
- OffsetHigh: m1,
- })
- return err
-}
-
-// mmap memory maps a DB's data file.
-// Based on: https://github.com/edsrzf/mmap-go
-func mmap(db *DB, sz int) error {
- if !db.readOnly {
- // Truncate the database to the size of the mmap.
- if err := db.file.Truncate(int64(sz)); err != nil {
- return fmt.Errorf("truncate: %s", err)
- }
- }
-
- // Open a file mapping handle.
- sizelo := uint32(sz >> 32)
- sizehi := uint32(sz) & 0xffffffff
- h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil)
- if h == 0 {
- return os.NewSyscallError("CreateFileMapping", errno)
- }
-
- // Create the memory map.
- addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz))
- if addr == 0 {
- return os.NewSyscallError("MapViewOfFile", errno)
- }
-
- // Close mapping handle.
- if err := syscall.CloseHandle(syscall.Handle(h)); err != nil {
- return os.NewSyscallError("CloseHandle", err)
- }
-
- // Convert to a byte array.
- db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr)))
- db.datasz = sz
-
- return nil
-}
-
-// munmap unmaps a pointer from a file.
-// Based on: https://github.com/edsrzf/mmap-go
-func munmap(db *DB) error {
- if db.data == nil {
- return nil
- }
-
- addr := (uintptr)(unsafe.Pointer(&db.data[0]))
- if err := syscall.UnmapViewOfFile(addr); err != nil {
- return os.NewSyscallError("UnmapViewOfFile", err)
- }
- return nil
-}
diff --git a/vendor/go.etcd.io/bbolt/boltsync_unix.go b/vendor/go.etcd.io/bbolt/boltsync_unix.go
deleted file mode 100644
index 9587afef..00000000
--- a/vendor/go.etcd.io/bbolt/boltsync_unix.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// +build !windows,!plan9,!linux,!openbsd
-
-package bbolt
-
-// fdatasync flushes written data to a file descriptor.
-func fdatasync(db *DB) error {
- return db.file.Sync()
-}
diff --git a/vendor/go.etcd.io/bbolt/bucket.go b/vendor/go.etcd.io/bbolt/bucket.go
deleted file mode 100644
index d8750b14..00000000
--- a/vendor/go.etcd.io/bbolt/bucket.go
+++ /dev/null
@@ -1,777 +0,0 @@
-package bbolt
-
-import (
- "bytes"
- "fmt"
- "unsafe"
-)
-
-const (
- // MaxKeySize is the maximum length of a key, in bytes.
- MaxKeySize = 32768
-
- // MaxValueSize is the maximum length of a value, in bytes.
- MaxValueSize = (1 << 31) - 2
-)
-
-const bucketHeaderSize = int(unsafe.Sizeof(bucket{}))
-
-const (
- minFillPercent = 0.1
- maxFillPercent = 1.0
-)
-
-// DefaultFillPercent is the percentage that split pages are filled.
-// This value can be changed by setting Bucket.FillPercent.
-const DefaultFillPercent = 0.5
-
-// Bucket represents a collection of key/value pairs inside the database.
-type Bucket struct {
- *bucket
- tx *Tx // the associated transaction
- buckets map[string]*Bucket // subbucket cache
- page *page // inline page reference
- rootNode *node // materialized node for the root page.
- nodes map[pgid]*node // node cache
-
- // Sets the threshold for filling nodes when they split. By default,
- // the bucket will fill to 50% but it can be useful to increase this
- // amount if you know that your write workloads are mostly append-only.
- //
- // This is non-persisted across transactions so it must be set in every Tx.
- FillPercent float64
-}
-
-// bucket represents the on-file representation of a bucket.
-// This is stored as the "value" of a bucket key. If the bucket is small enough,
-// then its root page can be stored inline in the "value", after the bucket
-// header. In the case of inline buckets, the "root" will be 0.
-type bucket struct {
- root pgid // page id of the bucket's root-level page
- sequence uint64 // monotonically incrementing, used by NextSequence()
-}
-
-// newBucket returns a new bucket associated with a transaction.
-func newBucket(tx *Tx) Bucket {
- var b = Bucket{tx: tx, FillPercent: DefaultFillPercent}
- if tx.writable {
- b.buckets = make(map[string]*Bucket)
- b.nodes = make(map[pgid]*node)
- }
- return b
-}
-
-// Tx returns the tx of the bucket.
-func (b *Bucket) Tx() *Tx {
- return b.tx
-}
-
-// Root returns the root of the bucket.
-func (b *Bucket) Root() pgid {
- return b.root
-}
-
-// Writable returns whether the bucket is writable.
-func (b *Bucket) Writable() bool {
- return b.tx.writable
-}
-
-// Cursor creates a cursor associated with the bucket.
-// The cursor is only valid as long as the transaction is open.
-// Do not use a cursor after the transaction is closed.
-func (b *Bucket) Cursor() *Cursor {
- // Update transaction statistics.
- b.tx.stats.CursorCount++
-
- // Allocate and return a cursor.
- return &Cursor{
- bucket: b,
- stack: make([]elemRef, 0),
- }
-}
-
-// Bucket retrieves a nested bucket by name.
-// Returns nil if the bucket does not exist.
-// The bucket instance is only valid for the lifetime of the transaction.
-func (b *Bucket) Bucket(name []byte) *Bucket {
- if b.buckets != nil {
- if child := b.buckets[string(name)]; child != nil {
- return child
- }
- }
-
- // Move cursor to key.
- c := b.Cursor()
- k, v, flags := c.seek(name)
-
- // Return nil if the key doesn't exist or it is not a bucket.
- if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 {
- return nil
- }
-
- // Otherwise create a bucket and cache it.
- var child = b.openBucket(v)
- if b.buckets != nil {
- b.buckets[string(name)] = child
- }
-
- return child
-}
-
-// Helper method that re-interprets a sub-bucket value
-// from a parent into a Bucket
-func (b *Bucket) openBucket(value []byte) *Bucket {
- var child = newBucket(b.tx)
-
- // Unaligned access requires a copy to be made.
- const unalignedMask = unsafe.Alignof(struct {
- bucket
- page
- }{}) - 1
- unaligned := uintptr(unsafe.Pointer(&value[0]))&unalignedMask != 0
- if unaligned {
- value = cloneBytes(value)
- }
-
- // If this is a writable transaction then we need to copy the bucket entry.
- // Read-only transactions can point directly at the mmap entry.
- if b.tx.writable && !unaligned {
- child.bucket = &bucket{}
- *child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
- } else {
- child.bucket = (*bucket)(unsafe.Pointer(&value[0]))
- }
-
- // Save a reference to the inline page if the bucket is inline.
- if child.root == 0 {
- child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
- }
-
- return &child
-}
-
-// CreateBucket creates a new bucket at the given key and returns the new bucket.
-// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long.
-// The bucket instance is only valid for the lifetime of the transaction.
-func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
- if b.tx.db == nil {
- return nil, ErrTxClosed
- } else if !b.tx.writable {
- return nil, ErrTxNotWritable
- } else if len(key) == 0 {
- return nil, ErrBucketNameRequired
- }
-
- // Move cursor to correct position.
- c := b.Cursor()
- k, _, flags := c.seek(key)
-
- // Return an error if there is an existing key.
- if bytes.Equal(key, k) {
- if (flags & bucketLeafFlag) != 0 {
- return nil, ErrBucketExists
- }
- return nil, ErrIncompatibleValue
- }
-
- // Create empty, inline bucket.
- var bucket = Bucket{
- bucket: &bucket{},
- rootNode: &node{isLeaf: true},
- FillPercent: DefaultFillPercent,
- }
- var value = bucket.write()
-
- // Insert into node.
- key = cloneBytes(key)
- c.node().put(key, key, value, 0, bucketLeafFlag)
-
- // Since subbuckets are not allowed on inline buckets, we need to
- // dereference the inline page, if it exists. This will cause the bucket
- // to be treated as a regular, non-inline bucket for the rest of the tx.
- b.page = nil
-
- return b.Bucket(key), nil
-}
-
-// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it.
-// Returns an error if the bucket name is blank, or if the bucket name is too long.
-// The bucket instance is only valid for the lifetime of the transaction.
-func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
- child, err := b.CreateBucket(key)
- if err == ErrBucketExists {
- return b.Bucket(key), nil
- } else if err != nil {
- return nil, err
- }
- return child, nil
-}
-
-// DeleteBucket deletes a bucket at the given key.
-// Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
-func (b *Bucket) DeleteBucket(key []byte) error {
- if b.tx.db == nil {
- return ErrTxClosed
- } else if !b.Writable() {
- return ErrTxNotWritable
- }
-
- // Move cursor to correct position.
- c := b.Cursor()
- k, _, flags := c.seek(key)
-
- // Return an error if bucket doesn't exist or is not a bucket.
- if !bytes.Equal(key, k) {
- return ErrBucketNotFound
- } else if (flags & bucketLeafFlag) == 0 {
- return ErrIncompatibleValue
- }
-
- // Recursively delete all child buckets.
- child := b.Bucket(key)
- err := child.ForEach(func(k, v []byte) error {
- if _, _, childFlags := child.Cursor().seek(k); (childFlags & bucketLeafFlag) != 0 {
- if err := child.DeleteBucket(k); err != nil {
- return fmt.Errorf("delete bucket: %s", err)
- }
- }
- return nil
- })
- if err != nil {
- return err
- }
-
- // Remove cached copy.
- delete(b.buckets, string(key))
-
- // Release all bucket pages to freelist.
- child.nodes = nil
- child.rootNode = nil
- child.free()
-
- // Delete the node if we have a matching key.
- c.node().del(key)
-
- return nil
-}
-
-// Get retrieves the value for a key in the bucket.
-// Returns a nil value if the key does not exist or if the key is a nested bucket.
-// The returned value is only valid for the life of the transaction.
-func (b *Bucket) Get(key []byte) []byte {
- k, v, flags := b.Cursor().seek(key)
-
- // Return nil if this is a bucket.
- if (flags & bucketLeafFlag) != 0 {
- return nil
- }
-
- // If our target node isn't the same key as what's passed in then return nil.
- if !bytes.Equal(key, k) {
- return nil
- }
- return v
-}
-
-// Put sets the value for a key in the bucket.
-// If the key exist then its previous value will be overwritten.
-// Supplied value must remain valid for the life of the transaction.
-// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large.
-func (b *Bucket) Put(key []byte, value []byte) error {
- if b.tx.db == nil {
- return ErrTxClosed
- } else if !b.Writable() {
- return ErrTxNotWritable
- } else if len(key) == 0 {
- return ErrKeyRequired
- } else if len(key) > MaxKeySize {
- return ErrKeyTooLarge
- } else if int64(len(value)) > MaxValueSize {
- return ErrValueTooLarge
- }
-
- // Move cursor to correct position.
- c := b.Cursor()
- k, _, flags := c.seek(key)
-
- // Return an error if there is an existing key with a bucket value.
- if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 {
- return ErrIncompatibleValue
- }
-
- // Insert into node.
- key = cloneBytes(key)
- c.node().put(key, key, value, 0, 0)
-
- return nil
-}
-
-// Delete removes a key from the bucket.
-// If the key does not exist then nothing is done and a nil error is returned.
-// Returns an error if the bucket was created from a read-only transaction.
-func (b *Bucket) Delete(key []byte) error {
- if b.tx.db == nil {
- return ErrTxClosed
- } else if !b.Writable() {
- return ErrTxNotWritable
- }
-
- // Move cursor to correct position.
- c := b.Cursor()
- k, _, flags := c.seek(key)
-
- // Return nil if the key doesn't exist.
- if !bytes.Equal(key, k) {
- return nil
- }
-
- // Return an error if there is already existing bucket value.
- if (flags & bucketLeafFlag) != 0 {
- return ErrIncompatibleValue
- }
-
- // Delete the node if we have a matching key.
- c.node().del(key)
-
- return nil
-}
-
-// Sequence returns the current integer for the bucket without incrementing it.
-func (b *Bucket) Sequence() uint64 { return b.bucket.sequence }
-
-// SetSequence updates the sequence number for the bucket.
-func (b *Bucket) SetSequence(v uint64) error {
- if b.tx.db == nil {
- return ErrTxClosed
- } else if !b.Writable() {
- return ErrTxNotWritable
- }
-
- // Materialize the root node if it hasn't been already so that the
- // bucket will be saved during commit.
- if b.rootNode == nil {
- _ = b.node(b.root, nil)
- }
-
- // Increment and return the sequence.
- b.bucket.sequence = v
- return nil
-}
-
-// NextSequence returns an autoincrementing integer for the bucket.
-func (b *Bucket) NextSequence() (uint64, error) {
- if b.tx.db == nil {
- return 0, ErrTxClosed
- } else if !b.Writable() {
- return 0, ErrTxNotWritable
- }
-
- // Materialize the root node if it hasn't been already so that the
- // bucket will be saved during commit.
- if b.rootNode == nil {
- _ = b.node(b.root, nil)
- }
-
- // Increment and return the sequence.
- b.bucket.sequence++
- return b.bucket.sequence, nil
-}
-
-// ForEach executes a function for each key/value pair in a bucket.
-// If the provided function returns an error then the iteration is stopped and
-// the error is returned to the caller. The provided function must not modify
-// the bucket; this will result in undefined behavior.
-func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
- if b.tx.db == nil {
- return ErrTxClosed
- }
- c := b.Cursor()
- for k, v := c.First(); k != nil; k, v = c.Next() {
- if err := fn(k, v); err != nil {
- return err
- }
- }
- return nil
-}
-
-// Stat returns stats on a bucket.
-func (b *Bucket) Stats() BucketStats {
- var s, subStats BucketStats
- pageSize := b.tx.db.pageSize
- s.BucketN += 1
- if b.root == 0 {
- s.InlineBucketN += 1
- }
- b.forEachPage(func(p *page, depth int) {
- if (p.flags & leafPageFlag) != 0 {
- s.KeyN += int(p.count)
-
- // used totals the used bytes for the page
- used := pageHeaderSize
-
- if p.count != 0 {
- // If page has any elements, add all element headers.
- used += leafPageElementSize * uintptr(p.count-1)
-
- // Add all element key, value sizes.
- // The computation takes advantage of the fact that the position
- // of the last element's key/value equals to the total of the sizes
- // of all previous elements' keys and values.
- // It also includes the last element's header.
- lastElement := p.leafPageElement(p.count - 1)
- used += uintptr(lastElement.pos + lastElement.ksize + lastElement.vsize)
- }
-
- if b.root == 0 {
- // For inlined bucket just update the inline stats
- s.InlineBucketInuse += int(used)
- } else {
- // For non-inlined bucket update all the leaf stats
- s.LeafPageN++
- s.LeafInuse += int(used)
- s.LeafOverflowN += int(p.overflow)
-
- // Collect stats from sub-buckets.
- // Do that by iterating over all element headers
- // looking for the ones with the bucketLeafFlag.
- for i := uint16(0); i < p.count; i++ {
- e := p.leafPageElement(i)
- if (e.flags & bucketLeafFlag) != 0 {
- // For any bucket element, open the element value
- // and recursively call Stats on the contained bucket.
- subStats.Add(b.openBucket(e.value()).Stats())
- }
- }
- }
- } else if (p.flags & branchPageFlag) != 0 {
- s.BranchPageN++
- lastElement := p.branchPageElement(p.count - 1)
-
- // used totals the used bytes for the page
- // Add header and all element headers.
- used := pageHeaderSize + (branchPageElementSize * uintptr(p.count-1))
-
- // Add size of all keys and values.
- // Again, use the fact that last element's position equals to
- // the total of key, value sizes of all previous elements.
- used += uintptr(lastElement.pos + lastElement.ksize)
- s.BranchInuse += int(used)
- s.BranchOverflowN += int(p.overflow)
- }
-
- // Keep track of maximum page depth.
- if depth+1 > s.Depth {
- s.Depth = (depth + 1)
- }
- })
-
- // Alloc stats can be computed from page counts and pageSize.
- s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize
- s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize
-
- // Add the max depth of sub-buckets to get total nested depth.
- s.Depth += subStats.Depth
- // Add the stats for all sub-buckets
- s.Add(subStats)
- return s
-}
-
-// forEachPage iterates over every page in a bucket, including inline pages.
-func (b *Bucket) forEachPage(fn func(*page, int)) {
- // If we have an inline page then just use that.
- if b.page != nil {
- fn(b.page, 0)
- return
- }
-
- // Otherwise traverse the page hierarchy.
- b.tx.forEachPage(b.root, 0, fn)
-}
-
-// forEachPageNode iterates over every page (or node) in a bucket.
-// This also includes inline pages.
-func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) {
- // If we have an inline page or root node then just use that.
- if b.page != nil {
- fn(b.page, nil, 0)
- return
- }
- b._forEachPageNode(b.root, 0, fn)
-}
-
-func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) {
- var p, n = b.pageNode(pgid)
-
- // Execute function.
- fn(p, n, depth)
-
- // Recursively loop over children.
- if p != nil {
- if (p.flags & branchPageFlag) != 0 {
- for i := 0; i < int(p.count); i++ {
- elem := p.branchPageElement(uint16(i))
- b._forEachPageNode(elem.pgid, depth+1, fn)
- }
- }
- } else {
- if !n.isLeaf {
- for _, inode := range n.inodes {
- b._forEachPageNode(inode.pgid, depth+1, fn)
- }
- }
- }
-}
-
-// spill writes all the nodes for this bucket to dirty pages.
-func (b *Bucket) spill() error {
- // Spill all child buckets first.
- for name, child := range b.buckets {
- // If the child bucket is small enough and it has no child buckets then
- // write it inline into the parent bucket's page. Otherwise spill it
- // like a normal bucket and make the parent value a pointer to the page.
- var value []byte
- if child.inlineable() {
- child.free()
- value = child.write()
- } else {
- if err := child.spill(); err != nil {
- return err
- }
-
- // Update the child bucket header in this bucket.
- value = make([]byte, unsafe.Sizeof(bucket{}))
- var bucket = (*bucket)(unsafe.Pointer(&value[0]))
- *bucket = *child.bucket
- }
-
- // Skip writing the bucket if there are no materialized nodes.
- if child.rootNode == nil {
- continue
- }
-
- // Update parent node.
- var c = b.Cursor()
- k, _, flags := c.seek([]byte(name))
- if !bytes.Equal([]byte(name), k) {
- panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k))
- }
- if flags&bucketLeafFlag == 0 {
- panic(fmt.Sprintf("unexpected bucket header flag: %x", flags))
- }
- c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag)
- }
-
- // Ignore if there's not a materialized root node.
- if b.rootNode == nil {
- return nil
- }
-
- // Spill nodes.
- if err := b.rootNode.spill(); err != nil {
- return err
- }
- b.rootNode = b.rootNode.root()
-
- // Update the root node for this bucket.
- if b.rootNode.pgid >= b.tx.meta.pgid {
- panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid))
- }
- b.root = b.rootNode.pgid
-
- return nil
-}
-
-// inlineable returns true if a bucket is small enough to be written inline
-// and if it contains no subbuckets. Otherwise returns false.
-func (b *Bucket) inlineable() bool {
- var n = b.rootNode
-
- // Bucket must only contain a single leaf node.
- if n == nil || !n.isLeaf {
- return false
- }
-
- // Bucket is not inlineable if it contains subbuckets or if it goes beyond
- // our threshold for inline bucket size.
- var size = pageHeaderSize
- for _, inode := range n.inodes {
- size += leafPageElementSize + uintptr(len(inode.key)) + uintptr(len(inode.value))
-
- if inode.flags&bucketLeafFlag != 0 {
- return false
- } else if size > b.maxInlineBucketSize() {
- return false
- }
- }
-
- return true
-}
-
-// Returns the maximum total size of a bucket to make it a candidate for inlining.
-func (b *Bucket) maxInlineBucketSize() uintptr {
- return uintptr(b.tx.db.pageSize / 4)
-}
-
-// write allocates and writes a bucket to a byte slice.
-func (b *Bucket) write() []byte {
- // Allocate the appropriate size.
- var n = b.rootNode
- var value = make([]byte, bucketHeaderSize+n.size())
-
- // Write a bucket header.
- var bucket = (*bucket)(unsafe.Pointer(&value[0]))
- *bucket = *b.bucket
-
- // Convert byte slice to a fake page and write the root node.
- var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
- n.write(p)
-
- return value
-}
-
-// rebalance attempts to balance all nodes.
-func (b *Bucket) rebalance() {
- for _, n := range b.nodes {
- n.rebalance()
- }
- for _, child := range b.buckets {
- child.rebalance()
- }
-}
-
-// node creates a node from a page and associates it with a given parent.
-func (b *Bucket) node(pgid pgid, parent *node) *node {
- _assert(b.nodes != nil, "nodes map expected")
-
- // Retrieve node if it's already been created.
- if n := b.nodes[pgid]; n != nil {
- return n
- }
-
- // Otherwise create a node and cache it.
- n := &node{bucket: b, parent: parent}
- if parent == nil {
- b.rootNode = n
- } else {
- parent.children = append(parent.children, n)
- }
-
- // Use the inline page if this is an inline bucket.
- var p = b.page
- if p == nil {
- p = b.tx.page(pgid)
- }
-
- // Read the page into the node and cache it.
- n.read(p)
- b.nodes[pgid] = n
-
- // Update statistics.
- b.tx.stats.NodeCount++
-
- return n
-}
-
-// free recursively frees all pages in the bucket.
-func (b *Bucket) free() {
- if b.root == 0 {
- return
- }
-
- var tx = b.tx
- b.forEachPageNode(func(p *page, n *node, _ int) {
- if p != nil {
- tx.db.freelist.free(tx.meta.txid, p)
- } else {
- n.free()
- }
- })
- b.root = 0
-}
-
-// dereference removes all references to the old mmap.
-func (b *Bucket) dereference() {
- if b.rootNode != nil {
- b.rootNode.root().dereference()
- }
-
- for _, child := range b.buckets {
- child.dereference()
- }
-}
-
-// pageNode returns the in-memory node, if it exists.
-// Otherwise returns the underlying page.
-func (b *Bucket) pageNode(id pgid) (*page, *node) {
- // Inline buckets have a fake page embedded in their value so treat them
- // differently. We'll return the rootNode (if available) or the fake page.
- if b.root == 0 {
- if id != 0 {
- panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id))
- }
- if b.rootNode != nil {
- return nil, b.rootNode
- }
- return b.page, nil
- }
-
- // Check the node cache for non-inline buckets.
- if b.nodes != nil {
- if n := b.nodes[id]; n != nil {
- return nil, n
- }
- }
-
- // Finally lookup the page from the transaction if no node is materialized.
- return b.tx.page(id), nil
-}
-
-// BucketStats records statistics about resources used by a bucket.
-type BucketStats struct {
- // Page count statistics.
- BranchPageN int // number of logical branch pages
- BranchOverflowN int // number of physical branch overflow pages
- LeafPageN int // number of logical leaf pages
- LeafOverflowN int // number of physical leaf overflow pages
-
- // Tree statistics.
- KeyN int // number of keys/value pairs
- Depth int // number of levels in B+tree
-
- // Page size utilization.
- BranchAlloc int // bytes allocated for physical branch pages
- BranchInuse int // bytes actually used for branch data
- LeafAlloc int // bytes allocated for physical leaf pages
- LeafInuse int // bytes actually used for leaf data
-
- // Bucket statistics
- BucketN int // total number of buckets including the top bucket
- InlineBucketN int // total number on inlined buckets
- InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse)
-}
-
-func (s *BucketStats) Add(other BucketStats) {
- s.BranchPageN += other.BranchPageN
- s.BranchOverflowN += other.BranchOverflowN
- s.LeafPageN += other.LeafPageN
- s.LeafOverflowN += other.LeafOverflowN
- s.KeyN += other.KeyN
- if s.Depth < other.Depth {
- s.Depth = other.Depth
- }
- s.BranchAlloc += other.BranchAlloc
- s.BranchInuse += other.BranchInuse
- s.LeafAlloc += other.LeafAlloc
- s.LeafInuse += other.LeafInuse
-
- s.BucketN += other.BucketN
- s.InlineBucketN += other.InlineBucketN
- s.InlineBucketInuse += other.InlineBucketInuse
-}
-
-// cloneBytes returns a copy of a given slice.
-func cloneBytes(v []byte) []byte {
- var clone = make([]byte, len(v))
- copy(clone, v)
- return clone
-}
diff --git a/vendor/go.etcd.io/bbolt/cursor.go b/vendor/go.etcd.io/bbolt/cursor.go
deleted file mode 100644
index 98aeb449..00000000
--- a/vendor/go.etcd.io/bbolt/cursor.go
+++ /dev/null
@@ -1,396 +0,0 @@
-package bbolt
-
-import (
- "bytes"
- "fmt"
- "sort"
-)
-
-// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order.
-// Cursors see nested buckets with value == nil.
-// Cursors can be obtained from a transaction and are valid as long as the transaction is open.
-//
-// Keys and values returned from the cursor are only valid for the life of the transaction.
-//
-// Changing data while traversing with a cursor may cause it to be invalidated
-// and return unexpected keys and/or values. You must reposition your cursor
-// after mutating data.
-type Cursor struct {
- bucket *Bucket
- stack []elemRef
-}
-
-// Bucket returns the bucket that this cursor was created from.
-func (c *Cursor) Bucket() *Bucket {
- return c.bucket
-}
-
-// First moves the cursor to the first item in the bucket and returns its key and value.
-// If the bucket is empty then a nil key and value are returned.
-// The returned key and value are only valid for the life of the transaction.
-func (c *Cursor) First() (key []byte, value []byte) {
- _assert(c.bucket.tx.db != nil, "tx closed")
- c.stack = c.stack[:0]
- p, n := c.bucket.pageNode(c.bucket.root)
- c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
- c.first()
-
- // If we land on an empty page then move to the next value.
- // https://github.com/boltdb/bolt/issues/450
- if c.stack[len(c.stack)-1].count() == 0 {
- c.next()
- }
-
- k, v, flags := c.keyValue()
- if (flags & uint32(bucketLeafFlag)) != 0 {
- return k, nil
- }
- return k, v
-
-}
-
-// Last moves the cursor to the last item in the bucket and returns its key and value.
-// If the bucket is empty then a nil key and value are returned.
-// The returned key and value are only valid for the life of the transaction.
-func (c *Cursor) Last() (key []byte, value []byte) {
- _assert(c.bucket.tx.db != nil, "tx closed")
- c.stack = c.stack[:0]
- p, n := c.bucket.pageNode(c.bucket.root)
- ref := elemRef{page: p, node: n}
- ref.index = ref.count() - 1
- c.stack = append(c.stack, ref)
- c.last()
- k, v, flags := c.keyValue()
- if (flags & uint32(bucketLeafFlag)) != 0 {
- return k, nil
- }
- return k, v
-}
-
-// Next moves the cursor to the next item in the bucket and returns its key and value.
-// If the cursor is at the end of the bucket then a nil key and value are returned.
-// The returned key and value are only valid for the life of the transaction.
-func (c *Cursor) Next() (key []byte, value []byte) {
- _assert(c.bucket.tx.db != nil, "tx closed")
- k, v, flags := c.next()
- if (flags & uint32(bucketLeafFlag)) != 0 {
- return k, nil
- }
- return k, v
-}
-
-// Prev moves the cursor to the previous item in the bucket and returns its key and value.
-// If the cursor is at the beginning of the bucket then a nil key and value are returned.
-// The returned key and value are only valid for the life of the transaction.
-func (c *Cursor) Prev() (key []byte, value []byte) {
- _assert(c.bucket.tx.db != nil, "tx closed")
-
- // Attempt to move back one element until we're successful.
- // Move up the stack as we hit the beginning of each page in our stack.
- for i := len(c.stack) - 1; i >= 0; i-- {
- elem := &c.stack[i]
- if elem.index > 0 {
- elem.index--
- break
- }
- c.stack = c.stack[:i]
- }
-
- // If we've hit the end then return nil.
- if len(c.stack) == 0 {
- return nil, nil
- }
-
- // Move down the stack to find the last element of the last leaf under this branch.
- c.last()
- k, v, flags := c.keyValue()
- if (flags & uint32(bucketLeafFlag)) != 0 {
- return k, nil
- }
- return k, v
-}
-
-// Seek moves the cursor to a given key and returns it.
-// If the key does not exist then the next key is used. If no keys
-// follow, a nil key is returned.
-// The returned key and value are only valid for the life of the transaction.
-func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) {
- k, v, flags := c.seek(seek)
-
- // If we ended up after the last element of a page then move to the next one.
- if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() {
- k, v, flags = c.next()
- }
-
- if k == nil {
- return nil, nil
- } else if (flags & uint32(bucketLeafFlag)) != 0 {
- return k, nil
- }
- return k, v
-}
-
-// Delete removes the current key/value under the cursor from the bucket.
-// Delete fails if current key/value is a bucket or if the transaction is not writable.
-func (c *Cursor) Delete() error {
- if c.bucket.tx.db == nil {
- return ErrTxClosed
- } else if !c.bucket.Writable() {
- return ErrTxNotWritable
- }
-
- key, _, flags := c.keyValue()
- // Return an error if current value is a bucket.
- if (flags & bucketLeafFlag) != 0 {
- return ErrIncompatibleValue
- }
- c.node().del(key)
-
- return nil
-}
-
-// seek moves the cursor to a given key and returns it.
-// If the key does not exist then the next key is used.
-func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) {
- _assert(c.bucket.tx.db != nil, "tx closed")
-
- // Start from root page/node and traverse to correct page.
- c.stack = c.stack[:0]
- c.search(seek, c.bucket.root)
-
- // If this is a bucket then return a nil value.
- return c.keyValue()
-}
-
-// first moves the cursor to the first leaf element under the last page in the stack.
-func (c *Cursor) first() {
- for {
- // Exit when we hit a leaf page.
- var ref = &c.stack[len(c.stack)-1]
- if ref.isLeaf() {
- break
- }
-
- // Keep adding pages pointing to the first element to the stack.
- var pgid pgid
- if ref.node != nil {
- pgid = ref.node.inodes[ref.index].pgid
- } else {
- pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
- }
- p, n := c.bucket.pageNode(pgid)
- c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
- }
-}
-
-// last moves the cursor to the last leaf element under the last page in the stack.
-func (c *Cursor) last() {
- for {
- // Exit when we hit a leaf page.
- ref := &c.stack[len(c.stack)-1]
- if ref.isLeaf() {
- break
- }
-
- // Keep adding pages pointing to the last element in the stack.
- var pgid pgid
- if ref.node != nil {
- pgid = ref.node.inodes[ref.index].pgid
- } else {
- pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
- }
- p, n := c.bucket.pageNode(pgid)
-
- var nextRef = elemRef{page: p, node: n}
- nextRef.index = nextRef.count() - 1
- c.stack = append(c.stack, nextRef)
- }
-}
-
-// next moves to the next leaf element and returns the key and value.
-// If the cursor is at the last leaf element then it stays there and returns nil.
-func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
- for {
- // Attempt to move over one element until we're successful.
- // Move up the stack as we hit the end of each page in our stack.
- var i int
- for i = len(c.stack) - 1; i >= 0; i-- {
- elem := &c.stack[i]
- if elem.index < elem.count()-1 {
- elem.index++
- break
- }
- }
-
- // If we've hit the root page then stop and return. This will leave the
- // cursor on the last element of the last page.
- if i == -1 {
- return nil, nil, 0
- }
-
- // Otherwise start from where we left off in the stack and find the
- // first element of the first leaf page.
- c.stack = c.stack[:i+1]
- c.first()
-
- // If this is an empty page then restart and move back up the stack.
- // https://github.com/boltdb/bolt/issues/450
- if c.stack[len(c.stack)-1].count() == 0 {
- continue
- }
-
- return c.keyValue()
- }
-}
-
-// search recursively performs a binary search against a given page/node until it finds a given key.
-func (c *Cursor) search(key []byte, pgid pgid) {
- p, n := c.bucket.pageNode(pgid)
- if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 {
- panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags))
- }
- e := elemRef{page: p, node: n}
- c.stack = append(c.stack, e)
-
- // If we're on a leaf page/node then find the specific node.
- if e.isLeaf() {
- c.nsearch(key)
- return
- }
-
- if n != nil {
- c.searchNode(key, n)
- return
- }
- c.searchPage(key, p)
-}
-
-func (c *Cursor) searchNode(key []byte, n *node) {
- var exact bool
- index := sort.Search(len(n.inodes), func(i int) bool {
- // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
- // sort.Search() finds the lowest index where f() != -1 but we need the highest index.
- ret := bytes.Compare(n.inodes[i].key, key)
- if ret == 0 {
- exact = true
- }
- return ret != -1
- })
- if !exact && index > 0 {
- index--
- }
- c.stack[len(c.stack)-1].index = index
-
- // Recursively search to the next page.
- c.search(key, n.inodes[index].pgid)
-}
-
-func (c *Cursor) searchPage(key []byte, p *page) {
- // Binary search for the correct range.
- inodes := p.branchPageElements()
-
- var exact bool
- index := sort.Search(int(p.count), func(i int) bool {
- // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
- // sort.Search() finds the lowest index where f() != -1 but we need the highest index.
- ret := bytes.Compare(inodes[i].key(), key)
- if ret == 0 {
- exact = true
- }
- return ret != -1
- })
- if !exact && index > 0 {
- index--
- }
- c.stack[len(c.stack)-1].index = index
-
- // Recursively search to the next page.
- c.search(key, inodes[index].pgid)
-}
-
-// nsearch searches the leaf node on the top of the stack for a key.
-func (c *Cursor) nsearch(key []byte) {
- e := &c.stack[len(c.stack)-1]
- p, n := e.page, e.node
-
- // If we have a node then search its inodes.
- if n != nil {
- index := sort.Search(len(n.inodes), func(i int) bool {
- return bytes.Compare(n.inodes[i].key, key) != -1
- })
- e.index = index
- return
- }
-
- // If we have a page then search its leaf elements.
- inodes := p.leafPageElements()
- index := sort.Search(int(p.count), func(i int) bool {
- return bytes.Compare(inodes[i].key(), key) != -1
- })
- e.index = index
-}
-
-// keyValue returns the key and value of the current leaf element.
-func (c *Cursor) keyValue() ([]byte, []byte, uint32) {
- ref := &c.stack[len(c.stack)-1]
-
- // If the cursor is pointing to the end of page/node then return nil.
- if ref.count() == 0 || ref.index >= ref.count() {
- return nil, nil, 0
- }
-
- // Retrieve value from node.
- if ref.node != nil {
- inode := &ref.node.inodes[ref.index]
- return inode.key, inode.value, inode.flags
- }
-
- // Or retrieve value from page.
- elem := ref.page.leafPageElement(uint16(ref.index))
- return elem.key(), elem.value(), elem.flags
-}
-
-// node returns the node that the cursor is currently positioned on.
-func (c *Cursor) node() *node {
- _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack")
-
- // If the top of the stack is a leaf node then just return it.
- if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() {
- return ref.node
- }
-
- // Start from root and traverse down the hierarchy.
- var n = c.stack[0].node
- if n == nil {
- n = c.bucket.node(c.stack[0].page.id, nil)
- }
- for _, ref := range c.stack[:len(c.stack)-1] {
- _assert(!n.isLeaf, "expected branch node")
- n = n.childAt(ref.index)
- }
- _assert(n.isLeaf, "expected leaf node")
- return n
-}
-
-// elemRef represents a reference to an element on a given page/node.
-type elemRef struct {
- page *page
- node *node
- index int
-}
-
-// isLeaf returns whether the ref is pointing at a leaf page/node.
-func (r *elemRef) isLeaf() bool {
- if r.node != nil {
- return r.node.isLeaf
- }
- return (r.page.flags & leafPageFlag) != 0
-}
-
-// count returns the number of inodes or page elements.
-func (r *elemRef) count() int {
- if r.node != nil {
- return len(r.node.inodes)
- }
- return int(r.page.count)
-}
diff --git a/vendor/go.etcd.io/bbolt/db.go b/vendor/go.etcd.io/bbolt/db.go
deleted file mode 100644
index 80b0095c..00000000
--- a/vendor/go.etcd.io/bbolt/db.go
+++ /dev/null
@@ -1,1174 +0,0 @@
-package bbolt
-
-import (
- "errors"
- "fmt"
- "hash/fnv"
- "log"
- "os"
- "runtime"
- "sort"
- "sync"
- "time"
- "unsafe"
-)
-
-// The largest step that can be taken when remapping the mmap.
-const maxMmapStep = 1 << 30 // 1GB
-
-// The data file format version.
-const version = 2
-
-// Represents a marker value to indicate that a file is a Bolt DB.
-const magic uint32 = 0xED0CDAED
-
-const pgidNoFreelist pgid = 0xffffffffffffffff
-
-// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when
-// syncing changes to a file. This is required as some operating systems,
-// such as OpenBSD, do not have a unified buffer cache (UBC) and writes
-// must be synchronized using the msync(2) syscall.
-const IgnoreNoSync = runtime.GOOS == "openbsd"
-
-// Default values if not set in a DB instance.
-const (
- DefaultMaxBatchSize int = 1000
- DefaultMaxBatchDelay = 10 * time.Millisecond
- DefaultAllocSize = 16 * 1024 * 1024
-)
-
-// default page size for db is set to the OS page size.
-var defaultPageSize = os.Getpagesize()
-
-// The time elapsed between consecutive file locking attempts.
-const flockRetryTimeout = 50 * time.Millisecond
-
-// FreelistType is the type of the freelist backend
-type FreelistType string
-
-const (
- // FreelistArrayType indicates backend freelist type is array
- FreelistArrayType = FreelistType("array")
- // FreelistMapType indicates backend freelist type is hashmap
- FreelistMapType = FreelistType("hashmap")
-)
-
-// DB represents a collection of buckets persisted to a file on disk.
-// All data access is performed through transactions which can be obtained through the DB.
-// All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called.
-type DB struct {
- // When enabled, the database will perform a Check() after every commit.
- // A panic is issued if the database is in an inconsistent state. This
- // flag has a large performance impact so it should only be used for
- // debugging purposes.
- StrictMode bool
-
- // Setting the NoSync flag will cause the database to skip fsync()
- // calls after each commit. This can be useful when bulk loading data
- // into a database and you can restart the bulk load in the event of
- // a system failure or database corruption. Do not set this flag for
- // normal use.
- //
- // If the package global IgnoreNoSync constant is true, this value is
- // ignored. See the comment on that constant for more details.
- //
- // THIS IS UNSAFE. PLEASE USE WITH CAUTION.
- NoSync bool
-
- // When true, skips syncing freelist to disk. This improves the database
- // write performance under normal operation, but requires a full database
- // re-sync during recovery.
- NoFreelistSync bool
-
- // FreelistType sets the backend freelist type. There are two options. Array which is simple but endures
- // dramatic performance degradation if database is large and framentation in freelist is common.
- // The alternative one is using hashmap, it is faster in almost all circumstances
- // but it doesn't guarantee that it offers the smallest page id available. In normal case it is safe.
- // The default type is array
- FreelistType FreelistType
-
- // When true, skips the truncate call when growing the database.
- // Setting this to true is only safe on non-ext3/ext4 systems.
- // Skipping truncation avoids preallocation of hard drive space and
- // bypasses a truncate() and fsync() syscall on remapping.
- //
- // https://github.com/boltdb/bolt/issues/284
- NoGrowSync bool
-
- // If you want to read the entire database fast, you can set MmapFlag to
- // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead.
- MmapFlags int
-
- // MaxBatchSize is the maximum size of a batch. Default value is
- // copied from DefaultMaxBatchSize in Open.
- //
- // If <=0, disables batching.
- //
- // Do not change concurrently with calls to Batch.
- MaxBatchSize int
-
- // MaxBatchDelay is the maximum delay before a batch starts.
- // Default value is copied from DefaultMaxBatchDelay in Open.
- //
- // If <=0, effectively disables batching.
- //
- // Do not change concurrently with calls to Batch.
- MaxBatchDelay time.Duration
-
- // AllocSize is the amount of space allocated when the database
- // needs to create new pages. This is done to amortize the cost
- // of truncate() and fsync() when growing the data file.
- AllocSize int
-
- path string
- openFile func(string, int, os.FileMode) (*os.File, error)
- file *os.File
- dataref []byte // mmap'ed readonly, write throws SEGV
- data *[maxMapSize]byte
- datasz int
- filesz int // current on disk file size
- meta0 *meta
- meta1 *meta
- pageSize int
- opened bool
- rwtx *Tx
- txs []*Tx
- stats Stats
-
- freelist *freelist
- freelistLoad sync.Once
-
- pagePool sync.Pool
-
- batchMu sync.Mutex
- batch *batch
-
- rwlock sync.Mutex // Allows only one writer at a time.
- metalock sync.Mutex // Protects meta page access.
- mmaplock sync.RWMutex // Protects mmap access during remapping.
- statlock sync.RWMutex // Protects stats access.
-
- ops struct {
- writeAt func(b []byte, off int64) (n int, err error)
- }
-
- // Read only mode.
- // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately.
- readOnly bool
-}
-
-// Path returns the path to currently open database file.
-func (db *DB) Path() string {
- return db.path
-}
-
-// GoString returns the Go string representation of the database.
-func (db *DB) GoString() string {
- return fmt.Sprintf("bolt.DB{path:%q}", db.path)
-}
-
-// String returns the string representation of the database.
-func (db *DB) String() string {
- return fmt.Sprintf("DB<%q>", db.path)
-}
-
-// Open creates and opens a database at the given path.
-// If the file does not exist then it will be created automatically.
-// Passing in nil options will cause Bolt to open the database with the default options.
-func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
- db := &DB{
- opened: true,
- }
- // Set default options if no options are provided.
- if options == nil {
- options = DefaultOptions
- }
- db.NoSync = options.NoSync
- db.NoGrowSync = options.NoGrowSync
- db.MmapFlags = options.MmapFlags
- db.NoFreelistSync = options.NoFreelistSync
- db.FreelistType = options.FreelistType
-
- // Set default values for later DB operations.
- db.MaxBatchSize = DefaultMaxBatchSize
- db.MaxBatchDelay = DefaultMaxBatchDelay
- db.AllocSize = DefaultAllocSize
-
- flag := os.O_RDWR
- if options.ReadOnly {
- flag = os.O_RDONLY
- db.readOnly = true
- }
-
- db.openFile = options.OpenFile
- if db.openFile == nil {
- db.openFile = os.OpenFile
- }
-
- // Open data file and separate sync handler for metadata writes.
- var err error
- if db.file, err = db.openFile(path, flag|os.O_CREATE, mode); err != nil {
- _ = db.close()
- return nil, err
- }
- db.path = db.file.Name()
-
- // Lock file so that other processes using Bolt in read-write mode cannot
- // use the database at the same time. This would cause corruption since
- // the two processes would write meta pages and free pages separately.
- // The database file is locked exclusively (only one process can grab the lock)
- // if !options.ReadOnly.
- // The database file is locked using the shared lock (more than one process may
- // hold a lock at the same time) otherwise (options.ReadOnly is set).
- if err := flock(db, !db.readOnly, options.Timeout); err != nil {
- _ = db.close()
- return nil, err
- }
-
- // Default values for test hooks
- db.ops.writeAt = db.file.WriteAt
-
- if db.pageSize = options.PageSize; db.pageSize == 0 {
- // Set the default page size to the OS page size.
- db.pageSize = defaultPageSize
- }
-
- // Initialize the database if it doesn't exist.
- if info, err := db.file.Stat(); err != nil {
- _ = db.close()
- return nil, err
- } else if info.Size() == 0 {
- // Initialize new files with meta pages.
- if err := db.init(); err != nil {
- // clean up file descriptor on initialization fail
- _ = db.close()
- return nil, err
- }
- } else {
- // Read the first meta page to determine the page size.
- var buf [0x1000]byte
- // If we can't read the page size, but can read a page, assume
- // it's the same as the OS or one given -- since that's how the
- // page size was chosen in the first place.
- //
- // If the first page is invalid and this OS uses a different
- // page size than what the database was created with then we
- // are out of luck and cannot access the database.
- //
- // TODO: scan for next page
- if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) {
- if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil {
- db.pageSize = int(m.pageSize)
- }
- } else {
- _ = db.close()
- return nil, ErrInvalid
- }
- }
-
- // Initialize page pool.
- db.pagePool = sync.Pool{
- New: func() interface{} {
- return make([]byte, db.pageSize)
- },
- }
-
- // Memory map the data file.
- if err := db.mmap(options.InitialMmapSize); err != nil {
- _ = db.close()
- return nil, err
- }
-
- if db.readOnly {
- return db, nil
- }
-
- db.loadFreelist()
-
- // Flush freelist when transitioning from no sync to sync so
- // NoFreelistSync unaware boltdb can open the db later.
- if !db.NoFreelistSync && !db.hasSyncedFreelist() {
- tx, err := db.Begin(true)
- if tx != nil {
- err = tx.Commit()
- }
- if err != nil {
- _ = db.close()
- return nil, err
- }
- }
-
- // Mark the database as opened and return.
- return db, nil
-}
-
-// loadFreelist reads the freelist if it is synced, or reconstructs it
-// by scanning the DB if it is not synced. It assumes there are no
-// concurrent accesses being made to the freelist.
-func (db *DB) loadFreelist() {
- db.freelistLoad.Do(func() {
- db.freelist = newFreelist(db.FreelistType)
- if !db.hasSyncedFreelist() {
- // Reconstruct free list by scanning the DB.
- db.freelist.readIDs(db.freepages())
- } else {
- // Read free list from freelist page.
- db.freelist.read(db.page(db.meta().freelist))
- }
- db.stats.FreePageN = db.freelist.free_count()
- })
-}
-
-func (db *DB) hasSyncedFreelist() bool {
- return db.meta().freelist != pgidNoFreelist
-}
-
-// mmap opens the underlying memory-mapped file and initializes the meta references.
-// minsz is the minimum size that the new mmap can be.
-func (db *DB) mmap(minsz int) error {
- db.mmaplock.Lock()
- defer db.mmaplock.Unlock()
-
- info, err := db.file.Stat()
- if err != nil {
- return fmt.Errorf("mmap stat error: %s", err)
- } else if int(info.Size()) < db.pageSize*2 {
- return fmt.Errorf("file size too small")
- }
-
- // Ensure the size is at least the minimum size.
- var size = int(info.Size())
- if size < minsz {
- size = minsz
- }
- size, err = db.mmapSize(size)
- if err != nil {
- return err
- }
-
- // Dereference all mmap references before unmapping.
- if db.rwtx != nil {
- db.rwtx.root.dereference()
- }
-
- // Unmap existing data before continuing.
- if err := db.munmap(); err != nil {
- return err
- }
-
- // Memory-map the data file as a byte slice.
- if err := mmap(db, size); err != nil {
- return err
- }
-
- // Save references to the meta pages.
- db.meta0 = db.page(0).meta()
- db.meta1 = db.page(1).meta()
-
- // Validate the meta pages. We only return an error if both meta pages fail
- // validation, since meta0 failing validation means that it wasn't saved
- // properly -- but we can recover using meta1. And vice-versa.
- err0 := db.meta0.validate()
- err1 := db.meta1.validate()
- if err0 != nil && err1 != nil {
- return err0
- }
-
- return nil
-}
-
-// munmap unmaps the data file from memory.
-func (db *DB) munmap() error {
- if err := munmap(db); err != nil {
- return fmt.Errorf("unmap error: " + err.Error())
- }
- return nil
-}
-
-// mmapSize determines the appropriate size for the mmap given the current size
-// of the database. The minimum size is 32KB and doubles until it reaches 1GB.
-// Returns an error if the new mmap size is greater than the max allowed.
-func (db *DB) mmapSize(size int) (int, error) {
- // Double the size from 32KB until 1GB.
- for i := uint(15); i <= 30; i++ {
- if size <= 1< maxMapSize {
- return 0, fmt.Errorf("mmap too large")
- }
-
- // If larger than 1GB then grow by 1GB at a time.
- sz := int64(size)
- if remainder := sz % int64(maxMmapStep); remainder > 0 {
- sz += int64(maxMmapStep) - remainder
- }
-
- // Ensure that the mmap size is a multiple of the page size.
- // This should always be true since we're incrementing in MBs.
- pageSize := int64(db.pageSize)
- if (sz % pageSize) != 0 {
- sz = ((sz / pageSize) + 1) * pageSize
- }
-
- // If we've exceeded the max size then only grow up to the max size.
- if sz > maxMapSize {
- sz = maxMapSize
- }
-
- return int(sz), nil
-}
-
-// init creates a new database file and initializes its meta pages.
-func (db *DB) init() error {
- // Create two meta pages on a buffer.
- buf := make([]byte, db.pageSize*4)
- for i := 0; i < 2; i++ {
- p := db.pageInBuffer(buf[:], pgid(i))
- p.id = pgid(i)
- p.flags = metaPageFlag
-
- // Initialize the meta page.
- m := p.meta()
- m.magic = magic
- m.version = version
- m.pageSize = uint32(db.pageSize)
- m.freelist = 2
- m.root = bucket{root: 3}
- m.pgid = 4
- m.txid = txid(i)
- m.checksum = m.sum64()
- }
-
- // Write an empty freelist at page 3.
- p := db.pageInBuffer(buf[:], pgid(2))
- p.id = pgid(2)
- p.flags = freelistPageFlag
- p.count = 0
-
- // Write an empty leaf page at page 4.
- p = db.pageInBuffer(buf[:], pgid(3))
- p.id = pgid(3)
- p.flags = leafPageFlag
- p.count = 0
-
- // Write the buffer to our data file.
- if _, err := db.ops.writeAt(buf, 0); err != nil {
- return err
- }
- if err := fdatasync(db); err != nil {
- return err
- }
-
- return nil
-}
-
-// Close releases all database resources.
-// It will block waiting for any open transactions to finish
-// before closing the database and returning.
-func (db *DB) Close() error {
- db.rwlock.Lock()
- defer db.rwlock.Unlock()
-
- db.metalock.Lock()
- defer db.metalock.Unlock()
-
- db.mmaplock.Lock()
- defer db.mmaplock.Unlock()
-
- return db.close()
-}
-
-func (db *DB) close() error {
- if !db.opened {
- return nil
- }
-
- db.opened = false
-
- db.freelist = nil
-
- // Clear ops.
- db.ops.writeAt = nil
-
- // Close the mmap.
- if err := db.munmap(); err != nil {
- return err
- }
-
- // Close file handles.
- if db.file != nil {
- // No need to unlock read-only file.
- if !db.readOnly {
- // Unlock the file.
- if err := funlock(db); err != nil {
- log.Printf("bolt.Close(): funlock error: %s", err)
- }
- }
-
- // Close the file descriptor.
- if err := db.file.Close(); err != nil {
- return fmt.Errorf("db file close: %s", err)
- }
- db.file = nil
- }
-
- db.path = ""
- return nil
-}
-
-// Begin starts a new transaction.
-// Multiple read-only transactions can be used concurrently but only one
-// write transaction can be used at a time. Starting multiple write transactions
-// will cause the calls to block and be serialized until the current write
-// transaction finishes.
-//
-// Transactions should not be dependent on one another. Opening a read
-// transaction and a write transaction in the same goroutine can cause the
-// writer to deadlock because the database periodically needs to re-mmap itself
-// as it grows and it cannot do that while a read transaction is open.
-//
-// If a long running read transaction (for example, a snapshot transaction) is
-// needed, you might want to set DB.InitialMmapSize to a large enough value
-// to avoid potential blocking of write transaction.
-//
-// IMPORTANT: You must close read-only transactions after you are finished or
-// else the database will not reclaim old pages.
-func (db *DB) Begin(writable bool) (*Tx, error) {
- if writable {
- return db.beginRWTx()
- }
- return db.beginTx()
-}
-
-func (db *DB) beginTx() (*Tx, error) {
- // Lock the meta pages while we initialize the transaction. We obtain
- // the meta lock before the mmap lock because that's the order that the
- // write transaction will obtain them.
- db.metalock.Lock()
-
- // Obtain a read-only lock on the mmap. When the mmap is remapped it will
- // obtain a write lock so all transactions must finish before it can be
- // remapped.
- db.mmaplock.RLock()
-
- // Exit if the database is not open yet.
- if !db.opened {
- db.mmaplock.RUnlock()
- db.metalock.Unlock()
- return nil, ErrDatabaseNotOpen
- }
-
- // Create a transaction associated with the database.
- t := &Tx{}
- t.init(db)
-
- // Keep track of transaction until it closes.
- db.txs = append(db.txs, t)
- n := len(db.txs)
-
- // Unlock the meta pages.
- db.metalock.Unlock()
-
- // Update the transaction stats.
- db.statlock.Lock()
- db.stats.TxN++
- db.stats.OpenTxN = n
- db.statlock.Unlock()
-
- return t, nil
-}
-
-func (db *DB) beginRWTx() (*Tx, error) {
- // If the database was opened with Options.ReadOnly, return an error.
- if db.readOnly {
- return nil, ErrDatabaseReadOnly
- }
-
- // Obtain writer lock. This is released by the transaction when it closes.
- // This enforces only one writer transaction at a time.
- db.rwlock.Lock()
-
- // Once we have the writer lock then we can lock the meta pages so that
- // we can set up the transaction.
- db.metalock.Lock()
- defer db.metalock.Unlock()
-
- // Exit if the database is not open yet.
- if !db.opened {
- db.rwlock.Unlock()
- return nil, ErrDatabaseNotOpen
- }
-
- // Create a transaction associated with the database.
- t := &Tx{writable: true}
- t.init(db)
- db.rwtx = t
- db.freePages()
- return t, nil
-}
-
-// freePages releases any pages associated with closed read-only transactions.
-func (db *DB) freePages() {
- // Free all pending pages prior to earliest open transaction.
- sort.Sort(txsById(db.txs))
- minid := txid(0xFFFFFFFFFFFFFFFF)
- if len(db.txs) > 0 {
- minid = db.txs[0].meta.txid
- }
- if minid > 0 {
- db.freelist.release(minid - 1)
- }
- // Release unused txid extents.
- for _, t := range db.txs {
- db.freelist.releaseRange(minid, t.meta.txid-1)
- minid = t.meta.txid + 1
- }
- db.freelist.releaseRange(minid, txid(0xFFFFFFFFFFFFFFFF))
- // Any page both allocated and freed in an extent is safe to release.
-}
-
-type txsById []*Tx
-
-func (t txsById) Len() int { return len(t) }
-func (t txsById) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
-func (t txsById) Less(i, j int) bool { return t[i].meta.txid < t[j].meta.txid }
-
-// removeTx removes a transaction from the database.
-func (db *DB) removeTx(tx *Tx) {
- // Release the read lock on the mmap.
- db.mmaplock.RUnlock()
-
- // Use the meta lock to restrict access to the DB object.
- db.metalock.Lock()
-
- // Remove the transaction.
- for i, t := range db.txs {
- if t == tx {
- last := len(db.txs) - 1
- db.txs[i] = db.txs[last]
- db.txs[last] = nil
- db.txs = db.txs[:last]
- break
- }
- }
- n := len(db.txs)
-
- // Unlock the meta pages.
- db.metalock.Unlock()
-
- // Merge statistics.
- db.statlock.Lock()
- db.stats.OpenTxN = n
- db.stats.TxStats.add(&tx.stats)
- db.statlock.Unlock()
-}
-
-// Update executes a function within the context of a read-write managed transaction.
-// If no error is returned from the function then the transaction is committed.
-// If an error is returned then the entire transaction is rolled back.
-// Any error that is returned from the function or returned from the commit is
-// returned from the Update() method.
-//
-// Attempting to manually commit or rollback within the function will cause a panic.
-func (db *DB) Update(fn func(*Tx) error) error {
- t, err := db.Begin(true)
- if err != nil {
- return err
- }
-
- // Make sure the transaction rolls back in the event of a panic.
- defer func() {
- if t.db != nil {
- t.rollback()
- }
- }()
-
- // Mark as a managed tx so that the inner function cannot manually commit.
- t.managed = true
-
- // If an error is returned from the function then rollback and return error.
- err = fn(t)
- t.managed = false
- if err != nil {
- _ = t.Rollback()
- return err
- }
-
- return t.Commit()
-}
-
-// View executes a function within the context of a managed read-only transaction.
-// Any error that is returned from the function is returned from the View() method.
-//
-// Attempting to manually rollback within the function will cause a panic.
-func (db *DB) View(fn func(*Tx) error) error {
- t, err := db.Begin(false)
- if err != nil {
- return err
- }
-
- // Make sure the transaction rolls back in the event of a panic.
- defer func() {
- if t.db != nil {
- t.rollback()
- }
- }()
-
- // Mark as a managed tx so that the inner function cannot manually rollback.
- t.managed = true
-
- // If an error is returned from the function then pass it through.
- err = fn(t)
- t.managed = false
- if err != nil {
- _ = t.Rollback()
- return err
- }
-
- return t.Rollback()
-}
-
-// Batch calls fn as part of a batch. It behaves similar to Update,
-// except:
-//
-// 1. concurrent Batch calls can be combined into a single Bolt
-// transaction.
-//
-// 2. the function passed to Batch may be called multiple times,
-// regardless of whether it returns error or not.
-//
-// This means that Batch function side effects must be idempotent and
-// take permanent effect only after a successful return is seen in
-// caller.
-//
-// The maximum batch size and delay can be adjusted with DB.MaxBatchSize
-// and DB.MaxBatchDelay, respectively.
-//
-// Batch is only useful when there are multiple goroutines calling it.
-func (db *DB) Batch(fn func(*Tx) error) error {
- errCh := make(chan error, 1)
-
- db.batchMu.Lock()
- if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) {
- // There is no existing batch, or the existing batch is full; start a new one.
- db.batch = &batch{
- db: db,
- }
- db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger)
- }
- db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh})
- if len(db.batch.calls) >= db.MaxBatchSize {
- // wake up batch, it's ready to run
- go db.batch.trigger()
- }
- db.batchMu.Unlock()
-
- err := <-errCh
- if err == trySolo {
- err = db.Update(fn)
- }
- return err
-}
-
-type call struct {
- fn func(*Tx) error
- err chan<- error
-}
-
-type batch struct {
- db *DB
- timer *time.Timer
- start sync.Once
- calls []call
-}
-
-// trigger runs the batch if it hasn't already been run.
-func (b *batch) trigger() {
- b.start.Do(b.run)
-}
-
-// run performs the transactions in the batch and communicates results
-// back to DB.Batch.
-func (b *batch) run() {
- b.db.batchMu.Lock()
- b.timer.Stop()
- // Make sure no new work is added to this batch, but don't break
- // other batches.
- if b.db.batch == b {
- b.db.batch = nil
- }
- b.db.batchMu.Unlock()
-
-retry:
- for len(b.calls) > 0 {
- var failIdx = -1
- err := b.db.Update(func(tx *Tx) error {
- for i, c := range b.calls {
- if err := safelyCall(c.fn, tx); err != nil {
- failIdx = i
- return err
- }
- }
- return nil
- })
-
- if failIdx >= 0 {
- // take the failing transaction out of the batch. it's
- // safe to shorten b.calls here because db.batch no longer
- // points to us, and we hold the mutex anyway.
- c := b.calls[failIdx]
- b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1]
- // tell the submitter re-run it solo, continue with the rest of the batch
- c.err <- trySolo
- continue retry
- }
-
- // pass success, or bolt internal errors, to all callers
- for _, c := range b.calls {
- c.err <- err
- }
- break retry
- }
-}
-
-// trySolo is a special sentinel error value used for signaling that a
-// transaction function should be re-run. It should never be seen by
-// callers.
-var trySolo = errors.New("batch function returned an error and should be re-run solo")
-
-type panicked struct {
- reason interface{}
-}
-
-func (p panicked) Error() string {
- if err, ok := p.reason.(error); ok {
- return err.Error()
- }
- return fmt.Sprintf("panic: %v", p.reason)
-}
-
-func safelyCall(fn func(*Tx) error, tx *Tx) (err error) {
- defer func() {
- if p := recover(); p != nil {
- err = panicked{p}
- }
- }()
- return fn(tx)
-}
-
-// Sync executes fdatasync() against the database file handle.
-//
-// This is not necessary under normal operation, however, if you use NoSync
-// then it allows you to force the database file to sync against the disk.
-func (db *DB) Sync() error { return fdatasync(db) }
-
-// Stats retrieves ongoing performance stats for the database.
-// This is only updated when a transaction closes.
-func (db *DB) Stats() Stats {
- db.statlock.RLock()
- defer db.statlock.RUnlock()
- return db.stats
-}
-
-// This is for internal access to the raw data bytes from the C cursor, use
-// carefully, or not at all.
-func (db *DB) Info() *Info {
- return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize}
-}
-
-// page retrieves a page reference from the mmap based on the current page size.
-func (db *DB) page(id pgid) *page {
- pos := id * pgid(db.pageSize)
- return (*page)(unsafe.Pointer(&db.data[pos]))
-}
-
-// pageInBuffer retrieves a page reference from a given byte array based on the current page size.
-func (db *DB) pageInBuffer(b []byte, id pgid) *page {
- return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)]))
-}
-
-// meta retrieves the current meta page reference.
-func (db *DB) meta() *meta {
- // We have to return the meta with the highest txid which doesn't fail
- // validation. Otherwise, we can cause errors when in fact the database is
- // in a consistent state. metaA is the one with the higher txid.
- metaA := db.meta0
- metaB := db.meta1
- if db.meta1.txid > db.meta0.txid {
- metaA = db.meta1
- metaB = db.meta0
- }
-
- // Use higher meta page if valid. Otherwise fallback to previous, if valid.
- if err := metaA.validate(); err == nil {
- return metaA
- } else if err := metaB.validate(); err == nil {
- return metaB
- }
-
- // This should never be reached, because both meta1 and meta0 were validated
- // on mmap() and we do fsync() on every write.
- panic("bolt.DB.meta(): invalid meta pages")
-}
-
-// allocate returns a contiguous block of memory starting at a given page.
-func (db *DB) allocate(txid txid, count int) (*page, error) {
- // Allocate a temporary buffer for the page.
- var buf []byte
- if count == 1 {
- buf = db.pagePool.Get().([]byte)
- } else {
- buf = make([]byte, count*db.pageSize)
- }
- p := (*page)(unsafe.Pointer(&buf[0]))
- p.overflow = uint32(count - 1)
-
- // Use pages from the freelist if they are available.
- if p.id = db.freelist.allocate(txid, count); p.id != 0 {
- return p, nil
- }
-
- // Resize mmap() if we're at the end.
- p.id = db.rwtx.meta.pgid
- var minsz = int((p.id+pgid(count))+1) * db.pageSize
- if minsz >= db.datasz {
- if err := db.mmap(minsz); err != nil {
- return nil, fmt.Errorf("mmap allocate error: %s", err)
- }
- }
-
- // Move the page id high water mark.
- db.rwtx.meta.pgid += pgid(count)
-
- return p, nil
-}
-
-// grow grows the size of the database to the given sz.
-func (db *DB) grow(sz int) error {
- // Ignore if the new size is less than available file size.
- if sz <= db.filesz {
- return nil
- }
-
- // If the data is smaller than the alloc size then only allocate what's needed.
- // Once it goes over the allocation size then allocate in chunks.
- if db.datasz < db.AllocSize {
- sz = db.datasz
- } else {
- sz += db.AllocSize
- }
-
- // Truncate and fsync to ensure file size metadata is flushed.
- // https://github.com/boltdb/bolt/issues/284
- if !db.NoGrowSync && !db.readOnly {
- if runtime.GOOS != "windows" {
- if err := db.file.Truncate(int64(sz)); err != nil {
- return fmt.Errorf("file resize error: %s", err)
- }
- }
- if err := db.file.Sync(); err != nil {
- return fmt.Errorf("file sync error: %s", err)
- }
- }
-
- db.filesz = sz
- return nil
-}
-
-func (db *DB) IsReadOnly() bool {
- return db.readOnly
-}
-
-func (db *DB) freepages() []pgid {
- tx, err := db.beginTx()
- defer func() {
- err = tx.Rollback()
- if err != nil {
- panic("freepages: failed to rollback tx")
- }
- }()
- if err != nil {
- panic("freepages: failed to open read only tx")
- }
-
- reachable := make(map[pgid]*page)
- nofreed := make(map[pgid]bool)
- ech := make(chan error)
- go func() {
- for e := range ech {
- panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e))
- }
- }()
- tx.checkBucket(&tx.root, reachable, nofreed, ech)
- close(ech)
-
- var fids []pgid
- for i := pgid(2); i < db.meta().pgid; i++ {
- if _, ok := reachable[i]; !ok {
- fids = append(fids, i)
- }
- }
- return fids
-}
-
-// Options represents the options that can be set when opening a database.
-type Options struct {
- // Timeout is the amount of time to wait to obtain a file lock.
- // When set to zero it will wait indefinitely. This option is only
- // available on Darwin and Linux.
- Timeout time.Duration
-
- // Sets the DB.NoGrowSync flag before memory mapping the file.
- NoGrowSync bool
-
- // Do not sync freelist to disk. This improves the database write performance
- // under normal operation, but requires a full database re-sync during recovery.
- NoFreelistSync bool
-
- // FreelistType sets the backend freelist type. There are two options. Array which is simple but endures
- // dramatic performance degradation if database is large and framentation in freelist is common.
- // The alternative one is using hashmap, it is faster in almost all circumstances
- // but it doesn't guarantee that it offers the smallest page id available. In normal case it is safe.
- // The default type is array
- FreelistType FreelistType
-
- // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to
- // grab a shared lock (UNIX).
- ReadOnly bool
-
- // Sets the DB.MmapFlags flag before memory mapping the file.
- MmapFlags int
-
- // InitialMmapSize is the initial mmap size of the database
- // in bytes. Read transactions won't block write transaction
- // if the InitialMmapSize is large enough to hold database mmap
- // size. (See DB.Begin for more information)
- //
- // If <=0, the initial map size is 0.
- // If initialMmapSize is smaller than the previous database size,
- // it takes no effect.
- InitialMmapSize int
-
- // PageSize overrides the default OS page size.
- PageSize int
-
- // NoSync sets the initial value of DB.NoSync. Normally this can just be
- // set directly on the DB itself when returned from Open(), but this option
- // is useful in APIs which expose Options but not the underlying DB.
- NoSync bool
-
- // OpenFile is used to open files. It defaults to os.OpenFile. This option
- // is useful for writing hermetic tests.
- OpenFile func(string, int, os.FileMode) (*os.File, error)
-}
-
-// DefaultOptions represent the options used if nil options are passed into Open().
-// No timeout is used which will cause Bolt to wait indefinitely for a lock.
-var DefaultOptions = &Options{
- Timeout: 0,
- NoGrowSync: false,
- FreelistType: FreelistArrayType,
-}
-
-// Stats represents statistics about the database.
-type Stats struct {
- // Freelist stats
- FreePageN int // total number of free pages on the freelist
- PendingPageN int // total number of pending pages on the freelist
- FreeAlloc int // total bytes allocated in free pages
- FreelistInuse int // total bytes used by the freelist
-
- // Transaction stats
- TxN int // total number of started read transactions
- OpenTxN int // number of currently open read transactions
-
- TxStats TxStats // global, ongoing stats.
-}
-
-// Sub calculates and returns the difference between two sets of database stats.
-// This is useful when obtaining stats at two different points and time and
-// you need the performance counters that occurred within that time span.
-func (s *Stats) Sub(other *Stats) Stats {
- if other == nil {
- return *s
- }
- var diff Stats
- diff.FreePageN = s.FreePageN
- diff.PendingPageN = s.PendingPageN
- diff.FreeAlloc = s.FreeAlloc
- diff.FreelistInuse = s.FreelistInuse
- diff.TxN = s.TxN - other.TxN
- diff.TxStats = s.TxStats.Sub(&other.TxStats)
- return diff
-}
-
-type Info struct {
- Data uintptr
- PageSize int
-}
-
-type meta struct {
- magic uint32
- version uint32
- pageSize uint32
- flags uint32
- root bucket
- freelist pgid
- pgid pgid
- txid txid
- checksum uint64
-}
-
-// validate checks the marker bytes and version of the meta page to ensure it matches this binary.
-func (m *meta) validate() error {
- if m.magic != magic {
- return ErrInvalid
- } else if m.version != version {
- return ErrVersionMismatch
- } else if m.checksum != 0 && m.checksum != m.sum64() {
- return ErrChecksum
- }
- return nil
-}
-
-// copy copies one meta object to another.
-func (m *meta) copy(dest *meta) {
- *dest = *m
-}
-
-// write writes the meta onto a page.
-func (m *meta) write(p *page) {
- if m.root.root >= m.pgid {
- panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid))
- } else if m.freelist >= m.pgid && m.freelist != pgidNoFreelist {
- // TODO: reject pgidNoFreeList if !NoFreelistSync
- panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid))
- }
-
- // Page id is either going to be 0 or 1 which we can determine by the transaction ID.
- p.id = pgid(m.txid % 2)
- p.flags |= metaPageFlag
-
- // Calculate the checksum.
- m.checksum = m.sum64()
-
- m.copy(p.meta())
-}
-
-// generates the checksum for the meta.
-func (m *meta) sum64() uint64 {
- var h = fnv.New64a()
- _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:])
- return h.Sum64()
-}
-
-// _assert will panic with a given formatted message if the given condition is false.
-func _assert(condition bool, msg string, v ...interface{}) {
- if !condition {
- panic(fmt.Sprintf("assertion failed: "+msg, v...))
- }
-}
diff --git a/vendor/go.etcd.io/bbolt/doc.go b/vendor/go.etcd.io/bbolt/doc.go
deleted file mode 100644
index 95f25f01..00000000
--- a/vendor/go.etcd.io/bbolt/doc.go
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
-package bbolt implements a low-level key/value store in pure Go. It supports
-fully serializable transactions, ACID semantics, and lock-free MVCC with
-multiple readers and a single writer. Bolt can be used for projects that
-want a simple data store without the need to add large dependencies such as
-Postgres or MySQL.
-
-Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is
-optimized for fast read access and does not require recovery in the event of a
-system crash. Transactions which have not finished committing will simply be
-rolled back in the event of a crash.
-
-The design of Bolt is based on Howard Chu's LMDB database project.
-
-Bolt currently works on Windows, Mac OS X, and Linux.
-
-
-Basics
-
-There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is
-a collection of buckets and is represented by a single file on disk. A bucket is
-a collection of unique keys that are associated with values.
-
-Transactions provide either read-only or read-write access to the database.
-Read-only transactions can retrieve key/value pairs and can use Cursors to
-iterate over the dataset sequentially. Read-write transactions can create and
-delete buckets and can insert and remove keys. Only one read-write transaction
-is allowed at a time.
-
-
-Caveats
-
-The database uses a read-only, memory-mapped data file to ensure that
-applications cannot corrupt the database, however, this means that keys and
-values returned from Bolt cannot be changed. Writing to a read-only byte slice
-will cause Go to panic.
-
-Keys and values retrieved from the database are only valid for the life of
-the transaction. When used outside the transaction, these byte slices can
-point to different data or can point to invalid memory which will cause a panic.
-
-
-*/
-package bbolt
diff --git a/vendor/go.etcd.io/bbolt/errors.go b/vendor/go.etcd.io/bbolt/errors.go
deleted file mode 100644
index 48758ca5..00000000
--- a/vendor/go.etcd.io/bbolt/errors.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package bbolt
-
-import "errors"
-
-// These errors can be returned when opening or calling methods on a DB.
-var (
- // ErrDatabaseNotOpen is returned when a DB instance is accessed before it
- // is opened or after it is closed.
- ErrDatabaseNotOpen = errors.New("database not open")
-
- // ErrDatabaseOpen is returned when opening a database that is
- // already open.
- ErrDatabaseOpen = errors.New("database already open")
-
- // ErrInvalid is returned when both meta pages on a database are invalid.
- // This typically occurs when a file is not a bolt database.
- ErrInvalid = errors.New("invalid database")
-
- // ErrVersionMismatch is returned when the data file was created with a
- // different version of Bolt.
- ErrVersionMismatch = errors.New("version mismatch")
-
- // ErrChecksum is returned when either meta page checksum does not match.
- ErrChecksum = errors.New("checksum error")
-
- // ErrTimeout is returned when a database cannot obtain an exclusive lock
- // on the data file after the timeout passed to Open().
- ErrTimeout = errors.New("timeout")
-)
-
-// These errors can occur when beginning or committing a Tx.
-var (
- // ErrTxNotWritable is returned when performing a write operation on a
- // read-only transaction.
- ErrTxNotWritable = errors.New("tx not writable")
-
- // ErrTxClosed is returned when committing or rolling back a transaction
- // that has already been committed or rolled back.
- ErrTxClosed = errors.New("tx closed")
-
- // ErrDatabaseReadOnly is returned when a mutating transaction is started on a
- // read-only database.
- ErrDatabaseReadOnly = errors.New("database is in read-only mode")
-)
-
-// These errors can occur when putting or deleting a value or a bucket.
-var (
- // ErrBucketNotFound is returned when trying to access a bucket that has
- // not been created yet.
- ErrBucketNotFound = errors.New("bucket not found")
-
- // ErrBucketExists is returned when creating a bucket that already exists.
- ErrBucketExists = errors.New("bucket already exists")
-
- // ErrBucketNameRequired is returned when creating a bucket with a blank name.
- ErrBucketNameRequired = errors.New("bucket name required")
-
- // ErrKeyRequired is returned when inserting a zero-length key.
- ErrKeyRequired = errors.New("key required")
-
- // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize.
- ErrKeyTooLarge = errors.New("key too large")
-
- // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
- ErrValueTooLarge = errors.New("value too large")
-
- // ErrIncompatibleValue is returned when trying create or delete a bucket
- // on an existing non-bucket key or when trying to create or delete a
- // non-bucket key on an existing bucket key.
- ErrIncompatibleValue = errors.New("incompatible value")
-)
diff --git a/vendor/go.etcd.io/bbolt/freelist.go b/vendor/go.etcd.io/bbolt/freelist.go
deleted file mode 100644
index 697a4696..00000000
--- a/vendor/go.etcd.io/bbolt/freelist.go
+++ /dev/null
@@ -1,404 +0,0 @@
-package bbolt
-
-import (
- "fmt"
- "sort"
- "unsafe"
-)
-
-// txPending holds a list of pgids and corresponding allocation txns
-// that are pending to be freed.
-type txPending struct {
- ids []pgid
- alloctx []txid // txids allocating the ids
- lastReleaseBegin txid // beginning txid of last matching releaseRange
-}
-
-// pidSet holds the set of starting pgids which have the same span size
-type pidSet map[pgid]struct{}
-
-// freelist represents a list of all pages that are available for allocation.
-// It also tracks pages that have been freed but are still in use by open transactions.
-type freelist struct {
- freelistType FreelistType // freelist type
- ids []pgid // all free and available free page ids.
- allocs map[pgid]txid // mapping of txid that allocated a pgid.
- pending map[txid]*txPending // mapping of soon-to-be free page ids by tx.
- cache map[pgid]bool // fast lookup of all free and pending page ids.
- freemaps map[uint64]pidSet // key is the size of continuous pages(span), value is a set which contains the starting pgids of same size
- forwardMap map[pgid]uint64 // key is start pgid, value is its span size
- backwardMap map[pgid]uint64 // key is end pgid, value is its span size
- allocate func(txid txid, n int) pgid // the freelist allocate func
- free_count func() int // the function which gives you free page number
- mergeSpans func(ids pgids) // the mergeSpan func
- getFreePageIDs func() []pgid // get free pgids func
- readIDs func(pgids []pgid) // readIDs func reads list of pages and init the freelist
-}
-
-// newFreelist returns an empty, initialized freelist.
-func newFreelist(freelistType FreelistType) *freelist {
- f := &freelist{
- freelistType: freelistType,
- allocs: make(map[pgid]txid),
- pending: make(map[txid]*txPending),
- cache: make(map[pgid]bool),
- freemaps: make(map[uint64]pidSet),
- forwardMap: make(map[pgid]uint64),
- backwardMap: make(map[pgid]uint64),
- }
-
- if freelistType == FreelistMapType {
- f.allocate = f.hashmapAllocate
- f.free_count = f.hashmapFreeCount
- f.mergeSpans = f.hashmapMergeSpans
- f.getFreePageIDs = f.hashmapGetFreePageIDs
- f.readIDs = f.hashmapReadIDs
- } else {
- f.allocate = f.arrayAllocate
- f.free_count = f.arrayFreeCount
- f.mergeSpans = f.arrayMergeSpans
- f.getFreePageIDs = f.arrayGetFreePageIDs
- f.readIDs = f.arrayReadIDs
- }
-
- return f
-}
-
-// size returns the size of the page after serialization.
-func (f *freelist) size() int {
- n := f.count()
- if n >= 0xFFFF {
- // The first element will be used to store the count. See freelist.write.
- n++
- }
- return int(pageHeaderSize) + (int(unsafe.Sizeof(pgid(0))) * n)
-}
-
-// count returns count of pages on the freelist
-func (f *freelist) count() int {
- return f.free_count() + f.pending_count()
-}
-
-// arrayFreeCount returns count of free pages(array version)
-func (f *freelist) arrayFreeCount() int {
- return len(f.ids)
-}
-
-// pending_count returns count of pending pages
-func (f *freelist) pending_count() int {
- var count int
- for _, txp := range f.pending {
- count += len(txp.ids)
- }
- return count
-}
-
-// copyall copies a list of all free ids and all pending ids in one sorted list.
-// f.count returns the minimum length required for dst.
-func (f *freelist) copyall(dst []pgid) {
- m := make(pgids, 0, f.pending_count())
- for _, txp := range f.pending {
- m = append(m, txp.ids...)
- }
- sort.Sort(m)
- mergepgids(dst, f.getFreePageIDs(), m)
-}
-
-// arrayAllocate returns the starting page id of a contiguous list of pages of a given size.
-// If a contiguous block cannot be found then 0 is returned.
-func (f *freelist) arrayAllocate(txid txid, n int) pgid {
- if len(f.ids) == 0 {
- return 0
- }
-
- var initial, previd pgid
- for i, id := range f.ids {
- if id <= 1 {
- panic(fmt.Sprintf("invalid page allocation: %d", id))
- }
-
- // Reset initial page if this is not contiguous.
- if previd == 0 || id-previd != 1 {
- initial = id
- }
-
- // If we found a contiguous block then remove it and return it.
- if (id-initial)+1 == pgid(n) {
- // If we're allocating off the beginning then take the fast path
- // and just adjust the existing slice. This will use extra memory
- // temporarily but the append() in free() will realloc the slice
- // as is necessary.
- if (i + 1) == n {
- f.ids = f.ids[i+1:]
- } else {
- copy(f.ids[i-n+1:], f.ids[i+1:])
- f.ids = f.ids[:len(f.ids)-n]
- }
-
- // Remove from the free cache.
- for i := pgid(0); i < pgid(n); i++ {
- delete(f.cache, initial+i)
- }
- f.allocs[initial] = txid
- return initial
- }
-
- previd = id
- }
- return 0
-}
-
-// free releases a page and its overflow for a given transaction id.
-// If the page is already free then a panic will occur.
-func (f *freelist) free(txid txid, p *page) {
- if p.id <= 1 {
- panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
- }
-
- // Free page and all its overflow pages.
- txp := f.pending[txid]
- if txp == nil {
- txp = &txPending{}
- f.pending[txid] = txp
- }
- allocTxid, ok := f.allocs[p.id]
- if ok {
- delete(f.allocs, p.id)
- } else if (p.flags & freelistPageFlag) != 0 {
- // Freelist is always allocated by prior tx.
- allocTxid = txid - 1
- }
-
- for id := p.id; id <= p.id+pgid(p.overflow); id++ {
- // Verify that page is not already free.
- if f.cache[id] {
- panic(fmt.Sprintf("page %d already freed", id))
- }
- // Add to the freelist and cache.
- txp.ids = append(txp.ids, id)
- txp.alloctx = append(txp.alloctx, allocTxid)
- f.cache[id] = true
- }
-}
-
-// release moves all page ids for a transaction id (or older) to the freelist.
-func (f *freelist) release(txid txid) {
- m := make(pgids, 0)
- for tid, txp := range f.pending {
- if tid <= txid {
- // Move transaction's pending pages to the available freelist.
- // Don't remove from the cache since the page is still free.
- m = append(m, txp.ids...)
- delete(f.pending, tid)
- }
- }
- f.mergeSpans(m)
-}
-
-// releaseRange moves pending pages allocated within an extent [begin,end] to the free list.
-func (f *freelist) releaseRange(begin, end txid) {
- if begin > end {
- return
- }
- var m pgids
- for tid, txp := range f.pending {
- if tid < begin || tid > end {
- continue
- }
- // Don't recompute freed pages if ranges haven't updated.
- if txp.lastReleaseBegin == begin {
- continue
- }
- for i := 0; i < len(txp.ids); i++ {
- if atx := txp.alloctx[i]; atx < begin || atx > end {
- continue
- }
- m = append(m, txp.ids[i])
- txp.ids[i] = txp.ids[len(txp.ids)-1]
- txp.ids = txp.ids[:len(txp.ids)-1]
- txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1]
- txp.alloctx = txp.alloctx[:len(txp.alloctx)-1]
- i--
- }
- txp.lastReleaseBegin = begin
- if len(txp.ids) == 0 {
- delete(f.pending, tid)
- }
- }
- f.mergeSpans(m)
-}
-
-// rollback removes the pages from a given pending tx.
-func (f *freelist) rollback(txid txid) {
- // Remove page ids from cache.
- txp := f.pending[txid]
- if txp == nil {
- return
- }
- var m pgids
- for i, pgid := range txp.ids {
- delete(f.cache, pgid)
- tx := txp.alloctx[i]
- if tx == 0 {
- continue
- }
- if tx != txid {
- // Pending free aborted; restore page back to alloc list.
- f.allocs[pgid] = tx
- } else {
- // Freed page was allocated by this txn; OK to throw away.
- m = append(m, pgid)
- }
- }
- // Remove pages from pending list and mark as free if allocated by txid.
- delete(f.pending, txid)
- f.mergeSpans(m)
-}
-
-// freed returns whether a given page is in the free list.
-func (f *freelist) freed(pgid pgid) bool {
- return f.cache[pgid]
-}
-
-// read initializes the freelist from a freelist page.
-func (f *freelist) read(p *page) {
- if (p.flags & freelistPageFlag) == 0 {
- panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.id, p.typ()))
- }
- // If the page.count is at the max uint16 value (64k) then it's considered
- // an overflow and the size of the freelist is stored as the first element.
- var idx, count = 0, int(p.count)
- if count == 0xFFFF {
- idx = 1
- c := *(*pgid)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)))
- count = int(c)
- if count < 0 {
- panic(fmt.Sprintf("leading element count %d overflows int", c))
- }
- }
-
- // Copy the list of page ids from the freelist.
- if count == 0 {
- f.ids = nil
- } else {
- var ids []pgid
- data := unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), unsafe.Sizeof(ids[0]), idx)
- unsafeSlice(unsafe.Pointer(&ids), data, count)
-
- // copy the ids, so we don't modify on the freelist page directly
- idsCopy := make([]pgid, count)
- copy(idsCopy, ids)
- // Make sure they're sorted.
- sort.Sort(pgids(idsCopy))
-
- f.readIDs(idsCopy)
- }
-}
-
-// arrayReadIDs initializes the freelist from a given list of ids.
-func (f *freelist) arrayReadIDs(ids []pgid) {
- f.ids = ids
- f.reindex()
-}
-
-func (f *freelist) arrayGetFreePageIDs() []pgid {
- return f.ids
-}
-
-// write writes the page ids onto a freelist page. All free and pending ids are
-// saved to disk since in the event of a program crash, all pending ids will
-// become free.
-func (f *freelist) write(p *page) error {
- // Combine the old free pgids and pgids waiting on an open transaction.
-
- // Update the header flag.
- p.flags |= freelistPageFlag
-
- // The page.count can only hold up to 64k elements so if we overflow that
- // number then we handle it by putting the size in the first element.
- l := f.count()
- if l == 0 {
- p.count = uint16(l)
- } else if l < 0xFFFF {
- p.count = uint16(l)
- var ids []pgid
- data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
- unsafeSlice(unsafe.Pointer(&ids), data, l)
- f.copyall(ids)
- } else {
- p.count = 0xFFFF
- var ids []pgid
- data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
- unsafeSlice(unsafe.Pointer(&ids), data, l+1)
- ids[0] = pgid(l)
- f.copyall(ids[1:])
- }
-
- return nil
-}
-
-// reload reads the freelist from a page and filters out pending items.
-func (f *freelist) reload(p *page) {
- f.read(p)
-
- // Build a cache of only pending pages.
- pcache := make(map[pgid]bool)
- for _, txp := range f.pending {
- for _, pendingID := range txp.ids {
- pcache[pendingID] = true
- }
- }
-
- // Check each page in the freelist and build a new available freelist
- // with any pages not in the pending lists.
- var a []pgid
- for _, id := range f.getFreePageIDs() {
- if !pcache[id] {
- a = append(a, id)
- }
- }
-
- f.readIDs(a)
-}
-
-// noSyncReload reads the freelist from pgids and filters out pending items.
-func (f *freelist) noSyncReload(pgids []pgid) {
- // Build a cache of only pending pages.
- pcache := make(map[pgid]bool)
- for _, txp := range f.pending {
- for _, pendingID := range txp.ids {
- pcache[pendingID] = true
- }
- }
-
- // Check each page in the freelist and build a new available freelist
- // with any pages not in the pending lists.
- var a []pgid
- for _, id := range pgids {
- if !pcache[id] {
- a = append(a, id)
- }
- }
-
- f.readIDs(a)
-}
-
-// reindex rebuilds the free cache based on available and pending free lists.
-func (f *freelist) reindex() {
- ids := f.getFreePageIDs()
- f.cache = make(map[pgid]bool, len(ids))
- for _, id := range ids {
- f.cache[id] = true
- }
- for _, txp := range f.pending {
- for _, pendingID := range txp.ids {
- f.cache[pendingID] = true
- }
- }
-}
-
-// arrayMergeSpans try to merge list of pages(represented by pgids) with existing spans but using array
-func (f *freelist) arrayMergeSpans(ids pgids) {
- sort.Sort(ids)
- f.ids = pgids(f.ids).merge(ids)
-}
diff --git a/vendor/go.etcd.io/bbolt/freelist_hmap.go b/vendor/go.etcd.io/bbolt/freelist_hmap.go
deleted file mode 100644
index 02ef2be0..00000000
--- a/vendor/go.etcd.io/bbolt/freelist_hmap.go
+++ /dev/null
@@ -1,178 +0,0 @@
-package bbolt
-
-import "sort"
-
-// hashmapFreeCount returns count of free pages(hashmap version)
-func (f *freelist) hashmapFreeCount() int {
- // use the forwardmap to get the total count
- count := 0
- for _, size := range f.forwardMap {
- count += int(size)
- }
- return count
-}
-
-// hashmapAllocate serves the same purpose as arrayAllocate, but use hashmap as backend
-func (f *freelist) hashmapAllocate(txid txid, n int) pgid {
- if n == 0 {
- return 0
- }
-
- // if we have a exact size match just return short path
- if bm, ok := f.freemaps[uint64(n)]; ok {
- for pid := range bm {
- // remove the span
- f.delSpan(pid, uint64(n))
-
- f.allocs[pid] = txid
-
- for i := pgid(0); i < pgid(n); i++ {
- delete(f.cache, pid+i)
- }
- return pid
- }
- }
-
- // lookup the map to find larger span
- for size, bm := range f.freemaps {
- if size < uint64(n) {
- continue
- }
-
- for pid := range bm {
- // remove the initial
- f.delSpan(pid, uint64(size))
-
- f.allocs[pid] = txid
-
- remain := size - uint64(n)
-
- // add remain span
- f.addSpan(pid+pgid(n), remain)
-
- for i := pgid(0); i < pgid(n); i++ {
- delete(f.cache, pid+pgid(i))
- }
- return pid
- }
- }
-
- return 0
-}
-
-// hashmapReadIDs reads pgids as input an initial the freelist(hashmap version)
-func (f *freelist) hashmapReadIDs(pgids []pgid) {
- f.init(pgids)
-
- // Rebuild the page cache.
- f.reindex()
-}
-
-// hashmapGetFreePageIDs returns the sorted free page ids
-func (f *freelist) hashmapGetFreePageIDs() []pgid {
- count := f.free_count()
- if count == 0 {
- return nil
- }
-
- m := make([]pgid, 0, count)
- for start, size := range f.forwardMap {
- for i := 0; i < int(size); i++ {
- m = append(m, start+pgid(i))
- }
- }
- sort.Sort(pgids(m))
-
- return m
-}
-
-// hashmapMergeSpans try to merge list of pages(represented by pgids) with existing spans
-func (f *freelist) hashmapMergeSpans(ids pgids) {
- for _, id := range ids {
- // try to see if we can merge and update
- f.mergeWithExistingSpan(id)
- }
-}
-
-// mergeWithExistingSpan merges pid to the existing free spans, try to merge it backward and forward
-func (f *freelist) mergeWithExistingSpan(pid pgid) {
- prev := pid - 1
- next := pid + 1
-
- preSize, mergeWithPrev := f.backwardMap[prev]
- nextSize, mergeWithNext := f.forwardMap[next]
- newStart := pid
- newSize := uint64(1)
-
- if mergeWithPrev {
- //merge with previous span
- start := prev + 1 - pgid(preSize)
- f.delSpan(start, preSize)
-
- newStart -= pgid(preSize)
- newSize += preSize
- }
-
- if mergeWithNext {
- // merge with next span
- f.delSpan(next, nextSize)
- newSize += nextSize
- }
-
- f.addSpan(newStart, newSize)
-}
-
-func (f *freelist) addSpan(start pgid, size uint64) {
- f.backwardMap[start-1+pgid(size)] = size
- f.forwardMap[start] = size
- if _, ok := f.freemaps[size]; !ok {
- f.freemaps[size] = make(map[pgid]struct{})
- }
-
- f.freemaps[size][start] = struct{}{}
-}
-
-func (f *freelist) delSpan(start pgid, size uint64) {
- delete(f.forwardMap, start)
- delete(f.backwardMap, start+pgid(size-1))
- delete(f.freemaps[size], start)
- if len(f.freemaps[size]) == 0 {
- delete(f.freemaps, size)
- }
-}
-
-// initial from pgids using when use hashmap version
-// pgids must be sorted
-func (f *freelist) init(pgids []pgid) {
- if len(pgids) == 0 {
- return
- }
-
- size := uint64(1)
- start := pgids[0]
-
- if !sort.SliceIsSorted([]pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) {
- panic("pgids not sorted")
- }
-
- f.freemaps = make(map[uint64]pidSet)
- f.forwardMap = make(map[pgid]uint64)
- f.backwardMap = make(map[pgid]uint64)
-
- for i := 1; i < len(pgids); i++ {
- // continuous page
- if pgids[i] == pgids[i-1]+1 {
- size++
- } else {
- f.addSpan(start, size)
-
- size = 1
- start = pgids[i]
- }
- }
-
- // init the tail
- if size != 0 && start != 0 {
- f.addSpan(start, size)
- }
-}
diff --git a/vendor/go.etcd.io/bbolt/node.go b/vendor/go.etcd.io/bbolt/node.go
deleted file mode 100644
index 73988b5c..00000000
--- a/vendor/go.etcd.io/bbolt/node.go
+++ /dev/null
@@ -1,602 +0,0 @@
-package bbolt
-
-import (
- "bytes"
- "fmt"
- "sort"
- "unsafe"
-)
-
-// node represents an in-memory, deserialized page.
-type node struct {
- bucket *Bucket
- isLeaf bool
- unbalanced bool
- spilled bool
- key []byte
- pgid pgid
- parent *node
- children nodes
- inodes inodes
-}
-
-// root returns the top-level node this node is attached to.
-func (n *node) root() *node {
- if n.parent == nil {
- return n
- }
- return n.parent.root()
-}
-
-// minKeys returns the minimum number of inodes this node should have.
-func (n *node) minKeys() int {
- if n.isLeaf {
- return 1
- }
- return 2
-}
-
-// size returns the size of the node after serialization.
-func (n *node) size() int {
- sz, elsz := pageHeaderSize, n.pageElementSize()
- for i := 0; i < len(n.inodes); i++ {
- item := &n.inodes[i]
- sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value))
- }
- return int(sz)
-}
-
-// sizeLessThan returns true if the node is less than a given size.
-// This is an optimization to avoid calculating a large node when we only need
-// to know if it fits inside a certain page size.
-func (n *node) sizeLessThan(v uintptr) bool {
- sz, elsz := pageHeaderSize, n.pageElementSize()
- for i := 0; i < len(n.inodes); i++ {
- item := &n.inodes[i]
- sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value))
- if sz >= v {
- return false
- }
- }
- return true
-}
-
-// pageElementSize returns the size of each page element based on the type of node.
-func (n *node) pageElementSize() uintptr {
- if n.isLeaf {
- return leafPageElementSize
- }
- return branchPageElementSize
-}
-
-// childAt returns the child node at a given index.
-func (n *node) childAt(index int) *node {
- if n.isLeaf {
- panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index))
- }
- return n.bucket.node(n.inodes[index].pgid, n)
-}
-
-// childIndex returns the index of a given child node.
-func (n *node) childIndex(child *node) int {
- index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 })
- return index
-}
-
-// numChildren returns the number of children.
-func (n *node) numChildren() int {
- return len(n.inodes)
-}
-
-// nextSibling returns the next node with the same parent.
-func (n *node) nextSibling() *node {
- if n.parent == nil {
- return nil
- }
- index := n.parent.childIndex(n)
- if index >= n.parent.numChildren()-1 {
- return nil
- }
- return n.parent.childAt(index + 1)
-}
-
-// prevSibling returns the previous node with the same parent.
-func (n *node) prevSibling() *node {
- if n.parent == nil {
- return nil
- }
- index := n.parent.childIndex(n)
- if index == 0 {
- return nil
- }
- return n.parent.childAt(index - 1)
-}
-
-// put inserts a key/value.
-func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
- if pgid >= n.bucket.tx.meta.pgid {
- panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid))
- } else if len(oldKey) <= 0 {
- panic("put: zero-length old key")
- } else if len(newKey) <= 0 {
- panic("put: zero-length new key")
- }
-
- // Find insertion index.
- index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 })
-
- // Add capacity and shift nodes if we don't have an exact match and need to insert.
- exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey))
- if !exact {
- n.inodes = append(n.inodes, inode{})
- copy(n.inodes[index+1:], n.inodes[index:])
- }
-
- inode := &n.inodes[index]
- inode.flags = flags
- inode.key = newKey
- inode.value = value
- inode.pgid = pgid
- _assert(len(inode.key) > 0, "put: zero-length inode key")
-}
-
-// del removes a key from the node.
-func (n *node) del(key []byte) {
- // Find index of key.
- index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 })
-
- // Exit if the key isn't found.
- if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) {
- return
- }
-
- // Delete inode from the node.
- n.inodes = append(n.inodes[:index], n.inodes[index+1:]...)
-
- // Mark the node as needing rebalancing.
- n.unbalanced = true
-}
-
-// read initializes the node from a page.
-func (n *node) read(p *page) {
- n.pgid = p.id
- n.isLeaf = ((p.flags & leafPageFlag) != 0)
- n.inodes = make(inodes, int(p.count))
-
- for i := 0; i < int(p.count); i++ {
- inode := &n.inodes[i]
- if n.isLeaf {
- elem := p.leafPageElement(uint16(i))
- inode.flags = elem.flags
- inode.key = elem.key()
- inode.value = elem.value()
- } else {
- elem := p.branchPageElement(uint16(i))
- inode.pgid = elem.pgid
- inode.key = elem.key()
- }
- _assert(len(inode.key) > 0, "read: zero-length inode key")
- }
-
- // Save first key so we can find the node in the parent when we spill.
- if len(n.inodes) > 0 {
- n.key = n.inodes[0].key
- _assert(len(n.key) > 0, "read: zero-length node key")
- } else {
- n.key = nil
- }
-}
-
-// write writes the items onto one or more pages.
-func (n *node) write(p *page) {
- // Initialize page.
- if n.isLeaf {
- p.flags |= leafPageFlag
- } else {
- p.flags |= branchPageFlag
- }
-
- if len(n.inodes) >= 0xFFFF {
- panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id))
- }
- p.count = uint16(len(n.inodes))
-
- // Stop here if there are no items to write.
- if p.count == 0 {
- return
- }
-
- // Loop over each item and write it to the page.
- // off tracks the offset into p of the start of the next data.
- off := unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes))
- for i, item := range n.inodes {
- _assert(len(item.key) > 0, "write: zero-length inode key")
-
- // Create a slice to write into of needed size and advance
- // byte pointer for next iteration.
- sz := len(item.key) + len(item.value)
- b := unsafeByteSlice(unsafe.Pointer(p), off, 0, sz)
- off += uintptr(sz)
-
- // Write the page element.
- if n.isLeaf {
- elem := p.leafPageElement(uint16(i))
- elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
- elem.flags = item.flags
- elem.ksize = uint32(len(item.key))
- elem.vsize = uint32(len(item.value))
- } else {
- elem := p.branchPageElement(uint16(i))
- elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
- elem.ksize = uint32(len(item.key))
- elem.pgid = item.pgid
- _assert(elem.pgid != p.id, "write: circular dependency occurred")
- }
-
- // Write data for the element to the end of the page.
- l := copy(b, item.key)
- copy(b[l:], item.value)
- }
-
- // DEBUG ONLY: n.dump()
-}
-
-// split breaks up a node into multiple smaller nodes, if appropriate.
-// This should only be called from the spill() function.
-func (n *node) split(pageSize uintptr) []*node {
- var nodes []*node
-
- node := n
- for {
- // Split node into two.
- a, b := node.splitTwo(pageSize)
- nodes = append(nodes, a)
-
- // If we can't split then exit the loop.
- if b == nil {
- break
- }
-
- // Set node to b so it gets split on the next iteration.
- node = b
- }
-
- return nodes
-}
-
-// splitTwo breaks up a node into two smaller nodes, if appropriate.
-// This should only be called from the split() function.
-func (n *node) splitTwo(pageSize uintptr) (*node, *node) {
- // Ignore the split if the page doesn't have at least enough nodes for
- // two pages or if the nodes can fit in a single page.
- if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) {
- return n, nil
- }
-
- // Determine the threshold before starting a new node.
- var fillPercent = n.bucket.FillPercent
- if fillPercent < minFillPercent {
- fillPercent = minFillPercent
- } else if fillPercent > maxFillPercent {
- fillPercent = maxFillPercent
- }
- threshold := int(float64(pageSize) * fillPercent)
-
- // Determine split position and sizes of the two pages.
- splitIndex, _ := n.splitIndex(threshold)
-
- // Split node into two separate nodes.
- // If there's no parent then we'll need to create one.
- if n.parent == nil {
- n.parent = &node{bucket: n.bucket, children: []*node{n}}
- }
-
- // Create a new node and add it to the parent.
- next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent}
- n.parent.children = append(n.parent.children, next)
-
- // Split inodes across two nodes.
- next.inodes = n.inodes[splitIndex:]
- n.inodes = n.inodes[:splitIndex]
-
- // Update the statistics.
- n.bucket.tx.stats.Split++
-
- return n, next
-}
-
-// splitIndex finds the position where a page will fill a given threshold.
-// It returns the index as well as the size of the first page.
-// This is only be called from split().
-func (n *node) splitIndex(threshold int) (index, sz uintptr) {
- sz = pageHeaderSize
-
- // Loop until we only have the minimum number of keys required for the second page.
- for i := 0; i < len(n.inodes)-minKeysPerPage; i++ {
- index = uintptr(i)
- inode := n.inodes[i]
- elsize := n.pageElementSize() + uintptr(len(inode.key)) + uintptr(len(inode.value))
-
- // If we have at least the minimum number of keys and adding another
- // node would put us over the threshold then exit and return.
- if index >= minKeysPerPage && sz+elsize > uintptr(threshold) {
- break
- }
-
- // Add the element size to the total size.
- sz += elsize
- }
-
- return
-}
-
-// spill writes the nodes to dirty pages and splits nodes as it goes.
-// Returns an error if dirty pages cannot be allocated.
-func (n *node) spill() error {
- var tx = n.bucket.tx
- if n.spilled {
- return nil
- }
-
- // Spill child nodes first. Child nodes can materialize sibling nodes in
- // the case of split-merge so we cannot use a range loop. We have to check
- // the children size on every loop iteration.
- sort.Sort(n.children)
- for i := 0; i < len(n.children); i++ {
- if err := n.children[i].spill(); err != nil {
- return err
- }
- }
-
- // We no longer need the child list because it's only used for spill tracking.
- n.children = nil
-
- // Split nodes into appropriate sizes. The first node will always be n.
- var nodes = n.split(uintptr(tx.db.pageSize))
- for _, node := range nodes {
- // Add node's page to the freelist if it's not new.
- if node.pgid > 0 {
- tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid))
- node.pgid = 0
- }
-
- // Allocate contiguous space for the node.
- p, err := tx.allocate((node.size() + tx.db.pageSize - 1) / tx.db.pageSize)
- if err != nil {
- return err
- }
-
- // Write the node.
- if p.id >= tx.meta.pgid {
- panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid))
- }
- node.pgid = p.id
- node.write(p)
- node.spilled = true
-
- // Insert into parent inodes.
- if node.parent != nil {
- var key = node.key
- if key == nil {
- key = node.inodes[0].key
- }
-
- node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0)
- node.key = node.inodes[0].key
- _assert(len(node.key) > 0, "spill: zero-length node key")
- }
-
- // Update the statistics.
- tx.stats.Spill++
- }
-
- // If the root node split and created a new root then we need to spill that
- // as well. We'll clear out the children to make sure it doesn't try to respill.
- if n.parent != nil && n.parent.pgid == 0 {
- n.children = nil
- return n.parent.spill()
- }
-
- return nil
-}
-
-// rebalance attempts to combine the node with sibling nodes if the node fill
-// size is below a threshold or if there are not enough keys.
-func (n *node) rebalance() {
- if !n.unbalanced {
- return
- }
- n.unbalanced = false
-
- // Update statistics.
- n.bucket.tx.stats.Rebalance++
-
- // Ignore if node is above threshold (25%) and has enough keys.
- var threshold = n.bucket.tx.db.pageSize / 4
- if n.size() > threshold && len(n.inodes) > n.minKeys() {
- return
- }
-
- // Root node has special handling.
- if n.parent == nil {
- // If root node is a branch and only has one node then collapse it.
- if !n.isLeaf && len(n.inodes) == 1 {
- // Move root's child up.
- child := n.bucket.node(n.inodes[0].pgid, n)
- n.isLeaf = child.isLeaf
- n.inodes = child.inodes[:]
- n.children = child.children
-
- // Reparent all child nodes being moved.
- for _, inode := range n.inodes {
- if child, ok := n.bucket.nodes[inode.pgid]; ok {
- child.parent = n
- }
- }
-
- // Remove old child.
- child.parent = nil
- delete(n.bucket.nodes, child.pgid)
- child.free()
- }
-
- return
- }
-
- // If node has no keys then just remove it.
- if n.numChildren() == 0 {
- n.parent.del(n.key)
- n.parent.removeChild(n)
- delete(n.bucket.nodes, n.pgid)
- n.free()
- n.parent.rebalance()
- return
- }
-
- _assert(n.parent.numChildren() > 1, "parent must have at least 2 children")
-
- // Destination node is right sibling if idx == 0, otherwise left sibling.
- var target *node
- var useNextSibling = (n.parent.childIndex(n) == 0)
- if useNextSibling {
- target = n.nextSibling()
- } else {
- target = n.prevSibling()
- }
-
- // If both this node and the target node are too small then merge them.
- if useNextSibling {
- // Reparent all child nodes being moved.
- for _, inode := range target.inodes {
- if child, ok := n.bucket.nodes[inode.pgid]; ok {
- child.parent.removeChild(child)
- child.parent = n
- child.parent.children = append(child.parent.children, child)
- }
- }
-
- // Copy over inodes from target and remove target.
- n.inodes = append(n.inodes, target.inodes...)
- n.parent.del(target.key)
- n.parent.removeChild(target)
- delete(n.bucket.nodes, target.pgid)
- target.free()
- } else {
- // Reparent all child nodes being moved.
- for _, inode := range n.inodes {
- if child, ok := n.bucket.nodes[inode.pgid]; ok {
- child.parent.removeChild(child)
- child.parent = target
- child.parent.children = append(child.parent.children, child)
- }
- }
-
- // Copy over inodes to target and remove node.
- target.inodes = append(target.inodes, n.inodes...)
- n.parent.del(n.key)
- n.parent.removeChild(n)
- delete(n.bucket.nodes, n.pgid)
- n.free()
- }
-
- // Either this node or the target node was deleted from the parent so rebalance it.
- n.parent.rebalance()
-}
-
-// removes a node from the list of in-memory children.
-// This does not affect the inodes.
-func (n *node) removeChild(target *node) {
- for i, child := range n.children {
- if child == target {
- n.children = append(n.children[:i], n.children[i+1:]...)
- return
- }
- }
-}
-
-// dereference causes the node to copy all its inode key/value references to heap memory.
-// This is required when the mmap is reallocated so inodes are not pointing to stale data.
-func (n *node) dereference() {
- if n.key != nil {
- key := make([]byte, len(n.key))
- copy(key, n.key)
- n.key = key
- _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node")
- }
-
- for i := range n.inodes {
- inode := &n.inodes[i]
-
- key := make([]byte, len(inode.key))
- copy(key, inode.key)
- inode.key = key
- _assert(len(inode.key) > 0, "dereference: zero-length inode key")
-
- value := make([]byte, len(inode.value))
- copy(value, inode.value)
- inode.value = value
- }
-
- // Recursively dereference children.
- for _, child := range n.children {
- child.dereference()
- }
-
- // Update statistics.
- n.bucket.tx.stats.NodeDeref++
-}
-
-// free adds the node's underlying page to the freelist.
-func (n *node) free() {
- if n.pgid != 0 {
- n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid))
- n.pgid = 0
- }
-}
-
-// dump writes the contents of the node to STDERR for debugging purposes.
-/*
-func (n *node) dump() {
- // Write node header.
- var typ = "branch"
- if n.isLeaf {
- typ = "leaf"
- }
- warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes))
-
- // Write out abbreviated version of each item.
- for _, item := range n.inodes {
- if n.isLeaf {
- if item.flags&bucketLeafFlag != 0 {
- bucket := (*bucket)(unsafe.Pointer(&item.value[0]))
- warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root)
- } else {
- warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4))
- }
- } else {
- warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid)
- }
- }
- warn("")
-}
-*/
-
-type nodes []*node
-
-func (s nodes) Len() int { return len(s) }
-func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s nodes) Less(i, j int) bool {
- return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1
-}
-
-// inode represents an internal node inside of a node.
-// It can be used to point to elements in a page or point
-// to an element which hasn't been added to a page yet.
-type inode struct {
- flags uint32
- pgid pgid
- key []byte
- value []byte
-}
-
-type inodes []inode
diff --git a/vendor/go.etcd.io/bbolt/page.go b/vendor/go.etcd.io/bbolt/page.go
deleted file mode 100644
index c9a158fb..00000000
--- a/vendor/go.etcd.io/bbolt/page.go
+++ /dev/null
@@ -1,204 +0,0 @@
-package bbolt
-
-import (
- "fmt"
- "os"
- "sort"
- "unsafe"
-)
-
-const pageHeaderSize = unsafe.Sizeof(page{})
-
-const minKeysPerPage = 2
-
-const branchPageElementSize = unsafe.Sizeof(branchPageElement{})
-const leafPageElementSize = unsafe.Sizeof(leafPageElement{})
-
-const (
- branchPageFlag = 0x01
- leafPageFlag = 0x02
- metaPageFlag = 0x04
- freelistPageFlag = 0x10
-)
-
-const (
- bucketLeafFlag = 0x01
-)
-
-type pgid uint64
-
-type page struct {
- id pgid
- flags uint16
- count uint16
- overflow uint32
-}
-
-// typ returns a human readable page type string used for debugging.
-func (p *page) typ() string {
- if (p.flags & branchPageFlag) != 0 {
- return "branch"
- } else if (p.flags & leafPageFlag) != 0 {
- return "leaf"
- } else if (p.flags & metaPageFlag) != 0 {
- return "meta"
- } else if (p.flags & freelistPageFlag) != 0 {
- return "freelist"
- }
- return fmt.Sprintf("unknown<%02x>", p.flags)
-}
-
-// meta returns a pointer to the metadata section of the page.
-func (p *page) meta() *meta {
- return (*meta)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)))
-}
-
-// leafPageElement retrieves the leaf node by index
-func (p *page) leafPageElement(index uint16) *leafPageElement {
- return (*leafPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p),
- leafPageElementSize, int(index)))
-}
-
-// leafPageElements retrieves a list of leaf nodes.
-func (p *page) leafPageElements() []leafPageElement {
- if p.count == 0 {
- return nil
- }
- var elems []leafPageElement
- data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
- unsafeSlice(unsafe.Pointer(&elems), data, int(p.count))
- return elems
-}
-
-// branchPageElement retrieves the branch node by index
-func (p *page) branchPageElement(index uint16) *branchPageElement {
- return (*branchPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p),
- unsafe.Sizeof(branchPageElement{}), int(index)))
-}
-
-// branchPageElements retrieves a list of branch nodes.
-func (p *page) branchPageElements() []branchPageElement {
- if p.count == 0 {
- return nil
- }
- var elems []branchPageElement
- data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
- unsafeSlice(unsafe.Pointer(&elems), data, int(p.count))
- return elems
-}
-
-// dump writes n bytes of the page to STDERR as hex output.
-func (p *page) hexdump(n int) {
- buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, n)
- fmt.Fprintf(os.Stderr, "%x\n", buf)
-}
-
-type pages []*page
-
-func (s pages) Len() int { return len(s) }
-func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s pages) Less(i, j int) bool { return s[i].id < s[j].id }
-
-// branchPageElement represents a node on a branch page.
-type branchPageElement struct {
- pos uint32
- ksize uint32
- pgid pgid
-}
-
-// key returns a byte slice of the node key.
-func (n *branchPageElement) key() []byte {
- return unsafeByteSlice(unsafe.Pointer(n), 0, int(n.pos), int(n.pos)+int(n.ksize))
-}
-
-// leafPageElement represents a node on a leaf page.
-type leafPageElement struct {
- flags uint32
- pos uint32
- ksize uint32
- vsize uint32
-}
-
-// key returns a byte slice of the node key.
-func (n *leafPageElement) key() []byte {
- i := int(n.pos)
- j := i + int(n.ksize)
- return unsafeByteSlice(unsafe.Pointer(n), 0, i, j)
-}
-
-// value returns a byte slice of the node value.
-func (n *leafPageElement) value() []byte {
- i := int(n.pos) + int(n.ksize)
- j := i + int(n.vsize)
- return unsafeByteSlice(unsafe.Pointer(n), 0, i, j)
-}
-
-// PageInfo represents human readable information about a page.
-type PageInfo struct {
- ID int
- Type string
- Count int
- OverflowCount int
-}
-
-type pgids []pgid
-
-func (s pgids) Len() int { return len(s) }
-func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s pgids) Less(i, j int) bool { return s[i] < s[j] }
-
-// merge returns the sorted union of a and b.
-func (a pgids) merge(b pgids) pgids {
- // Return the opposite slice if one is nil.
- if len(a) == 0 {
- return b
- }
- if len(b) == 0 {
- return a
- }
- merged := make(pgids, len(a)+len(b))
- mergepgids(merged, a, b)
- return merged
-}
-
-// mergepgids copies the sorted union of a and b into dst.
-// If dst is too small, it panics.
-func mergepgids(dst, a, b pgids) {
- if len(dst) < len(a)+len(b) {
- panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b)))
- }
- // Copy in the opposite slice if one is nil.
- if len(a) == 0 {
- copy(dst, b)
- return
- }
- if len(b) == 0 {
- copy(dst, a)
- return
- }
-
- // Merged will hold all elements from both lists.
- merged := dst[:0]
-
- // Assign lead to the slice with a lower starting value, follow to the higher value.
- lead, follow := a, b
- if b[0] < a[0] {
- lead, follow = b, a
- }
-
- // Continue while there are elements in the lead.
- for len(lead) > 0 {
- // Merge largest prefix of lead that is ahead of follow[0].
- n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
- merged = append(merged, lead[:n]...)
- if n >= len(lead) {
- break
- }
-
- // Swap lead and follow.
- lead, follow = follow, lead[n:]
- }
-
- // Append what's left in follow.
- _ = append(merged, follow...)
-}
diff --git a/vendor/go.etcd.io/bbolt/tx.go b/vendor/go.etcd.io/bbolt/tx.go
deleted file mode 100644
index 4b1a64a8..00000000
--- a/vendor/go.etcd.io/bbolt/tx.go
+++ /dev/null
@@ -1,724 +0,0 @@
-package bbolt
-
-import (
- "fmt"
- "io"
- "os"
- "sort"
- "strings"
- "time"
- "unsafe"
-)
-
-// txid represents the internal transaction identifier.
-type txid uint64
-
-// Tx represents a read-only or read/write transaction on the database.
-// Read-only transactions can be used for retrieving values for keys and creating cursors.
-// Read/write transactions can create and remove buckets and create and remove keys.
-//
-// IMPORTANT: You must commit or rollback transactions when you are done with
-// them. Pages can not be reclaimed by the writer until no more transactions
-// are using them. A long running read transaction can cause the database to
-// quickly grow.
-type Tx struct {
- writable bool
- managed bool
- db *DB
- meta *meta
- root Bucket
- pages map[pgid]*page
- stats TxStats
- commitHandlers []func()
-
- // WriteFlag specifies the flag for write-related methods like WriteTo().
- // Tx opens the database file with the specified flag to copy the data.
- //
- // By default, the flag is unset, which works well for mostly in-memory
- // workloads. For databases that are much larger than available RAM,
- // set the flag to syscall.O_DIRECT to avoid trashing the page cache.
- WriteFlag int
-}
-
-// init initializes the transaction.
-func (tx *Tx) init(db *DB) {
- tx.db = db
- tx.pages = nil
-
- // Copy the meta page since it can be changed by the writer.
- tx.meta = &meta{}
- db.meta().copy(tx.meta)
-
- // Copy over the root bucket.
- tx.root = newBucket(tx)
- tx.root.bucket = &bucket{}
- *tx.root.bucket = tx.meta.root
-
- // Increment the transaction id and add a page cache for writable transactions.
- if tx.writable {
- tx.pages = make(map[pgid]*page)
- tx.meta.txid += txid(1)
- }
-}
-
-// ID returns the transaction id.
-func (tx *Tx) ID() int {
- return int(tx.meta.txid)
-}
-
-// DB returns a reference to the database that created the transaction.
-func (tx *Tx) DB() *DB {
- return tx.db
-}
-
-// Size returns current database size in bytes as seen by this transaction.
-func (tx *Tx) Size() int64 {
- return int64(tx.meta.pgid) * int64(tx.db.pageSize)
-}
-
-// Writable returns whether the transaction can perform write operations.
-func (tx *Tx) Writable() bool {
- return tx.writable
-}
-
-// Cursor creates a cursor associated with the root bucket.
-// All items in the cursor will return a nil value because all root bucket keys point to buckets.
-// The cursor is only valid as long as the transaction is open.
-// Do not use a cursor after the transaction is closed.
-func (tx *Tx) Cursor() *Cursor {
- return tx.root.Cursor()
-}
-
-// Stats retrieves a copy of the current transaction statistics.
-func (tx *Tx) Stats() TxStats {
- return tx.stats
-}
-
-// Bucket retrieves a bucket by name.
-// Returns nil if the bucket does not exist.
-// The bucket instance is only valid for the lifetime of the transaction.
-func (tx *Tx) Bucket(name []byte) *Bucket {
- return tx.root.Bucket(name)
-}
-
-// CreateBucket creates a new bucket.
-// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long.
-// The bucket instance is only valid for the lifetime of the transaction.
-func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) {
- return tx.root.CreateBucket(name)
-}
-
-// CreateBucketIfNotExists creates a new bucket if it doesn't already exist.
-// Returns an error if the bucket name is blank, or if the bucket name is too long.
-// The bucket instance is only valid for the lifetime of the transaction.
-func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) {
- return tx.root.CreateBucketIfNotExists(name)
-}
-
-// DeleteBucket deletes a bucket.
-// Returns an error if the bucket cannot be found or if the key represents a non-bucket value.
-func (tx *Tx) DeleteBucket(name []byte) error {
- return tx.root.DeleteBucket(name)
-}
-
-// ForEach executes a function for each bucket in the root.
-// If the provided function returns an error then the iteration is stopped and
-// the error is returned to the caller.
-func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error {
- return tx.root.ForEach(func(k, v []byte) error {
- return fn(k, tx.root.Bucket(k))
- })
-}
-
-// OnCommit adds a handler function to be executed after the transaction successfully commits.
-func (tx *Tx) OnCommit(fn func()) {
- tx.commitHandlers = append(tx.commitHandlers, fn)
-}
-
-// Commit writes all changes to disk and updates the meta page.
-// Returns an error if a disk write error occurs, or if Commit is
-// called on a read-only transaction.
-func (tx *Tx) Commit() error {
- _assert(!tx.managed, "managed tx commit not allowed")
- if tx.db == nil {
- return ErrTxClosed
- } else if !tx.writable {
- return ErrTxNotWritable
- }
-
- // TODO(benbjohnson): Use vectorized I/O to write out dirty pages.
-
- // Rebalance nodes which have had deletions.
- var startTime = time.Now()
- tx.root.rebalance()
- if tx.stats.Rebalance > 0 {
- tx.stats.RebalanceTime += time.Since(startTime)
- }
-
- // spill data onto dirty pages.
- startTime = time.Now()
- if err := tx.root.spill(); err != nil {
- tx.rollback()
- return err
- }
- tx.stats.SpillTime += time.Since(startTime)
-
- // Free the old root bucket.
- tx.meta.root.root = tx.root.root
-
- // Free the old freelist because commit writes out a fresh freelist.
- if tx.meta.freelist != pgidNoFreelist {
- tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
- }
-
- if !tx.db.NoFreelistSync {
- err := tx.commitFreelist()
- if err != nil {
- return err
- }
- } else {
- tx.meta.freelist = pgidNoFreelist
- }
-
- // Write dirty pages to disk.
- startTime = time.Now()
- if err := tx.write(); err != nil {
- tx.rollback()
- return err
- }
-
- // If strict mode is enabled then perform a consistency check.
- // Only the first consistency error is reported in the panic.
- if tx.db.StrictMode {
- ch := tx.Check()
- var errs []string
- for {
- err, ok := <-ch
- if !ok {
- break
- }
- errs = append(errs, err.Error())
- }
- if len(errs) > 0 {
- panic("check fail: " + strings.Join(errs, "\n"))
- }
- }
-
- // Write meta to disk.
- if err := tx.writeMeta(); err != nil {
- tx.rollback()
- return err
- }
- tx.stats.WriteTime += time.Since(startTime)
-
- // Finalize the transaction.
- tx.close()
-
- // Execute commit handlers now that the locks have been removed.
- for _, fn := range tx.commitHandlers {
- fn()
- }
-
- return nil
-}
-
-func (tx *Tx) commitFreelist() error {
- // Allocate new pages for the new free list. This will overestimate
- // the size of the freelist but not underestimate the size (which would be bad).
- opgid := tx.meta.pgid
- p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
- if err != nil {
- tx.rollback()
- return err
- }
- if err := tx.db.freelist.write(p); err != nil {
- tx.rollback()
- return err
- }
- tx.meta.freelist = p.id
- // If the high water mark has moved up then attempt to grow the database.
- if tx.meta.pgid > opgid {
- if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
- tx.rollback()
- return err
- }
- }
-
- return nil
-}
-
-// Rollback closes the transaction and ignores all previous updates. Read-only
-// transactions must be rolled back and not committed.
-func (tx *Tx) Rollback() error {
- _assert(!tx.managed, "managed tx rollback not allowed")
- if tx.db == nil {
- return ErrTxClosed
- }
- tx.nonPhysicalRollback()
- return nil
-}
-
-// nonPhysicalRollback is called when user calls Rollback directly, in this case we do not need to reload the free pages from disk.
-func (tx *Tx) nonPhysicalRollback() {
- if tx.db == nil {
- return
- }
- if tx.writable {
- tx.db.freelist.rollback(tx.meta.txid)
- }
- tx.close()
-}
-
-// rollback needs to reload the free pages from disk in case some system error happens like fsync error.
-func (tx *Tx) rollback() {
- if tx.db == nil {
- return
- }
- if tx.writable {
- tx.db.freelist.rollback(tx.meta.txid)
- if !tx.db.hasSyncedFreelist() {
- // Reconstruct free page list by scanning the DB to get the whole free page list.
- // Note: scaning the whole db is heavy if your db size is large in NoSyncFreeList mode.
- tx.db.freelist.noSyncReload(tx.db.freepages())
- } else {
- // Read free page list from freelist page.
- tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
- }
- }
- tx.close()
-}
-
-func (tx *Tx) close() {
- if tx.db == nil {
- return
- }
- if tx.writable {
- // Grab freelist stats.
- var freelistFreeN = tx.db.freelist.free_count()
- var freelistPendingN = tx.db.freelist.pending_count()
- var freelistAlloc = tx.db.freelist.size()
-
- // Remove transaction ref & writer lock.
- tx.db.rwtx = nil
- tx.db.rwlock.Unlock()
-
- // Merge statistics.
- tx.db.statlock.Lock()
- tx.db.stats.FreePageN = freelistFreeN
- tx.db.stats.PendingPageN = freelistPendingN
- tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize
- tx.db.stats.FreelistInuse = freelistAlloc
- tx.db.stats.TxStats.add(&tx.stats)
- tx.db.statlock.Unlock()
- } else {
- tx.db.removeTx(tx)
- }
-
- // Clear all references.
- tx.db = nil
- tx.meta = nil
- tx.root = Bucket{tx: tx}
- tx.pages = nil
-}
-
-// Copy writes the entire database to a writer.
-// This function exists for backwards compatibility.
-//
-// Deprecated; Use WriteTo() instead.
-func (tx *Tx) Copy(w io.Writer) error {
- _, err := tx.WriteTo(w)
- return err
-}
-
-// WriteTo writes the entire database to a writer.
-// If err == nil then exactly tx.Size() bytes will be written into the writer.
-func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
- // Attempt to open reader with WriteFlag
- f, err := tx.db.openFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
- if err != nil {
- return 0, err
- }
- defer func() {
- if cerr := f.Close(); err == nil {
- err = cerr
- }
- }()
-
- // Generate a meta page. We use the same page data for both meta pages.
- buf := make([]byte, tx.db.pageSize)
- page := (*page)(unsafe.Pointer(&buf[0]))
- page.flags = metaPageFlag
- *page.meta() = *tx.meta
-
- // Write meta 0.
- page.id = 0
- page.meta().checksum = page.meta().sum64()
- nn, err := w.Write(buf)
- n += int64(nn)
- if err != nil {
- return n, fmt.Errorf("meta 0 copy: %s", err)
- }
-
- // Write meta 1 with a lower transaction id.
- page.id = 1
- page.meta().txid -= 1
- page.meta().checksum = page.meta().sum64()
- nn, err = w.Write(buf)
- n += int64(nn)
- if err != nil {
- return n, fmt.Errorf("meta 1 copy: %s", err)
- }
-
- // Move past the meta pages in the file.
- if _, err := f.Seek(int64(tx.db.pageSize*2), io.SeekStart); err != nil {
- return n, fmt.Errorf("seek: %s", err)
- }
-
- // Copy data pages.
- wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2))
- n += wn
- if err != nil {
- return n, err
- }
-
- return n, nil
-}
-
-// CopyFile copies the entire database to file at the given path.
-// A reader transaction is maintained during the copy so it is safe to continue
-// using the database while a copy is in progress.
-func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
- f, err := tx.db.openFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)
- if err != nil {
- return err
- }
-
- err = tx.Copy(f)
- if err != nil {
- _ = f.Close()
- return err
- }
- return f.Close()
-}
-
-// Check performs several consistency checks on the database for this transaction.
-// An error is returned if any inconsistency is found.
-//
-// It can be safely run concurrently on a writable transaction. However, this
-// incurs a high cost for large databases and databases with a lot of subbuckets
-// because of caching. This overhead can be removed if running on a read-only
-// transaction, however, it is not safe to execute other writer transactions at
-// the same time.
-func (tx *Tx) Check() <-chan error {
- ch := make(chan error)
- go tx.check(ch)
- return ch
-}
-
-func (tx *Tx) check(ch chan error) {
- // Force loading free list if opened in ReadOnly mode.
- tx.db.loadFreelist()
-
- // Check if any pages are double freed.
- freed := make(map[pgid]bool)
- all := make([]pgid, tx.db.freelist.count())
- tx.db.freelist.copyall(all)
- for _, id := range all {
- if freed[id] {
- ch <- fmt.Errorf("page %d: already freed", id)
- }
- freed[id] = true
- }
-
- // Track every reachable page.
- reachable := make(map[pgid]*page)
- reachable[0] = tx.page(0) // meta0
- reachable[1] = tx.page(1) // meta1
- if tx.meta.freelist != pgidNoFreelist {
- for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
- reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
- }
- }
-
- // Recursively check buckets.
- tx.checkBucket(&tx.root, reachable, freed, ch)
-
- // Ensure all pages below high water mark are either reachable or freed.
- for i := pgid(0); i < tx.meta.pgid; i++ {
- _, isReachable := reachable[i]
- if !isReachable && !freed[i] {
- ch <- fmt.Errorf("page %d: unreachable unfreed", int(i))
- }
- }
-
- // Close the channel to signal completion.
- close(ch)
-}
-
-func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) {
- // Ignore inline buckets.
- if b.root == 0 {
- return
- }
-
- // Check every page used by this bucket.
- b.tx.forEachPage(b.root, 0, func(p *page, _ int) {
- if p.id > tx.meta.pgid {
- ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid))
- }
-
- // Ensure each page is only referenced once.
- for i := pgid(0); i <= pgid(p.overflow); i++ {
- var id = p.id + i
- if _, ok := reachable[id]; ok {
- ch <- fmt.Errorf("page %d: multiple references", int(id))
- }
- reachable[id] = p
- }
-
- // We should only encounter un-freed leaf and branch pages.
- if freed[p.id] {
- ch <- fmt.Errorf("page %d: reachable freed", int(p.id))
- } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 {
- ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ())
- }
- })
-
- // Check each bucket within this bucket.
- _ = b.ForEach(func(k, v []byte) error {
- if child := b.Bucket(k); child != nil {
- tx.checkBucket(child, reachable, freed, ch)
- }
- return nil
- })
-}
-
-// allocate returns a contiguous block of memory starting at a given page.
-func (tx *Tx) allocate(count int) (*page, error) {
- p, err := tx.db.allocate(tx.meta.txid, count)
- if err != nil {
- return nil, err
- }
-
- // Save to our page cache.
- tx.pages[p.id] = p
-
- // Update statistics.
- tx.stats.PageCount += count
- tx.stats.PageAlloc += count * tx.db.pageSize
-
- return p, nil
-}
-
-// write writes any dirty pages to disk.
-func (tx *Tx) write() error {
- // Sort pages by id.
- pages := make(pages, 0, len(tx.pages))
- for _, p := range tx.pages {
- pages = append(pages, p)
- }
- // Clear out page cache early.
- tx.pages = make(map[pgid]*page)
- sort.Sort(pages)
-
- // Write pages to disk in order.
- for _, p := range pages {
- rem := (uint64(p.overflow) + 1) * uint64(tx.db.pageSize)
- offset := int64(p.id) * int64(tx.db.pageSize)
- var written uintptr
-
- // Write out page in "max allocation" sized chunks.
- for {
- sz := rem
- if sz > maxAllocSize-1 {
- sz = maxAllocSize - 1
- }
- buf := unsafeByteSlice(unsafe.Pointer(p), written, 0, int(sz))
-
- if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
- return err
- }
-
- // Update statistics.
- tx.stats.Write++
-
- // Exit inner for loop if we've written all the chunks.
- rem -= sz
- if rem == 0 {
- break
- }
-
- // Otherwise move offset forward and move pointer to next chunk.
- offset += int64(sz)
- written += uintptr(sz)
- }
- }
-
- // Ignore file sync if flag is set on DB.
- if !tx.db.NoSync || IgnoreNoSync {
- if err := fdatasync(tx.db); err != nil {
- return err
- }
- }
-
- // Put small pages back to page pool.
- for _, p := range pages {
- // Ignore page sizes over 1 page.
- // These are allocated using make() instead of the page pool.
- if int(p.overflow) != 0 {
- continue
- }
-
- buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, tx.db.pageSize)
-
- // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1
- for i := range buf {
- buf[i] = 0
- }
- tx.db.pagePool.Put(buf)
- }
-
- return nil
-}
-
-// writeMeta writes the meta to the disk.
-func (tx *Tx) writeMeta() error {
- // Create a temporary buffer for the meta page.
- buf := make([]byte, tx.db.pageSize)
- p := tx.db.pageInBuffer(buf, 0)
- tx.meta.write(p)
-
- // Write the meta page to file.
- if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil {
- return err
- }
- if !tx.db.NoSync || IgnoreNoSync {
- if err := fdatasync(tx.db); err != nil {
- return err
- }
- }
-
- // Update statistics.
- tx.stats.Write++
-
- return nil
-}
-
-// page returns a reference to the page with a given id.
-// If page has been written to then a temporary buffered page is returned.
-func (tx *Tx) page(id pgid) *page {
- // Check the dirty pages first.
- if tx.pages != nil {
- if p, ok := tx.pages[id]; ok {
- return p
- }
- }
-
- // Otherwise return directly from the mmap.
- return tx.db.page(id)
-}
-
-// forEachPage iterates over every page within a given page and executes a function.
-func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
- p := tx.page(pgid)
-
- // Execute function.
- fn(p, depth)
-
- // Recursively loop over children.
- if (p.flags & branchPageFlag) != 0 {
- for i := 0; i < int(p.count); i++ {
- elem := p.branchPageElement(uint16(i))
- tx.forEachPage(elem.pgid, depth+1, fn)
- }
- }
-}
-
-// Page returns page information for a given page number.
-// This is only safe for concurrent use when used by a writable transaction.
-func (tx *Tx) Page(id int) (*PageInfo, error) {
- if tx.db == nil {
- return nil, ErrTxClosed
- } else if pgid(id) >= tx.meta.pgid {
- return nil, nil
- }
-
- // Build the page info.
- p := tx.db.page(pgid(id))
- info := &PageInfo{
- ID: id,
- Count: int(p.count),
- OverflowCount: int(p.overflow),
- }
-
- // Determine the type (or if it's free).
- if tx.db.freelist.freed(pgid(id)) {
- info.Type = "free"
- } else {
- info.Type = p.typ()
- }
-
- return info, nil
-}
-
-// TxStats represents statistics about the actions performed by the transaction.
-type TxStats struct {
- // Page statistics.
- PageCount int // number of page allocations
- PageAlloc int // total bytes allocated
-
- // Cursor statistics.
- CursorCount int // number of cursors created
-
- // Node statistics
- NodeCount int // number of node allocations
- NodeDeref int // number of node dereferences
-
- // Rebalance statistics.
- Rebalance int // number of node rebalances
- RebalanceTime time.Duration // total time spent rebalancing
-
- // Split/Spill statistics.
- Split int // number of nodes split
- Spill int // number of nodes spilled
- SpillTime time.Duration // total time spent spilling
-
- // Write statistics.
- Write int // number of writes performed
- WriteTime time.Duration // total time spent writing to disk
-}
-
-func (s *TxStats) add(other *TxStats) {
- s.PageCount += other.PageCount
- s.PageAlloc += other.PageAlloc
- s.CursorCount += other.CursorCount
- s.NodeCount += other.NodeCount
- s.NodeDeref += other.NodeDeref
- s.Rebalance += other.Rebalance
- s.RebalanceTime += other.RebalanceTime
- s.Split += other.Split
- s.Spill += other.Spill
- s.SpillTime += other.SpillTime
- s.Write += other.Write
- s.WriteTime += other.WriteTime
-}
-
-// Sub calculates and returns the difference between two sets of transaction stats.
-// This is useful when obtaining stats at two different points and time and
-// you need the performance counters that occurred within that time span.
-func (s *TxStats) Sub(other *TxStats) TxStats {
- var diff TxStats
- diff.PageCount = s.PageCount - other.PageCount
- diff.PageAlloc = s.PageAlloc - other.PageAlloc
- diff.CursorCount = s.CursorCount - other.CursorCount
- diff.NodeCount = s.NodeCount - other.NodeCount
- diff.NodeDeref = s.NodeDeref - other.NodeDeref
- diff.Rebalance = s.Rebalance - other.Rebalance
- diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime
- diff.Split = s.Split - other.Split
- diff.Spill = s.Spill - other.Spill
- diff.SpillTime = s.SpillTime - other.SpillTime
- diff.Write = s.Write - other.Write
- diff.WriteTime = s.WriteTime - other.WriteTime
- return diff
-}
diff --git a/vendor/go.etcd.io/bbolt/unsafe.go b/vendor/go.etcd.io/bbolt/unsafe.go
deleted file mode 100644
index c0e50375..00000000
--- a/vendor/go.etcd.io/bbolt/unsafe.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package bbolt
-
-import (
- "reflect"
- "unsafe"
-)
-
-func unsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer {
- return unsafe.Pointer(uintptr(base) + offset)
-}
-
-func unsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer {
- return unsafe.Pointer(uintptr(base) + offset + uintptr(n)*elemsz)
-}
-
-func unsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte {
- // See: https://github.com/golang/go/wiki/cgo#turning-c-arrays-into-go-slices
- //
- // This memory is not allocated from C, but it is unmanaged by Go's
- // garbage collector and should behave similarly, and the compiler
- // should produce similar code. Note that this conversion allows a
- // subslice to begin after the base address, with an optional offset,
- // while the URL above does not cover this case and only slices from
- // index 0. However, the wiki never says that the address must be to
- // the beginning of a C allocation (or even that malloc was used at
- // all), so this is believed to be correct.
- return (*[maxAllocSize]byte)(unsafeAdd(base, offset))[i:j:j]
-}
-
-// unsafeSlice modifies the data, len, and cap of a slice variable pointed to by
-// the slice parameter. This helper should be used over other direct
-// manipulation of reflect.SliceHeader to prevent misuse, namely, converting
-// from reflect.SliceHeader to a Go slice type.
-func unsafeSlice(slice, data unsafe.Pointer, len int) {
- s := (*reflect.SliceHeader)(slice)
- s.Data = uintptr(data)
- s.Cap = len
- s.Len = len
-}
diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE
deleted file mode 100644
index 6a66aea5..00000000
--- a/vendor/golang.org/x/net/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS
deleted file mode 100644
index 73309904..00000000
--- a/vendor/golang.org/x/net/PATENTS
+++ /dev/null
@@ -1,22 +0,0 @@
-Additional IP Rights Grant (Patents)
-
-"This implementation" means the copyrightable works distributed by
-Google as part of the Go project.
-
-Google hereby grants to You a perpetual, worldwide, non-exclusive,
-no-charge, royalty-free, irrevocable (except as stated in this section)
-patent license to make, have made, use, offer to sell, sell, import,
-transfer and otherwise run, modify and propagate the contents of this
-implementation of Go, where such license applies only to those patent
-claims, both currently owned or controlled by Google and acquired in
-the future, licensable by Google that are necessarily infringed by this
-implementation of Go. This grant does not include claims that would be
-infringed only as a consequence of further modification of this
-implementation. If you or your agent or exclusive licensee institute or
-order or agree to the institution of patent litigation against any
-entity (including a cross-claim or counterclaim in a lawsuit) alleging
-that this implementation of Go or any code incorporated within this
-implementation of Go constitutes direct or contributory patent
-infringement, or inducement of patent infringement, then any patent
-rights granted to you under this License for this implementation of Go
-shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go
deleted file mode 100644
index dc5225b6..00000000
--- a/vendor/golang.org/x/net/internal/timeseries/timeseries.go
+++ /dev/null
@@ -1,525 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package timeseries implements a time series structure for stats collection.
-package timeseries // import "golang.org/x/net/internal/timeseries"
-
-import (
- "fmt"
- "log"
- "time"
-)
-
-const (
- timeSeriesNumBuckets = 64
- minuteHourSeriesNumBuckets = 60
-)
-
-var timeSeriesResolutions = []time.Duration{
- 1 * time.Second,
- 10 * time.Second,
- 1 * time.Minute,
- 10 * time.Minute,
- 1 * time.Hour,
- 6 * time.Hour,
- 24 * time.Hour, // 1 day
- 7 * 24 * time.Hour, // 1 week
- 4 * 7 * 24 * time.Hour, // 4 weeks
- 16 * 7 * 24 * time.Hour, // 16 weeks
-}
-
-var minuteHourSeriesResolutions = []time.Duration{
- 1 * time.Second,
- 1 * time.Minute,
-}
-
-// An Observable is a kind of data that can be aggregated in a time series.
-type Observable interface {
- Multiply(ratio float64) // Multiplies the data in self by a given ratio
- Add(other Observable) // Adds the data from a different observation to self
- Clear() // Clears the observation so it can be reused.
- CopyFrom(other Observable) // Copies the contents of a given observation to self
-}
-
-// Float attaches the methods of Observable to a float64.
-type Float float64
-
-// NewFloat returns a Float.
-func NewFloat() Observable {
- f := Float(0)
- return &f
-}
-
-// String returns the float as a string.
-func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) }
-
-// Value returns the float's value.
-func (f *Float) Value() float64 { return float64(*f) }
-
-func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) }
-
-func (f *Float) Add(other Observable) {
- o := other.(*Float)
- *f += *o
-}
-
-func (f *Float) Clear() { *f = 0 }
-
-func (f *Float) CopyFrom(other Observable) {
- o := other.(*Float)
- *f = *o
-}
-
-// A Clock tells the current time.
-type Clock interface {
- Time() time.Time
-}
-
-type defaultClock int
-
-var defaultClockInstance defaultClock
-
-func (defaultClock) Time() time.Time { return time.Now() }
-
-// Information kept per level. Each level consists of a circular list of
-// observations. The start of the level may be derived from end and the
-// len(buckets) * sizeInMillis.
-type tsLevel struct {
- oldest int // index to oldest bucketed Observable
- newest int // index to newest bucketed Observable
- end time.Time // end timestamp for this level
- size time.Duration // duration of the bucketed Observable
- buckets []Observable // collections of observations
- provider func() Observable // used for creating new Observable
-}
-
-func (l *tsLevel) Clear() {
- l.oldest = 0
- l.newest = len(l.buckets) - 1
- l.end = time.Time{}
- for i := range l.buckets {
- if l.buckets[i] != nil {
- l.buckets[i].Clear()
- l.buckets[i] = nil
- }
- }
-}
-
-func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) {
- l.size = size
- l.provider = f
- l.buckets = make([]Observable, numBuckets)
-}
-
-// Keeps a sequence of levels. Each level is responsible for storing data at
-// a given resolution. For example, the first level stores data at a one
-// minute resolution while the second level stores data at a one hour
-// resolution.
-
-// Each level is represented by a sequence of buckets. Each bucket spans an
-// interval equal to the resolution of the level. New observations are added
-// to the last bucket.
-type timeSeries struct {
- provider func() Observable // make more Observable
- numBuckets int // number of buckets in each level
- levels []*tsLevel // levels of bucketed Observable
- lastAdd time.Time // time of last Observable tracked
- total Observable // convenient aggregation of all Observable
- clock Clock // Clock for getting current time
- pending Observable // observations not yet bucketed
- pendingTime time.Time // what time are we keeping in pending
- dirty bool // if there are pending observations
-}
-
-// init initializes a level according to the supplied criteria.
-func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) {
- ts.provider = f
- ts.numBuckets = numBuckets
- ts.clock = clock
- ts.levels = make([]*tsLevel, len(resolutions))
-
- for i := range resolutions {
- if i > 0 && resolutions[i-1] >= resolutions[i] {
- log.Print("timeseries: resolutions must be monotonically increasing")
- break
- }
- newLevel := new(tsLevel)
- newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider)
- ts.levels[i] = newLevel
- }
-
- ts.Clear()
-}
-
-// Clear removes all observations from the time series.
-func (ts *timeSeries) Clear() {
- ts.lastAdd = time.Time{}
- ts.total = ts.resetObservation(ts.total)
- ts.pending = ts.resetObservation(ts.pending)
- ts.pendingTime = time.Time{}
- ts.dirty = false
-
- for i := range ts.levels {
- ts.levels[i].Clear()
- }
-}
-
-// Add records an observation at the current time.
-func (ts *timeSeries) Add(observation Observable) {
- ts.AddWithTime(observation, ts.clock.Time())
-}
-
-// AddWithTime records an observation at the specified time.
-func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) {
-
- smallBucketDuration := ts.levels[0].size
-
- if t.After(ts.lastAdd) {
- ts.lastAdd = t
- }
-
- if t.After(ts.pendingTime) {
- ts.advance(t)
- ts.mergePendingUpdates()
- ts.pendingTime = ts.levels[0].end
- ts.pending.CopyFrom(observation)
- ts.dirty = true
- } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) {
- // The observation is close enough to go into the pending bucket.
- // This compensates for clock skewing and small scheduling delays
- // by letting the update stay in the fast path.
- ts.pending.Add(observation)
- ts.dirty = true
- } else {
- ts.mergeValue(observation, t)
- }
-}
-
-// mergeValue inserts the observation at the specified time in the past into all levels.
-func (ts *timeSeries) mergeValue(observation Observable, t time.Time) {
- for _, level := range ts.levels {
- index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size)
- if 0 <= index && index < ts.numBuckets {
- bucketNumber := (level.oldest + index) % ts.numBuckets
- if level.buckets[bucketNumber] == nil {
- level.buckets[bucketNumber] = level.provider()
- }
- level.buckets[bucketNumber].Add(observation)
- }
- }
- ts.total.Add(observation)
-}
-
-// mergePendingUpdates applies the pending updates into all levels.
-func (ts *timeSeries) mergePendingUpdates() {
- if ts.dirty {
- ts.mergeValue(ts.pending, ts.pendingTime)
- ts.pending = ts.resetObservation(ts.pending)
- ts.dirty = false
- }
-}
-
-// advance cycles the buckets at each level until the latest bucket in
-// each level can hold the time specified.
-func (ts *timeSeries) advance(t time.Time) {
- if !t.After(ts.levels[0].end) {
- return
- }
- for i := 0; i < len(ts.levels); i++ {
- level := ts.levels[i]
- if !level.end.Before(t) {
- break
- }
-
- // If the time is sufficiently far, just clear the level and advance
- // directly.
- if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) {
- for _, b := range level.buckets {
- ts.resetObservation(b)
- }
- level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds())
- }
-
- for t.After(level.end) {
- level.end = level.end.Add(level.size)
- level.newest = level.oldest
- level.oldest = (level.oldest + 1) % ts.numBuckets
- ts.resetObservation(level.buckets[level.newest])
- }
-
- t = level.end
- }
-}
-
-// Latest returns the sum of the num latest buckets from the level.
-func (ts *timeSeries) Latest(level, num int) Observable {
- now := ts.clock.Time()
- if ts.levels[0].end.Before(now) {
- ts.advance(now)
- }
-
- ts.mergePendingUpdates()
-
- result := ts.provider()
- l := ts.levels[level]
- index := l.newest
-
- for i := 0; i < num; i++ {
- if l.buckets[index] != nil {
- result.Add(l.buckets[index])
- }
- if index == 0 {
- index = ts.numBuckets
- }
- index--
- }
-
- return result
-}
-
-// LatestBuckets returns a copy of the num latest buckets from level.
-func (ts *timeSeries) LatestBuckets(level, num int) []Observable {
- if level < 0 || level > len(ts.levels) {
- log.Print("timeseries: bad level argument: ", level)
- return nil
- }
- if num < 0 || num >= ts.numBuckets {
- log.Print("timeseries: bad num argument: ", num)
- return nil
- }
-
- results := make([]Observable, num)
- now := ts.clock.Time()
- if ts.levels[0].end.Before(now) {
- ts.advance(now)
- }
-
- ts.mergePendingUpdates()
-
- l := ts.levels[level]
- index := l.newest
-
- for i := 0; i < num; i++ {
- result := ts.provider()
- results[i] = result
- if l.buckets[index] != nil {
- result.CopyFrom(l.buckets[index])
- }
-
- if index == 0 {
- index = ts.numBuckets
- }
- index -= 1
- }
- return results
-}
-
-// ScaleBy updates observations by scaling by factor.
-func (ts *timeSeries) ScaleBy(factor float64) {
- for _, l := range ts.levels {
- for i := 0; i < ts.numBuckets; i++ {
- l.buckets[i].Multiply(factor)
- }
- }
-
- ts.total.Multiply(factor)
- ts.pending.Multiply(factor)
-}
-
-// Range returns the sum of observations added over the specified time range.
-// If start or finish times don't fall on bucket boundaries of the same
-// level, then return values are approximate answers.
-func (ts *timeSeries) Range(start, finish time.Time) Observable {
- return ts.ComputeRange(start, finish, 1)[0]
-}
-
-// Recent returns the sum of observations from the last delta.
-func (ts *timeSeries) Recent(delta time.Duration) Observable {
- now := ts.clock.Time()
- return ts.Range(now.Add(-delta), now)
-}
-
-// Total returns the total of all observations.
-func (ts *timeSeries) Total() Observable {
- ts.mergePendingUpdates()
- return ts.total
-}
-
-// ComputeRange computes a specified number of values into a slice using
-// the observations recorded over the specified time period. The return
-// values are approximate if the start or finish times don't fall on the
-// bucket boundaries at the same level or if the number of buckets spanning
-// the range is not an integral multiple of num.
-func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable {
- if start.After(finish) {
- log.Printf("timeseries: start > finish, %v>%v", start, finish)
- return nil
- }
-
- if num < 0 {
- log.Printf("timeseries: num < 0, %v", num)
- return nil
- }
-
- results := make([]Observable, num)
-
- for _, l := range ts.levels {
- if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) {
- ts.extract(l, start, finish, num, results)
- return results
- }
- }
-
- // Failed to find a level that covers the desired range. So just
- // extract from the last level, even if it doesn't cover the entire
- // desired range.
- ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results)
-
- return results
-}
-
-// RecentList returns the specified number of values in slice over the most
-// recent time period of the specified range.
-func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable {
- if delta < 0 {
- return nil
- }
- now := ts.clock.Time()
- return ts.ComputeRange(now.Add(-delta), now, num)
-}
-
-// extract returns a slice of specified number of observations from a given
-// level over a given range.
-func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) {
- ts.mergePendingUpdates()
-
- srcInterval := l.size
- dstInterval := finish.Sub(start) / time.Duration(num)
- dstStart := start
- srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets))
-
- srcIndex := 0
-
- // Where should scanning start?
- if dstStart.After(srcStart) {
- advance := int(dstStart.Sub(srcStart) / srcInterval)
- srcIndex += advance
- srcStart = srcStart.Add(time.Duration(advance) * srcInterval)
- }
-
- // The i'th value is computed as show below.
- // interval = (finish/start)/num
- // i'th value = sum of observation in range
- // [ start + i * interval,
- // start + (i + 1) * interval )
- for i := 0; i < num; i++ {
- results[i] = ts.resetObservation(results[i])
- dstEnd := dstStart.Add(dstInterval)
- for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) {
- srcEnd := srcStart.Add(srcInterval)
- if srcEnd.After(ts.lastAdd) {
- srcEnd = ts.lastAdd
- }
-
- if !srcEnd.Before(dstStart) {
- srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets]
- if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) {
- // dst completely contains src.
- if srcValue != nil {
- results[i].Add(srcValue)
- }
- } else {
- // dst partially overlaps src.
- overlapStart := maxTime(srcStart, dstStart)
- overlapEnd := minTime(srcEnd, dstEnd)
- base := srcEnd.Sub(srcStart)
- fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds()
-
- used := ts.provider()
- if srcValue != nil {
- used.CopyFrom(srcValue)
- }
- used.Multiply(fraction)
- results[i].Add(used)
- }
-
- if srcEnd.After(dstEnd) {
- break
- }
- }
- srcIndex++
- srcStart = srcStart.Add(srcInterval)
- }
- dstStart = dstStart.Add(dstInterval)
- }
-}
-
-// resetObservation clears the content so the struct may be reused.
-func (ts *timeSeries) resetObservation(observation Observable) Observable {
- if observation == nil {
- observation = ts.provider()
- } else {
- observation.Clear()
- }
- return observation
-}
-
-// TimeSeries tracks data at granularities from 1 second to 16 weeks.
-type TimeSeries struct {
- timeSeries
-}
-
-// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable.
-func NewTimeSeries(f func() Observable) *TimeSeries {
- return NewTimeSeriesWithClock(f, defaultClockInstance)
-}
-
-// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for
-// assigning timestamps.
-func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries {
- ts := new(TimeSeries)
- ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock)
- return ts
-}
-
-// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour.
-type MinuteHourSeries struct {
- timeSeries
-}
-
-// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable.
-func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries {
- return NewMinuteHourSeriesWithClock(f, defaultClockInstance)
-}
-
-// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for
-// assigning timestamps.
-func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries {
- ts := new(MinuteHourSeries)
- ts.timeSeries.init(minuteHourSeriesResolutions, f,
- minuteHourSeriesNumBuckets, clock)
- return ts
-}
-
-func (ts *MinuteHourSeries) Minute() Observable {
- return ts.timeSeries.Latest(0, 60)
-}
-
-func (ts *MinuteHourSeries) Hour() Observable {
- return ts.timeSeries.Latest(1, 60)
-}
-
-func minTime(a, b time.Time) time.Time {
- if a.Before(b) {
- return a
- }
- return b
-}
-
-func maxTime(a, b time.Time) time.Time {
- if a.After(b) {
- return a
- }
- return b
-}
diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go
deleted file mode 100644
index c646a695..00000000
--- a/vendor/golang.org/x/net/trace/events.go
+++ /dev/null
@@ -1,532 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package trace
-
-import (
- "bytes"
- "fmt"
- "html/template"
- "io"
- "log"
- "net/http"
- "runtime"
- "sort"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "text/tabwriter"
- "time"
-)
-
-const maxEventsPerLog = 100
-
-type bucket struct {
- MaxErrAge time.Duration
- String string
-}
-
-var buckets = []bucket{
- {0, "total"},
- {10 * time.Second, "errs<10s"},
- {1 * time.Minute, "errs<1m"},
- {10 * time.Minute, "errs<10m"},
- {1 * time.Hour, "errs<1h"},
- {10 * time.Hour, "errs<10h"},
- {24000 * time.Hour, "errors"},
-}
-
-// RenderEvents renders the HTML page typically served at /debug/events.
-// It does not do any auth checking. The request may be nil.
-//
-// Most users will use the Events handler.
-func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) {
- now := time.Now()
- data := &struct {
- Families []string // family names
- Buckets []bucket
- Counts [][]int // eventLog count per family/bucket
-
- // Set when a bucket has been selected.
- Family string
- Bucket int
- EventLogs eventLogs
- Expanded bool
- }{
- Buckets: buckets,
- }
-
- data.Families = make([]string, 0, len(families))
- famMu.RLock()
- for name := range families {
- data.Families = append(data.Families, name)
- }
- famMu.RUnlock()
- sort.Strings(data.Families)
-
- // Count the number of eventLogs in each family for each error age.
- data.Counts = make([][]int, len(data.Families))
- for i, name := range data.Families {
- // TODO(sameer): move this loop under the family lock.
- f := getEventFamily(name)
- data.Counts[i] = make([]int, len(data.Buckets))
- for j, b := range data.Buckets {
- data.Counts[i][j] = f.Count(now, b.MaxErrAge)
- }
- }
-
- if req != nil {
- var ok bool
- data.Family, data.Bucket, ok = parseEventsArgs(req)
- if !ok {
- // No-op
- } else {
- data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge)
- }
- if data.EventLogs != nil {
- defer data.EventLogs.Free()
- sort.Sort(data.EventLogs)
- }
- if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil {
- data.Expanded = exp
- }
- }
-
- famMu.RLock()
- defer famMu.RUnlock()
- if err := eventsTmpl().Execute(w, data); err != nil {
- log.Printf("net/trace: Failed executing template: %v", err)
- }
-}
-
-func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) {
- fam, bStr := req.FormValue("fam"), req.FormValue("b")
- if fam == "" || bStr == "" {
- return "", 0, false
- }
- b, err := strconv.Atoi(bStr)
- if err != nil || b < 0 || b >= len(buckets) {
- return "", 0, false
- }
- return fam, b, true
-}
-
-// An EventLog provides a log of events associated with a specific object.
-type EventLog interface {
- // Printf formats its arguments with fmt.Sprintf and adds the
- // result to the event log.
- Printf(format string, a ...interface{})
-
- // Errorf is like Printf, but it marks this event as an error.
- Errorf(format string, a ...interface{})
-
- // Finish declares that this event log is complete.
- // The event log should not be used after calling this method.
- Finish()
-}
-
-// NewEventLog returns a new EventLog with the specified family name
-// and title.
-func NewEventLog(family, title string) EventLog {
- el := newEventLog()
- el.ref()
- el.Family, el.Title = family, title
- el.Start = time.Now()
- el.events = make([]logEntry, 0, maxEventsPerLog)
- el.stack = make([]uintptr, 32)
- n := runtime.Callers(2, el.stack)
- el.stack = el.stack[:n]
-
- getEventFamily(family).add(el)
- return el
-}
-
-func (el *eventLog) Finish() {
- getEventFamily(el.Family).remove(el)
- el.unref() // matches ref in New
-}
-
-var (
- famMu sync.RWMutex
- families = make(map[string]*eventFamily) // family name => family
-)
-
-func getEventFamily(fam string) *eventFamily {
- famMu.Lock()
- defer famMu.Unlock()
- f := families[fam]
- if f == nil {
- f = &eventFamily{}
- families[fam] = f
- }
- return f
-}
-
-type eventFamily struct {
- mu sync.RWMutex
- eventLogs eventLogs
-}
-
-func (f *eventFamily) add(el *eventLog) {
- f.mu.Lock()
- f.eventLogs = append(f.eventLogs, el)
- f.mu.Unlock()
-}
-
-func (f *eventFamily) remove(el *eventLog) {
- f.mu.Lock()
- defer f.mu.Unlock()
- for i, el0 := range f.eventLogs {
- if el == el0 {
- copy(f.eventLogs[i:], f.eventLogs[i+1:])
- f.eventLogs = f.eventLogs[:len(f.eventLogs)-1]
- return
- }
- }
-}
-
-func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) {
- f.mu.RLock()
- defer f.mu.RUnlock()
- for _, el := range f.eventLogs {
- if el.hasRecentError(now, maxErrAge) {
- n++
- }
- }
- return
-}
-
-func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) {
- f.mu.RLock()
- defer f.mu.RUnlock()
- els = make(eventLogs, 0, len(f.eventLogs))
- for _, el := range f.eventLogs {
- if el.hasRecentError(now, maxErrAge) {
- el.ref()
- els = append(els, el)
- }
- }
- return
-}
-
-type eventLogs []*eventLog
-
-// Free calls unref on each element of the list.
-func (els eventLogs) Free() {
- for _, el := range els {
- el.unref()
- }
-}
-
-// eventLogs may be sorted in reverse chronological order.
-func (els eventLogs) Len() int { return len(els) }
-func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) }
-func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] }
-
-// A logEntry is a timestamped log entry in an event log.
-type logEntry struct {
- When time.Time
- Elapsed time.Duration // since previous event in log
- NewDay bool // whether this event is on a different day to the previous event
- What string
- IsErr bool
-}
-
-// WhenString returns a string representation of the elapsed time of the event.
-// It will include the date if midnight was crossed.
-func (e logEntry) WhenString() string {
- if e.NewDay {
- return e.When.Format("2006/01/02 15:04:05.000000")
- }
- return e.When.Format("15:04:05.000000")
-}
-
-// An eventLog represents an active event log.
-type eventLog struct {
- // Family is the top-level grouping of event logs to which this belongs.
- Family string
-
- // Title is the title of this event log.
- Title string
-
- // Timing information.
- Start time.Time
-
- // Call stack where this event log was created.
- stack []uintptr
-
- // Append-only sequence of events.
- //
- // TODO(sameer): change this to a ring buffer to avoid the array copy
- // when we hit maxEventsPerLog.
- mu sync.RWMutex
- events []logEntry
- LastErrorTime time.Time
- discarded int
-
- refs int32 // how many buckets this is in
-}
-
-func (el *eventLog) reset() {
- // Clear all but the mutex. Mutexes may not be copied, even when unlocked.
- el.Family = ""
- el.Title = ""
- el.Start = time.Time{}
- el.stack = nil
- el.events = nil
- el.LastErrorTime = time.Time{}
- el.discarded = 0
- el.refs = 0
-}
-
-func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool {
- if maxErrAge == 0 {
- return true
- }
- el.mu.RLock()
- defer el.mu.RUnlock()
- return now.Sub(el.LastErrorTime) < maxErrAge
-}
-
-// delta returns the elapsed time since the last event or the log start,
-// and whether it spans midnight.
-// L >= el.mu
-func (el *eventLog) delta(t time.Time) (time.Duration, bool) {
- if len(el.events) == 0 {
- return t.Sub(el.Start), false
- }
- prev := el.events[len(el.events)-1].When
- return t.Sub(prev), prev.Day() != t.Day()
-
-}
-
-func (el *eventLog) Printf(format string, a ...interface{}) {
- el.printf(false, format, a...)
-}
-
-func (el *eventLog) Errorf(format string, a ...interface{}) {
- el.printf(true, format, a...)
-}
-
-func (el *eventLog) printf(isErr bool, format string, a ...interface{}) {
- e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)}
- el.mu.Lock()
- e.Elapsed, e.NewDay = el.delta(e.When)
- if len(el.events) < maxEventsPerLog {
- el.events = append(el.events, e)
- } else {
- // Discard the oldest event.
- if el.discarded == 0 {
- // el.discarded starts at two to count for the event it
- // is replacing, plus the next one that we are about to
- // drop.
- el.discarded = 2
- } else {
- el.discarded++
- }
- // TODO(sameer): if this causes allocations on a critical path,
- // change eventLog.What to be a fmt.Stringer, as in trace.go.
- el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded)
- // The timestamp of the discarded meta-event should be
- // the time of the last event it is representing.
- el.events[0].When = el.events[1].When
- copy(el.events[1:], el.events[2:])
- el.events[maxEventsPerLog-1] = e
- }
- if e.IsErr {
- el.LastErrorTime = e.When
- }
- el.mu.Unlock()
-}
-
-func (el *eventLog) ref() {
- atomic.AddInt32(&el.refs, 1)
-}
-
-func (el *eventLog) unref() {
- if atomic.AddInt32(&el.refs, -1) == 0 {
- freeEventLog(el)
- }
-}
-
-func (el *eventLog) When() string {
- return el.Start.Format("2006/01/02 15:04:05.000000")
-}
-
-func (el *eventLog) ElapsedTime() string {
- elapsed := time.Since(el.Start)
- return fmt.Sprintf("%.6f", elapsed.Seconds())
-}
-
-func (el *eventLog) Stack() string {
- buf := new(bytes.Buffer)
- tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0)
- printStackRecord(tw, el.stack)
- tw.Flush()
- return buf.String()
-}
-
-// printStackRecord prints the function + source line information
-// for a single stack trace.
-// Adapted from runtime/pprof/pprof.go.
-func printStackRecord(w io.Writer, stk []uintptr) {
- for _, pc := range stk {
- f := runtime.FuncForPC(pc)
- if f == nil {
- continue
- }
- file, line := f.FileLine(pc)
- name := f.Name()
- // Hide runtime.goexit and any runtime functions at the beginning.
- if strings.HasPrefix(name, "runtime.") {
- continue
- }
- fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line)
- }
-}
-
-func (el *eventLog) Events() []logEntry {
- el.mu.RLock()
- defer el.mu.RUnlock()
- return el.events
-}
-
-// freeEventLogs is a freelist of *eventLog
-var freeEventLogs = make(chan *eventLog, 1000)
-
-// newEventLog returns a event log ready to use.
-func newEventLog() *eventLog {
- select {
- case el := <-freeEventLogs:
- return el
- default:
- return new(eventLog)
- }
-}
-
-// freeEventLog adds el to freeEventLogs if there's room.
-// This is non-blocking.
-func freeEventLog(el *eventLog) {
- el.reset()
- select {
- case freeEventLogs <- el:
- default:
- }
-}
-
-var eventsTmplCache *template.Template
-var eventsTmplOnce sync.Once
-
-func eventsTmpl() *template.Template {
- eventsTmplOnce.Do(func() {
- eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{
- "elapsed": elapsed,
- "trimSpace": strings.TrimSpace,
- }).Parse(eventsHTML))
- })
- return eventsTmplCache
-}
-
-const eventsHTML = `
-
-
- events
-
-
-
-
-/debug/events
-
-
-
-{{if $.EventLogs}}
-
-Family: {{$.Family}}
-
-{{if $.Expanded}}{{end}}
-[Summary]{{if $.Expanded}} {{end}}
-
-{{if not $.Expanded}}{{end}}
-[Expanded]{{if not $.Expanded}} {{end}}
-
-
- When Elapsed
- {{range $el := $.EventLogs}}
-
- {{$el.When}}
- {{$el.ElapsedTime}}
- {{$el.Title}}
-
- {{if $.Expanded}}
-
-
-
- {{$el.Stack|trimSpace}}
-
- {{range $el.Events}}
-
- {{.WhenString}}
- {{elapsed .Elapsed}}
- .{{if .IsErr}}E{{else}}.{{end}}. {{.What}}
-
- {{end}}
- {{end}}
- {{end}}
-
-{{end}}
-
-
-`
diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go
deleted file mode 100644
index d6c71101..00000000
--- a/vendor/golang.org/x/net/trace/histogram.go
+++ /dev/null
@@ -1,365 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package trace
-
-// This file implements histogramming for RPC statistics collection.
-
-import (
- "bytes"
- "fmt"
- "html/template"
- "log"
- "math"
- "sync"
-
- "golang.org/x/net/internal/timeseries"
-)
-
-const (
- bucketCount = 38
-)
-
-// histogram keeps counts of values in buckets that are spaced
-// out in powers of 2: 0-1, 2-3, 4-7...
-// histogram implements timeseries.Observable
-type histogram struct {
- sum int64 // running total of measurements
- sumOfSquares float64 // square of running total
- buckets []int64 // bucketed values for histogram
- value int // holds a single value as an optimization
- valueCount int64 // number of values recorded for single value
-}
-
-// addMeasurement records a value measurement observation to the histogram.
-func (h *histogram) addMeasurement(value int64) {
- // TODO: assert invariant
- h.sum += value
- h.sumOfSquares += float64(value) * float64(value)
-
- bucketIndex := getBucket(value)
-
- if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) {
- h.value = bucketIndex
- h.valueCount++
- } else {
- h.allocateBuckets()
- h.buckets[bucketIndex]++
- }
-}
-
-func (h *histogram) allocateBuckets() {
- if h.buckets == nil {
- h.buckets = make([]int64, bucketCount)
- h.buckets[h.value] = h.valueCount
- h.value = 0
- h.valueCount = -1
- }
-}
-
-func log2(i int64) int {
- n := 0
- for ; i >= 0x100; i >>= 8 {
- n += 8
- }
- for ; i > 0; i >>= 1 {
- n += 1
- }
- return n
-}
-
-func getBucket(i int64) (index int) {
- index = log2(i) - 1
- if index < 0 {
- index = 0
- }
- if index >= bucketCount {
- index = bucketCount - 1
- }
- return
-}
-
-// Total returns the number of recorded observations.
-func (h *histogram) total() (total int64) {
- if h.valueCount >= 0 {
- total = h.valueCount
- }
- for _, val := range h.buckets {
- total += int64(val)
- }
- return
-}
-
-// Average returns the average value of recorded observations.
-func (h *histogram) average() float64 {
- t := h.total()
- if t == 0 {
- return 0
- }
- return float64(h.sum) / float64(t)
-}
-
-// Variance returns the variance of recorded observations.
-func (h *histogram) variance() float64 {
- t := float64(h.total())
- if t == 0 {
- return 0
- }
- s := float64(h.sum) / t
- return h.sumOfSquares/t - s*s
-}
-
-// StandardDeviation returns the standard deviation of recorded observations.
-func (h *histogram) standardDeviation() float64 {
- return math.Sqrt(h.variance())
-}
-
-// PercentileBoundary estimates the value that the given fraction of recorded
-// observations are less than.
-func (h *histogram) percentileBoundary(percentile float64) int64 {
- total := h.total()
-
- // Corner cases (make sure result is strictly less than Total())
- if total == 0 {
- return 0
- } else if total == 1 {
- return int64(h.average())
- }
-
- percentOfTotal := round(float64(total) * percentile)
- var runningTotal int64
-
- for i := range h.buckets {
- value := h.buckets[i]
- runningTotal += value
- if runningTotal == percentOfTotal {
- // We hit an exact bucket boundary. If the next bucket has data, it is a
- // good estimate of the value. If the bucket is empty, we interpolate the
- // midpoint between the next bucket's boundary and the next non-zero
- // bucket. If the remaining buckets are all empty, then we use the
- // boundary for the next bucket as the estimate.
- j := uint8(i + 1)
- min := bucketBoundary(j)
- if runningTotal < total {
- for h.buckets[j] == 0 {
- j++
- }
- }
- max := bucketBoundary(j)
- return min + round(float64(max-min)/2)
- } else if runningTotal > percentOfTotal {
- // The value is in this bucket. Interpolate the value.
- delta := runningTotal - percentOfTotal
- percentBucket := float64(value-delta) / float64(value)
- bucketMin := bucketBoundary(uint8(i))
- nextBucketMin := bucketBoundary(uint8(i + 1))
- bucketSize := nextBucketMin - bucketMin
- return bucketMin + round(percentBucket*float64(bucketSize))
- }
- }
- return bucketBoundary(bucketCount - 1)
-}
-
-// Median returns the estimated median of the observed values.
-func (h *histogram) median() int64 {
- return h.percentileBoundary(0.5)
-}
-
-// Add adds other to h.
-func (h *histogram) Add(other timeseries.Observable) {
- o := other.(*histogram)
- if o.valueCount == 0 {
- // Other histogram is empty
- } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value {
- // Both have a single bucketed value, aggregate them
- h.valueCount += o.valueCount
- } else {
- // Two different values necessitate buckets in this histogram
- h.allocateBuckets()
- if o.valueCount >= 0 {
- h.buckets[o.value] += o.valueCount
- } else {
- for i := range h.buckets {
- h.buckets[i] += o.buckets[i]
- }
- }
- }
- h.sumOfSquares += o.sumOfSquares
- h.sum += o.sum
-}
-
-// Clear resets the histogram to an empty state, removing all observed values.
-func (h *histogram) Clear() {
- h.buckets = nil
- h.value = 0
- h.valueCount = 0
- h.sum = 0
- h.sumOfSquares = 0
-}
-
-// CopyFrom copies from other, which must be a *histogram, into h.
-func (h *histogram) CopyFrom(other timeseries.Observable) {
- o := other.(*histogram)
- if o.valueCount == -1 {
- h.allocateBuckets()
- copy(h.buckets, o.buckets)
- }
- h.sum = o.sum
- h.sumOfSquares = o.sumOfSquares
- h.value = o.value
- h.valueCount = o.valueCount
-}
-
-// Multiply scales the histogram by the specified ratio.
-func (h *histogram) Multiply(ratio float64) {
- if h.valueCount == -1 {
- for i := range h.buckets {
- h.buckets[i] = int64(float64(h.buckets[i]) * ratio)
- }
- } else {
- h.valueCount = int64(float64(h.valueCount) * ratio)
- }
- h.sum = int64(float64(h.sum) * ratio)
- h.sumOfSquares = h.sumOfSquares * ratio
-}
-
-// New creates a new histogram.
-func (h *histogram) New() timeseries.Observable {
- r := new(histogram)
- r.Clear()
- return r
-}
-
-func (h *histogram) String() string {
- return fmt.Sprintf("%d, %f, %d, %d, %v",
- h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets)
-}
-
-// round returns the closest int64 to the argument
-func round(in float64) int64 {
- return int64(math.Floor(in + 0.5))
-}
-
-// bucketBoundary returns the first value in the bucket.
-func bucketBoundary(bucket uint8) int64 {
- if bucket == 0 {
- return 0
- }
- return 1 << bucket
-}
-
-// bucketData holds data about a specific bucket for use in distTmpl.
-type bucketData struct {
- Lower, Upper int64
- N int64
- Pct, CumulativePct float64
- GraphWidth int
-}
-
-// data holds data about a Distribution for use in distTmpl.
-type data struct {
- Buckets []*bucketData
- Count, Median int64
- Mean, StandardDeviation float64
-}
-
-// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets.
-const maxHTMLBarWidth = 350.0
-
-// newData returns data representing h for use in distTmpl.
-func (h *histogram) newData() *data {
- // Force the allocation of buckets to simplify the rendering implementation
- h.allocateBuckets()
- // We scale the bars on the right so that the largest bar is
- // maxHTMLBarWidth pixels in width.
- maxBucket := int64(0)
- for _, n := range h.buckets {
- if n > maxBucket {
- maxBucket = n
- }
- }
- total := h.total()
- barsizeMult := maxHTMLBarWidth / float64(maxBucket)
- var pctMult float64
- if total == 0 {
- pctMult = 1.0
- } else {
- pctMult = 100.0 / float64(total)
- }
-
- buckets := make([]*bucketData, len(h.buckets))
- runningTotal := int64(0)
- for i, n := range h.buckets {
- if n == 0 {
- continue
- }
- runningTotal += n
- var upperBound int64
- if i < bucketCount-1 {
- upperBound = bucketBoundary(uint8(i + 1))
- } else {
- upperBound = math.MaxInt64
- }
- buckets[i] = &bucketData{
- Lower: bucketBoundary(uint8(i)),
- Upper: upperBound,
- N: n,
- Pct: float64(n) * pctMult,
- CumulativePct: float64(runningTotal) * pctMult,
- GraphWidth: int(float64(n) * barsizeMult),
- }
- }
- return &data{
- Buckets: buckets,
- Count: total,
- Median: h.median(),
- Mean: h.average(),
- StandardDeviation: h.standardDeviation(),
- }
-}
-
-func (h *histogram) html() template.HTML {
- buf := new(bytes.Buffer)
- if err := distTmpl().Execute(buf, h.newData()); err != nil {
- buf.Reset()
- log.Printf("net/trace: couldn't execute template: %v", err)
- }
- return template.HTML(buf.String())
-}
-
-var distTmplCache *template.Template
-var distTmplOnce sync.Once
-
-func distTmpl() *template.Template {
- distTmplOnce.Do(func() {
- // Input: data
- distTmplCache = template.Must(template.New("distTmpl").Parse(`
-
-
- Count: {{.Count}}
- Mean: {{printf "%.0f" .Mean}}
- StdDev: {{printf "%.0f" .StandardDeviation}}
- Median: {{.Median}}
-
-
-
-
-{{range $b := .Buckets}}
-{{if $b}}
-
- [
- {{.Lower}},
- {{.Upper}})
- {{.N}}
- {{printf "%#.3f" .Pct}}%
- {{printf "%#.3f" .CumulativePct}}%
-
-
-{{end}}
-{{end}}
-
-`))
- })
- return distTmplCache
-}
diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go
deleted file mode 100644
index eae2a99f..00000000
--- a/vendor/golang.org/x/net/trace/trace.go
+++ /dev/null
@@ -1,1130 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package trace implements tracing of requests and long-lived objects.
-It exports HTTP interfaces on /debug/requests and /debug/events.
-
-A trace.Trace provides tracing for short-lived objects, usually requests.
-A request handler might be implemented like this:
-
- func fooHandler(w http.ResponseWriter, req *http.Request) {
- tr := trace.New("mypkg.Foo", req.URL.Path)
- defer tr.Finish()
- ...
- tr.LazyPrintf("some event %q happened", str)
- ...
- if err := somethingImportant(); err != nil {
- tr.LazyPrintf("somethingImportant failed: %v", err)
- tr.SetError()
- }
- }
-
-The /debug/requests HTTP endpoint organizes the traces by family,
-errors, and duration. It also provides histogram of request duration
-for each family.
-
-A trace.EventLog provides tracing for long-lived objects, such as RPC
-connections.
-
- // A Fetcher fetches URL paths for a single domain.
- type Fetcher struct {
- domain string
- events trace.EventLog
- }
-
- func NewFetcher(domain string) *Fetcher {
- return &Fetcher{
- domain,
- trace.NewEventLog("mypkg.Fetcher", domain),
- }
- }
-
- func (f *Fetcher) Fetch(path string) (string, error) {
- resp, err := http.Get("http://" + f.domain + "/" + path)
- if err != nil {
- f.events.Errorf("Get(%q) = %v", path, err)
- return "", err
- }
- f.events.Printf("Get(%q) = %s", path, resp.Status)
- ...
- }
-
- func (f *Fetcher) Close() error {
- f.events.Finish()
- return nil
- }
-
-The /debug/events HTTP endpoint organizes the event logs by family and
-by time since the last error. The expanded view displays recent log
-entries and the log's call stack.
-*/
-package trace // import "golang.org/x/net/trace"
-
-import (
- "bytes"
- "context"
- "fmt"
- "html/template"
- "io"
- "log"
- "net"
- "net/http"
- "net/url"
- "runtime"
- "sort"
- "strconv"
- "sync"
- "sync/atomic"
- "time"
-
- "golang.org/x/net/internal/timeseries"
-)
-
-// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing.
-// FOR DEBUGGING ONLY. This will slow down the program.
-var DebugUseAfterFinish = false
-
-// HTTP ServeMux paths.
-const (
- debugRequestsPath = "/debug/requests"
- debugEventsPath = "/debug/events"
-)
-
-// AuthRequest determines whether a specific request is permitted to load the
-// /debug/requests or /debug/events pages.
-//
-// It returns two bools; the first indicates whether the page may be viewed at all,
-// and the second indicates whether sensitive events will be shown.
-//
-// AuthRequest may be replaced by a program to customize its authorization requirements.
-//
-// The default AuthRequest function returns (true, true) if and only if the request
-// comes from localhost/127.0.0.1/[::1].
-var AuthRequest = func(req *http.Request) (any, sensitive bool) {
- // RemoteAddr is commonly in the form "IP" or "IP:port".
- // If it is in the form "IP:port", split off the port.
- host, _, err := net.SplitHostPort(req.RemoteAddr)
- if err != nil {
- host = req.RemoteAddr
- }
- switch host {
- case "localhost", "127.0.0.1", "::1":
- return true, true
- default:
- return false, false
- }
-}
-
-func init() {
- _, pat := http.DefaultServeMux.Handler(&http.Request{URL: &url.URL{Path: debugRequestsPath}})
- if pat == debugRequestsPath {
- panic("/debug/requests is already registered. You may have two independent copies of " +
- "golang.org/x/net/trace in your binary, trying to maintain separate state. This may " +
- "involve a vendored copy of golang.org/x/net/trace.")
- }
-
- // TODO(jbd): Serve Traces from /debug/traces in the future?
- // There is no requirement for a request to be present to have traces.
- http.HandleFunc(debugRequestsPath, Traces)
- http.HandleFunc(debugEventsPath, Events)
-}
-
-// NewContext returns a copy of the parent context
-// and associates it with a Trace.
-func NewContext(ctx context.Context, tr Trace) context.Context {
- return context.WithValue(ctx, contextKey, tr)
-}
-
-// FromContext returns the Trace bound to the context, if any.
-func FromContext(ctx context.Context) (tr Trace, ok bool) {
- tr, ok = ctx.Value(contextKey).(Trace)
- return
-}
-
-// Traces responds with traces from the program.
-// The package initialization registers it in http.DefaultServeMux
-// at /debug/requests.
-//
-// It performs authorization by running AuthRequest.
-func Traces(w http.ResponseWriter, req *http.Request) {
- any, sensitive := AuthRequest(req)
- if !any {
- http.Error(w, "not allowed", http.StatusUnauthorized)
- return
- }
- w.Header().Set("Content-Type", "text/html; charset=utf-8")
- Render(w, req, sensitive)
-}
-
-// Events responds with a page of events collected by EventLogs.
-// The package initialization registers it in http.DefaultServeMux
-// at /debug/events.
-//
-// It performs authorization by running AuthRequest.
-func Events(w http.ResponseWriter, req *http.Request) {
- any, sensitive := AuthRequest(req)
- if !any {
- http.Error(w, "not allowed", http.StatusUnauthorized)
- return
- }
- w.Header().Set("Content-Type", "text/html; charset=utf-8")
- RenderEvents(w, req, sensitive)
-}
-
-// Render renders the HTML page typically served at /debug/requests.
-// It does not do any auth checking. The request may be nil.
-//
-// Most users will use the Traces handler.
-func Render(w io.Writer, req *http.Request, sensitive bool) {
- data := &struct {
- Families []string
- ActiveTraceCount map[string]int
- CompletedTraces map[string]*family
-
- // Set when a bucket has been selected.
- Traces traceList
- Family string
- Bucket int
- Expanded bool
- Traced bool
- Active bool
- ShowSensitive bool // whether to show sensitive events
-
- Histogram template.HTML
- HistogramWindow string // e.g. "last minute", "last hour", "all time"
-
- // If non-zero, the set of traces is a partial set,
- // and this is the total number.
- Total int
- }{
- CompletedTraces: completedTraces,
- }
-
- data.ShowSensitive = sensitive
- if req != nil {
- // Allow show_sensitive=0 to force hiding of sensitive data for testing.
- // This only goes one way; you can't use show_sensitive=1 to see things.
- if req.FormValue("show_sensitive") == "0" {
- data.ShowSensitive = false
- }
-
- if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil {
- data.Expanded = exp
- }
- if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil {
- data.Traced = exp
- }
- }
-
- completedMu.RLock()
- data.Families = make([]string, 0, len(completedTraces))
- for fam := range completedTraces {
- data.Families = append(data.Families, fam)
- }
- completedMu.RUnlock()
- sort.Strings(data.Families)
-
- // We are careful here to minimize the time spent locking activeMu,
- // since that lock is required every time an RPC starts and finishes.
- data.ActiveTraceCount = make(map[string]int, len(data.Families))
- activeMu.RLock()
- for fam, s := range activeTraces {
- data.ActiveTraceCount[fam] = s.Len()
- }
- activeMu.RUnlock()
-
- var ok bool
- data.Family, data.Bucket, ok = parseArgs(req)
- switch {
- case !ok:
- // No-op
- case data.Bucket == -1:
- data.Active = true
- n := data.ActiveTraceCount[data.Family]
- data.Traces = getActiveTraces(data.Family)
- if len(data.Traces) < n {
- data.Total = n
- }
- case data.Bucket < bucketsPerFamily:
- if b := lookupBucket(data.Family, data.Bucket); b != nil {
- data.Traces = b.Copy(data.Traced)
- }
- default:
- if f := getFamily(data.Family, false); f != nil {
- var obs timeseries.Observable
- f.LatencyMu.RLock()
- switch o := data.Bucket - bucketsPerFamily; o {
- case 0:
- obs = f.Latency.Minute()
- data.HistogramWindow = "last minute"
- case 1:
- obs = f.Latency.Hour()
- data.HistogramWindow = "last hour"
- case 2:
- obs = f.Latency.Total()
- data.HistogramWindow = "all time"
- }
- f.LatencyMu.RUnlock()
- if obs != nil {
- data.Histogram = obs.(*histogram).html()
- }
- }
- }
-
- if data.Traces != nil {
- defer data.Traces.Free()
- sort.Sort(data.Traces)
- }
-
- completedMu.RLock()
- defer completedMu.RUnlock()
- if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil {
- log.Printf("net/trace: Failed executing template: %v", err)
- }
-}
-
-func parseArgs(req *http.Request) (fam string, b int, ok bool) {
- if req == nil {
- return "", 0, false
- }
- fam, bStr := req.FormValue("fam"), req.FormValue("b")
- if fam == "" || bStr == "" {
- return "", 0, false
- }
- b, err := strconv.Atoi(bStr)
- if err != nil || b < -1 {
- return "", 0, false
- }
-
- return fam, b, true
-}
-
-func lookupBucket(fam string, b int) *traceBucket {
- f := getFamily(fam, false)
- if f == nil || b < 0 || b >= len(f.Buckets) {
- return nil
- }
- return f.Buckets[b]
-}
-
-type contextKeyT string
-
-var contextKey = contextKeyT("golang.org/x/net/trace.Trace")
-
-// Trace represents an active request.
-type Trace interface {
- // LazyLog adds x to the event log. It will be evaluated each time the
- // /debug/requests page is rendered. Any memory referenced by x will be
- // pinned until the trace is finished and later discarded.
- LazyLog(x fmt.Stringer, sensitive bool)
-
- // LazyPrintf evaluates its arguments with fmt.Sprintf each time the
- // /debug/requests page is rendered. Any memory referenced by a will be
- // pinned until the trace is finished and later discarded.
- LazyPrintf(format string, a ...interface{})
-
- // SetError declares that this trace resulted in an error.
- SetError()
-
- // SetRecycler sets a recycler for the trace.
- // f will be called for each event passed to LazyLog at a time when
- // it is no longer required, whether while the trace is still active
- // and the event is discarded, or when a completed trace is discarded.
- SetRecycler(f func(interface{}))
-
- // SetTraceInfo sets the trace info for the trace.
- // This is currently unused.
- SetTraceInfo(traceID, spanID uint64)
-
- // SetMaxEvents sets the maximum number of events that will be stored
- // in the trace. This has no effect if any events have already been
- // added to the trace.
- SetMaxEvents(m int)
-
- // Finish declares that this trace is complete.
- // The trace should not be used after calling this method.
- Finish()
-}
-
-type lazySprintf struct {
- format string
- a []interface{}
-}
-
-func (l *lazySprintf) String() string {
- return fmt.Sprintf(l.format, l.a...)
-}
-
-// New returns a new Trace with the specified family and title.
-func New(family, title string) Trace {
- tr := newTrace()
- tr.ref()
- tr.Family, tr.Title = family, title
- tr.Start = time.Now()
- tr.maxEvents = maxEventsPerTrace
- tr.events = tr.eventsBuf[:0]
-
- activeMu.RLock()
- s := activeTraces[tr.Family]
- activeMu.RUnlock()
- if s == nil {
- activeMu.Lock()
- s = activeTraces[tr.Family] // check again
- if s == nil {
- s = new(traceSet)
- activeTraces[tr.Family] = s
- }
- activeMu.Unlock()
- }
- s.Add(tr)
-
- // Trigger allocation of the completed trace structure for this family.
- // This will cause the family to be present in the request page during
- // the first trace of this family. We don't care about the return value,
- // nor is there any need for this to run inline, so we execute it in its
- // own goroutine, but only if the family isn't allocated yet.
- completedMu.RLock()
- if _, ok := completedTraces[tr.Family]; !ok {
- go allocFamily(tr.Family)
- }
- completedMu.RUnlock()
-
- return tr
-}
-
-func (tr *trace) Finish() {
- elapsed := time.Since(tr.Start)
- tr.mu.Lock()
- tr.Elapsed = elapsed
- tr.mu.Unlock()
-
- if DebugUseAfterFinish {
- buf := make([]byte, 4<<10) // 4 KB should be enough
- n := runtime.Stack(buf, false)
- tr.finishStack = buf[:n]
- }
-
- activeMu.RLock()
- m := activeTraces[tr.Family]
- activeMu.RUnlock()
- m.Remove(tr)
-
- f := getFamily(tr.Family, true)
- tr.mu.RLock() // protects tr fields in Cond.match calls
- for _, b := range f.Buckets {
- if b.Cond.match(tr) {
- b.Add(tr)
- }
- }
- tr.mu.RUnlock()
-
- // Add a sample of elapsed time as microseconds to the family's timeseries
- h := new(histogram)
- h.addMeasurement(elapsed.Nanoseconds() / 1e3)
- f.LatencyMu.Lock()
- f.Latency.Add(h)
- f.LatencyMu.Unlock()
-
- tr.unref() // matches ref in New
-}
-
-const (
- bucketsPerFamily = 9
- tracesPerBucket = 10
- maxActiveTraces = 20 // Maximum number of active traces to show.
- maxEventsPerTrace = 10
- numHistogramBuckets = 38
-)
-
-var (
- // The active traces.
- activeMu sync.RWMutex
- activeTraces = make(map[string]*traceSet) // family -> traces
-
- // Families of completed traces.
- completedMu sync.RWMutex
- completedTraces = make(map[string]*family) // family -> traces
-)
-
-type traceSet struct {
- mu sync.RWMutex
- m map[*trace]bool
-
- // We could avoid the entire map scan in FirstN by having a slice of all the traces
- // ordered by start time, and an index into that from the trace struct, with a periodic
- // repack of the slice after enough traces finish; we could also use a skip list or similar.
- // However, that would shift some of the expense from /debug/requests time to RPC time,
- // which is probably the wrong trade-off.
-}
-
-func (ts *traceSet) Len() int {
- ts.mu.RLock()
- defer ts.mu.RUnlock()
- return len(ts.m)
-}
-
-func (ts *traceSet) Add(tr *trace) {
- ts.mu.Lock()
- if ts.m == nil {
- ts.m = make(map[*trace]bool)
- }
- ts.m[tr] = true
- ts.mu.Unlock()
-}
-
-func (ts *traceSet) Remove(tr *trace) {
- ts.mu.Lock()
- delete(ts.m, tr)
- ts.mu.Unlock()
-}
-
-// FirstN returns the first n traces ordered by time.
-func (ts *traceSet) FirstN(n int) traceList {
- ts.mu.RLock()
- defer ts.mu.RUnlock()
-
- if n > len(ts.m) {
- n = len(ts.m)
- }
- trl := make(traceList, 0, n)
-
- // Fast path for when no selectivity is needed.
- if n == len(ts.m) {
- for tr := range ts.m {
- tr.ref()
- trl = append(trl, tr)
- }
- sort.Sort(trl)
- return trl
- }
-
- // Pick the oldest n traces.
- // This is inefficient. See the comment in the traceSet struct.
- for tr := range ts.m {
- // Put the first n traces into trl in the order they occur.
- // When we have n, sort trl, and thereafter maintain its order.
- if len(trl) < n {
- tr.ref()
- trl = append(trl, tr)
- if len(trl) == n {
- // This is guaranteed to happen exactly once during this loop.
- sort.Sort(trl)
- }
- continue
- }
- if tr.Start.After(trl[n-1].Start) {
- continue
- }
-
- // Find where to insert this one.
- tr.ref()
- i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) })
- trl[n-1].unref()
- copy(trl[i+1:], trl[i:])
- trl[i] = tr
- }
-
- return trl
-}
-
-func getActiveTraces(fam string) traceList {
- activeMu.RLock()
- s := activeTraces[fam]
- activeMu.RUnlock()
- if s == nil {
- return nil
- }
- return s.FirstN(maxActiveTraces)
-}
-
-func getFamily(fam string, allocNew bool) *family {
- completedMu.RLock()
- f := completedTraces[fam]
- completedMu.RUnlock()
- if f == nil && allocNew {
- f = allocFamily(fam)
- }
- return f
-}
-
-func allocFamily(fam string) *family {
- completedMu.Lock()
- defer completedMu.Unlock()
- f := completedTraces[fam]
- if f == nil {
- f = newFamily()
- completedTraces[fam] = f
- }
- return f
-}
-
-// family represents a set of trace buckets and associated latency information.
-type family struct {
- // traces may occur in multiple buckets.
- Buckets [bucketsPerFamily]*traceBucket
-
- // latency time series
- LatencyMu sync.RWMutex
- Latency *timeseries.MinuteHourSeries
-}
-
-func newFamily() *family {
- return &family{
- Buckets: [bucketsPerFamily]*traceBucket{
- {Cond: minCond(0)},
- {Cond: minCond(50 * time.Millisecond)},
- {Cond: minCond(100 * time.Millisecond)},
- {Cond: minCond(200 * time.Millisecond)},
- {Cond: minCond(500 * time.Millisecond)},
- {Cond: minCond(1 * time.Second)},
- {Cond: minCond(10 * time.Second)},
- {Cond: minCond(100 * time.Second)},
- {Cond: errorCond{}},
- },
- Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }),
- }
-}
-
-// traceBucket represents a size-capped bucket of historic traces,
-// along with a condition for a trace to belong to the bucket.
-type traceBucket struct {
- Cond cond
-
- // Ring buffer implementation of a fixed-size FIFO queue.
- mu sync.RWMutex
- buf [tracesPerBucket]*trace
- start int // < tracesPerBucket
- length int // <= tracesPerBucket
-}
-
-func (b *traceBucket) Add(tr *trace) {
- b.mu.Lock()
- defer b.mu.Unlock()
-
- i := b.start + b.length
- if i >= tracesPerBucket {
- i -= tracesPerBucket
- }
- if b.length == tracesPerBucket {
- // "Remove" an element from the bucket.
- b.buf[i].unref()
- b.start++
- if b.start == tracesPerBucket {
- b.start = 0
- }
- }
- b.buf[i] = tr
- if b.length < tracesPerBucket {
- b.length++
- }
- tr.ref()
-}
-
-// Copy returns a copy of the traces in the bucket.
-// If tracedOnly is true, only the traces with trace information will be returned.
-// The logs will be ref'd before returning; the caller should call
-// the Free method when it is done with them.
-// TODO(dsymonds): keep track of traced requests in separate buckets.
-func (b *traceBucket) Copy(tracedOnly bool) traceList {
- b.mu.RLock()
- defer b.mu.RUnlock()
-
- trl := make(traceList, 0, b.length)
- for i, x := 0, b.start; i < b.length; i++ {
- tr := b.buf[x]
- if !tracedOnly || tr.spanID != 0 {
- tr.ref()
- trl = append(trl, tr)
- }
- x++
- if x == b.length {
- x = 0
- }
- }
- return trl
-}
-
-func (b *traceBucket) Empty() bool {
- b.mu.RLock()
- defer b.mu.RUnlock()
- return b.length == 0
-}
-
-// cond represents a condition on a trace.
-type cond interface {
- match(t *trace) bool
- String() string
-}
-
-type minCond time.Duration
-
-func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) }
-func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) }
-
-type errorCond struct{}
-
-func (e errorCond) match(t *trace) bool { return t.IsError }
-func (e errorCond) String() string { return "errors" }
-
-type traceList []*trace
-
-// Free calls unref on each element of the list.
-func (trl traceList) Free() {
- for _, t := range trl {
- t.unref()
- }
-}
-
-// traceList may be sorted in reverse chronological order.
-func (trl traceList) Len() int { return len(trl) }
-func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) }
-func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] }
-
-// An event is a timestamped log entry in a trace.
-type event struct {
- When time.Time
- Elapsed time.Duration // since previous event in trace
- NewDay bool // whether this event is on a different day to the previous event
- Recyclable bool // whether this event was passed via LazyLog
- Sensitive bool // whether this event contains sensitive information
- What interface{} // string or fmt.Stringer
-}
-
-// WhenString returns a string representation of the elapsed time of the event.
-// It will include the date if midnight was crossed.
-func (e event) WhenString() string {
- if e.NewDay {
- return e.When.Format("2006/01/02 15:04:05.000000")
- }
- return e.When.Format("15:04:05.000000")
-}
-
-// discarded represents a number of discarded events.
-// It is stored as *discarded to make it easier to update in-place.
-type discarded int
-
-func (d *discarded) String() string {
- return fmt.Sprintf("(%d events discarded)", int(*d))
-}
-
-// trace represents an active or complete request,
-// either sent or received by this program.
-type trace struct {
- // Family is the top-level grouping of traces to which this belongs.
- Family string
-
- // Title is the title of this trace.
- Title string
-
- // Start time of the this trace.
- Start time.Time
-
- mu sync.RWMutex
- events []event // Append-only sequence of events (modulo discards).
- maxEvents int
- recycler func(interface{})
- IsError bool // Whether this trace resulted in an error.
- Elapsed time.Duration // Elapsed time for this trace, zero while active.
- traceID uint64 // Trace information if non-zero.
- spanID uint64
-
- refs int32 // how many buckets this is in
- disc discarded // scratch space to avoid allocation
-
- finishStack []byte // where finish was called, if DebugUseAfterFinish is set
-
- eventsBuf [4]event // preallocated buffer in case we only log a few events
-}
-
-func (tr *trace) reset() {
- // Clear all but the mutex. Mutexes may not be copied, even when unlocked.
- tr.Family = ""
- tr.Title = ""
- tr.Start = time.Time{}
-
- tr.mu.Lock()
- tr.Elapsed = 0
- tr.traceID = 0
- tr.spanID = 0
- tr.IsError = false
- tr.maxEvents = 0
- tr.events = nil
- tr.recycler = nil
- tr.mu.Unlock()
-
- tr.refs = 0
- tr.disc = 0
- tr.finishStack = nil
- for i := range tr.eventsBuf {
- tr.eventsBuf[i] = event{}
- }
-}
-
-// delta returns the elapsed time since the last event or the trace start,
-// and whether it spans midnight.
-// L >= tr.mu
-func (tr *trace) delta(t time.Time) (time.Duration, bool) {
- if len(tr.events) == 0 {
- return t.Sub(tr.Start), false
- }
- prev := tr.events[len(tr.events)-1].When
- return t.Sub(prev), prev.Day() != t.Day()
-}
-
-func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) {
- if DebugUseAfterFinish && tr.finishStack != nil {
- buf := make([]byte, 4<<10) // 4 KB should be enough
- n := runtime.Stack(buf, false)
- log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n])
- }
-
- /*
- NOTE TO DEBUGGERS
-
- If you are here because your program panicked in this code,
- it is almost definitely the fault of code using this package,
- and very unlikely to be the fault of this code.
-
- The most likely scenario is that some code elsewhere is using
- a trace.Trace after its Finish method is called.
- You can temporarily set the DebugUseAfterFinish var
- to help discover where that is; do not leave that var set,
- since it makes this package much less efficient.
- */
-
- e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive}
- tr.mu.Lock()
- e.Elapsed, e.NewDay = tr.delta(e.When)
- if len(tr.events) < tr.maxEvents {
- tr.events = append(tr.events, e)
- } else {
- // Discard the middle events.
- di := int((tr.maxEvents - 1) / 2)
- if d, ok := tr.events[di].What.(*discarded); ok {
- (*d)++
- } else {
- // disc starts at two to count for the event it is replacing,
- // plus the next one that we are about to drop.
- tr.disc = 2
- if tr.recycler != nil && tr.events[di].Recyclable {
- go tr.recycler(tr.events[di].What)
- }
- tr.events[di].What = &tr.disc
- }
- // The timestamp of the discarded meta-event should be
- // the time of the last event it is representing.
- tr.events[di].When = tr.events[di+1].When
-
- if tr.recycler != nil && tr.events[di+1].Recyclable {
- go tr.recycler(tr.events[di+1].What)
- }
- copy(tr.events[di+1:], tr.events[di+2:])
- tr.events[tr.maxEvents-1] = e
- }
- tr.mu.Unlock()
-}
-
-func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) {
- tr.addEvent(x, true, sensitive)
-}
-
-func (tr *trace) LazyPrintf(format string, a ...interface{}) {
- tr.addEvent(&lazySprintf{format, a}, false, false)
-}
-
-func (tr *trace) SetError() {
- tr.mu.Lock()
- tr.IsError = true
- tr.mu.Unlock()
-}
-
-func (tr *trace) SetRecycler(f func(interface{})) {
- tr.mu.Lock()
- tr.recycler = f
- tr.mu.Unlock()
-}
-
-func (tr *trace) SetTraceInfo(traceID, spanID uint64) {
- tr.mu.Lock()
- tr.traceID, tr.spanID = traceID, spanID
- tr.mu.Unlock()
-}
-
-func (tr *trace) SetMaxEvents(m int) {
- tr.mu.Lock()
- // Always keep at least three events: first, discarded count, last.
- if len(tr.events) == 0 && m > 3 {
- tr.maxEvents = m
- }
- tr.mu.Unlock()
-}
-
-func (tr *trace) ref() {
- atomic.AddInt32(&tr.refs, 1)
-}
-
-func (tr *trace) unref() {
- if atomic.AddInt32(&tr.refs, -1) == 0 {
- tr.mu.RLock()
- if tr.recycler != nil {
- // freeTrace clears tr, so we hold tr.recycler and tr.events here.
- go func(f func(interface{}), es []event) {
- for _, e := range es {
- if e.Recyclable {
- f(e.What)
- }
- }
- }(tr.recycler, tr.events)
- }
- tr.mu.RUnlock()
-
- freeTrace(tr)
- }
-}
-
-func (tr *trace) When() string {
- return tr.Start.Format("2006/01/02 15:04:05.000000")
-}
-
-func (tr *trace) ElapsedTime() string {
- tr.mu.RLock()
- t := tr.Elapsed
- tr.mu.RUnlock()
-
- if t == 0 {
- // Active trace.
- t = time.Since(tr.Start)
- }
- return fmt.Sprintf("%.6f", t.Seconds())
-}
-
-func (tr *trace) Events() []event {
- tr.mu.RLock()
- defer tr.mu.RUnlock()
- return tr.events
-}
-
-var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool?
-
-// newTrace returns a trace ready to use.
-func newTrace() *trace {
- select {
- case tr := <-traceFreeList:
- return tr
- default:
- return new(trace)
- }
-}
-
-// freeTrace adds tr to traceFreeList if there's room.
-// This is non-blocking.
-func freeTrace(tr *trace) {
- if DebugUseAfterFinish {
- return // never reuse
- }
- tr.reset()
- select {
- case traceFreeList <- tr:
- default:
- }
-}
-
-func elapsed(d time.Duration) string {
- b := []byte(fmt.Sprintf("%.6f", d.Seconds()))
-
- // For subsecond durations, blank all zeros before decimal point,
- // and all zeros between the decimal point and the first non-zero digit.
- if d < time.Second {
- dot := bytes.IndexByte(b, '.')
- for i := 0; i < dot; i++ {
- b[i] = ' '
- }
- for i := dot + 1; i < len(b); i++ {
- if b[i] == '0' {
- b[i] = ' '
- } else {
- break
- }
- }
- }
-
- return string(b)
-}
-
-var pageTmplCache *template.Template
-var pageTmplOnce sync.Once
-
-func pageTmpl() *template.Template {
- pageTmplOnce.Do(func() {
- pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{
- "elapsed": elapsed,
- "add": func(a, b int) int { return a + b },
- }).Parse(pageHTML))
- })
- return pageTmplCache
-}
-
-const pageHTML = `
-{{template "Prolog" .}}
-{{template "StatusTable" .}}
-{{template "Epilog" .}}
-
-{{define "Prolog"}}
-
-
- /debug/requests
-
-
-
-
-/debug/requests
-{{end}} {{/* end of Prolog */}}
-
-{{define "StatusTable"}}
-
-{{end}} {{/* end of StatusTable */}}
-
-{{define "Epilog"}}
-{{if $.Traces}}
-
-Family: {{$.Family}}
-
-{{if or $.Expanded $.Traced}}
- [Normal/Summary]
-{{else}}
- [Normal/Summary]
-{{end}}
-
-{{if or (not $.Expanded) $.Traced}}
- [Normal/Expanded]
-{{else}}
- [Normal/Expanded]
-{{end}}
-
-{{if not $.Active}}
- {{if or $.Expanded (not $.Traced)}}
- [Traced/Summary]
- {{else}}
- [Traced/Summary]
- {{end}}
- {{if or (not $.Expanded) (not $.Traced)}}
- [Traced/Expanded]
- {{else}}
- [Traced/Expanded]
- {{end}}
-{{end}}
-
-{{if $.Total}}
- Showing {{len $.Traces}} of {{$.Total}} traces.
-{{end}}
-
-
-
- {{if $.Active}}Active{{else}}Completed{{end}} Requests
-
- When Elapsed (s)
- {{range $tr := $.Traces}}
-
- {{$tr.When}}
- {{$tr.ElapsedTime}}
- {{$tr.Title}}
- {{/* TODO: include traceID/spanID */}}
-
- {{if $.Expanded}}
- {{range $tr.Events}}
-
- {{.WhenString}}
- {{elapsed .Elapsed}}
- {{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}[redacted] {{end}}
-
- {{end}}
- {{end}}
- {{end}}
-
-{{end}} {{/* if $.Traces */}}
-
-{{if $.Histogram}}
-Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}
-{{$.Histogram}}
-{{end}} {{/* if $.Histogram */}}
-
-
-
-{{end}} {{/* end of Epilog */}}
-`
diff --git a/vendor/golang.org/x/sys/LICENSE b/vendor/golang.org/x/sys/LICENSE
deleted file mode 100644
index 6a66aea5..00000000
--- a/vendor/golang.org/x/sys/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/sys/PATENTS b/vendor/golang.org/x/sys/PATENTS
deleted file mode 100644
index 73309904..00000000
--- a/vendor/golang.org/x/sys/PATENTS
+++ /dev/null
@@ -1,22 +0,0 @@
-Additional IP Rights Grant (Patents)
-
-"This implementation" means the copyrightable works distributed by
-Google as part of the Go project.
-
-Google hereby grants to You a perpetual, worldwide, non-exclusive,
-no-charge, royalty-free, irrevocable (except as stated in this section)
-patent license to make, have made, use, offer to sell, sell, import,
-transfer and otherwise run, modify and propagate the contents of this
-implementation of Go, where such license applies only to those patent
-claims, both currently owned or controlled by Google and acquired in
-the future, licensable by Google that are necessarily infringed by this
-implementation of Go. This grant does not include claims that would be
-infringed only as a consequence of further modification of this
-implementation. If you or your agent or exclusive licensee institute or
-order or agree to the institution of patent litigation against any
-entity (including a cross-claim or counterclaim in a lawsuit) alleging
-that this implementation of Go or any code incorporated within this
-implementation of Go constitutes direct or contributory patent
-infringement, or inducement of patent infringement, then any patent
-rights granted to you under this License for this implementation of Go
-shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/sys/unix/.gitignore b/vendor/golang.org/x/sys/unix/.gitignore
deleted file mode 100644
index e3e0fc6f..00000000
--- a/vendor/golang.org/x/sys/unix/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-_obj/
-unix.test
diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md
deleted file mode 100644
index 7d3c060e..00000000
--- a/vendor/golang.org/x/sys/unix/README.md
+++ /dev/null
@@ -1,184 +0,0 @@
-# Building `sys/unix`
-
-The sys/unix package provides access to the raw system call interface of the
-underlying operating system. See: https://godoc.org/golang.org/x/sys/unix
-
-Porting Go to a new architecture/OS combination or adding syscalls, types, or
-constants to an existing architecture/OS pair requires some manual effort;
-however, there are tools that automate much of the process.
-
-## Build Systems
-
-There are currently two ways we generate the necessary files. We are currently
-migrating the build system to use containers so the builds are reproducible.
-This is being done on an OS-by-OS basis. Please update this documentation as
-components of the build system change.
-
-### Old Build System (currently for `GOOS != "linux"`)
-
-The old build system generates the Go files based on the C header files
-present on your system. This means that files
-for a given GOOS/GOARCH pair must be generated on a system with that OS and
-architecture. This also means that the generated code can differ from system
-to system, based on differences in the header files.
-
-To avoid this, if you are using the old build system, only generate the Go
-files on an installation with unmodified header files. It is also important to
-keep track of which version of the OS the files were generated from (ex.
-Darwin 14 vs Darwin 15). This makes it easier to track the progress of changes
-and have each OS upgrade correspond to a single change.
-
-To build the files for your current OS and architecture, make sure GOOS and
-GOARCH are set correctly and run `mkall.sh`. This will generate the files for
-your specific system. Running `mkall.sh -n` shows the commands that will be run.
-
-Requirements: bash, go
-
-### New Build System (currently for `GOOS == "linux"`)
-
-The new build system uses a Docker container to generate the go files directly
-from source checkouts of the kernel and various system libraries. This means
-that on any platform that supports Docker, all the files using the new build
-system can be generated at once, and generated files will not change based on
-what the person running the scripts has installed on their computer.
-
-The OS specific files for the new build system are located in the `${GOOS}`
-directory, and the build is coordinated by the `${GOOS}/mkall.go` program. When
-the kernel or system library updates, modify the Dockerfile at
-`${GOOS}/Dockerfile` to checkout the new release of the source.
-
-To build all the files under the new build system, you must be on an amd64/Linux
-system and have your GOOS and GOARCH set accordingly. Running `mkall.sh` will
-then generate all of the files for all of the GOOS/GOARCH pairs in the new build
-system. Running `mkall.sh -n` shows the commands that will be run.
-
-Requirements: bash, go, docker
-
-## Component files
-
-This section describes the various files used in the code generation process.
-It also contains instructions on how to modify these files to add a new
-architecture/OS or to add additional syscalls, types, or constants. Note that
-if you are using the new build system, the scripts/programs cannot be called normally.
-They must be called from within the docker container.
-
-### asm files
-
-The hand-written assembly file at `asm_${GOOS}_${GOARCH}.s` implements system
-call dispatch. There are three entry points:
-```
- func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
- func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr)
- func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
-```
-The first and second are the standard ones; they differ only in how many
-arguments can be passed to the kernel. The third is for low-level use by the
-ForkExec wrapper. Unlike the first two, it does not call into the scheduler to
-let it know that a system call is running.
-
-When porting Go to a new architecture/OS, this file must be implemented for
-each GOOS/GOARCH pair.
-
-### mksysnum
-
-Mksysnum is a Go program located at `${GOOS}/mksysnum.go` (or `mksysnum_${GOOS}.go`
-for the old system). This program takes in a list of header files containing the
-syscall number declarations and parses them to produce the corresponding list of
-Go numeric constants. See `zsysnum_${GOOS}_${GOARCH}.go` for the generated
-constants.
-
-Adding new syscall numbers is mostly done by running the build on a sufficiently
-new installation of the target OS (or updating the source checkouts for the
-new build system). However, depending on the OS, you may need to update the
-parsing in mksysnum.
-
-### mksyscall.go
-
-The `syscall.go`, `syscall_${GOOS}.go`, `syscall_${GOOS}_${GOARCH}.go` are
-hand-written Go files which implement system calls (for unix, the specific OS,
-or the specific OS/Architecture pair respectively) that need special handling
-and list `//sys` comments giving prototypes for ones that can be generated.
-
-The mksyscall.go program takes the `//sys` and `//sysnb` comments and converts
-them into syscalls. This requires the name of the prototype in the comment to
-match a syscall number in the `zsysnum_${GOOS}_${GOARCH}.go` file. The function
-prototype can be exported (capitalized) or not.
-
-Adding a new syscall often just requires adding a new `//sys` function prototype
-with the desired arguments and a capitalized name so it is exported. However, if
-you want the interface to the syscall to be different, often one will make an
-unexported `//sys` prototype, and then write a custom wrapper in
-`syscall_${GOOS}.go`.
-
-### types files
-
-For each OS, there is a hand-written Go file at `${GOOS}/types.go` (or
-`types_${GOOS}.go` on the old system). This file includes standard C headers and
-creates Go type aliases to the corresponding C types. The file is then fed
-through godef to get the Go compatible definitions. Finally, the generated code
-is fed though mkpost.go to format the code correctly and remove any hidden or
-private identifiers. This cleaned-up code is written to
-`ztypes_${GOOS}_${GOARCH}.go`.
-
-The hardest part about preparing this file is figuring out which headers to
-include and which symbols need to be `#define`d to get the actual data
-structures that pass through to the kernel system calls. Some C libraries
-preset alternate versions for binary compatibility and translate them on the
-way in and out of system calls, but there is almost always a `#define` that can
-get the real ones.
-See `types_darwin.go` and `linux/types.go` for examples.
-
-To add a new type, add in the necessary include statement at the top of the
-file (if it is not already there) and add in a type alias line. Note that if
-your type is significantly different on different architectures, you may need
-some `#if/#elif` macros in your include statements.
-
-### mkerrors.sh
-
-This script is used to generate the system's various constants. This doesn't
-just include the error numbers and error strings, but also the signal numbers
-and a wide variety of miscellaneous constants. The constants come from the list
-of include files in the `includes_${uname}` variable. A regex then picks out
-the desired `#define` statements, and generates the corresponding Go constants.
-The error numbers and strings are generated from `#include `, and the
-signal numbers and strings are generated from `#include `. All of
-these constants are written to `zerrors_${GOOS}_${GOARCH}.go` via a C program,
-`_errors.c`, which prints out all the constants.
-
-To add a constant, add the header that includes it to the appropriate variable.
-Then, edit the regex (if necessary) to match the desired constant. Avoid making
-the regex too broad to avoid matching unintended constants.
-
-### internal/mkmerge
-
-This program is used to extract duplicate const, func, and type declarations
-from the generated architecture-specific files listed below, and merge these
-into a common file for each OS.
-
-The merge is performed in the following steps:
-1. Construct the set of common code that is idential in all architecture-specific files.
-2. Write this common code to the merged file.
-3. Remove the common code from all architecture-specific files.
-
-
-## Generated files
-
-### `zerrors_${GOOS}_${GOARCH}.go`
-
-A file containing all of the system's generated error numbers, error strings,
-signal numbers, and constants. Generated by `mkerrors.sh` (see above).
-
-### `zsyscall_${GOOS}_${GOARCH}.go`
-
-A file containing all the generated syscalls for a specific GOOS and GOARCH.
-Generated by `mksyscall.go` (see above).
-
-### `zsysnum_${GOOS}_${GOARCH}.go`
-
-A list of numeric constants for all the syscall number of the specific GOOS
-and GOARCH. Generated by mksysnum (see above).
-
-### `ztypes_${GOOS}_${GOARCH}.go`
-
-A file containing Go types for passing into (or returning from) syscalls.
-Generated by godefs and the types file (see above).
diff --git a/vendor/golang.org/x/sys/unix/affinity_linux.go b/vendor/golang.org/x/sys/unix/affinity_linux.go
deleted file mode 100644
index 6e5c81ac..00000000
--- a/vendor/golang.org/x/sys/unix/affinity_linux.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// CPU affinity functions
-
-package unix
-
-import (
- "math/bits"
- "unsafe"
-)
-
-const cpuSetSize = _CPU_SETSIZE / _NCPUBITS
-
-// CPUSet represents a CPU affinity mask.
-type CPUSet [cpuSetSize]cpuMask
-
-func schedAffinity(trap uintptr, pid int, set *CPUSet) error {
- _, _, e := RawSyscall(trap, uintptr(pid), uintptr(unsafe.Sizeof(*set)), uintptr(unsafe.Pointer(set)))
- if e != 0 {
- return errnoErr(e)
- }
- return nil
-}
-
-// SchedGetaffinity gets the CPU affinity mask of the thread specified by pid.
-// If pid is 0 the calling thread is used.
-func SchedGetaffinity(pid int, set *CPUSet) error {
- return schedAffinity(SYS_SCHED_GETAFFINITY, pid, set)
-}
-
-// SchedSetaffinity sets the CPU affinity mask of the thread specified by pid.
-// If pid is 0 the calling thread is used.
-func SchedSetaffinity(pid int, set *CPUSet) error {
- return schedAffinity(SYS_SCHED_SETAFFINITY, pid, set)
-}
-
-// Zero clears the set s, so that it contains no CPUs.
-func (s *CPUSet) Zero() {
- for i := range s {
- s[i] = 0
- }
-}
-
-func cpuBitsIndex(cpu int) int {
- return cpu / _NCPUBITS
-}
-
-func cpuBitsMask(cpu int) cpuMask {
- return cpuMask(1 << (uint(cpu) % _NCPUBITS))
-}
-
-// Set adds cpu to the set s.
-func (s *CPUSet) Set(cpu int) {
- i := cpuBitsIndex(cpu)
- if i < len(s) {
- s[i] |= cpuBitsMask(cpu)
- }
-}
-
-// Clear removes cpu from the set s.
-func (s *CPUSet) Clear(cpu int) {
- i := cpuBitsIndex(cpu)
- if i < len(s) {
- s[i] &^= cpuBitsMask(cpu)
- }
-}
-
-// IsSet reports whether cpu is in the set s.
-func (s *CPUSet) IsSet(cpu int) bool {
- i := cpuBitsIndex(cpu)
- if i < len(s) {
- return s[i]&cpuBitsMask(cpu) != 0
- }
- return false
-}
-
-// Count returns the number of CPUs in the set s.
-func (s *CPUSet) Count() int {
- c := 0
- for _, b := range s {
- c += bits.OnesCount64(uint64(b))
- }
- return c
-}
diff --git a/vendor/golang.org/x/sys/unix/aliases.go b/vendor/golang.org/x/sys/unix/aliases.go
deleted file mode 100644
index abc89c10..00000000
--- a/vendor/golang.org/x/sys/unix/aliases.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
-// +build go1.9
-
-package unix
-
-import "syscall"
-
-type Signal = syscall.Signal
-type Errno = syscall.Errno
-type SysProcAttr = syscall.SysProcAttr
diff --git a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s
deleted file mode 100644
index db9171c2..00000000
--- a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc
-// +build gc
-
-#include "textflag.h"
-
-//
-// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go
-//
-
-TEXT ·syscall6(SB),NOSPLIT,$0-88
- JMP syscall·syscall6(SB)
-
-TEXT ·rawSyscall6(SB),NOSPLIT,$0-88
- JMP syscall·rawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_386.s b/vendor/golang.org/x/sys/unix/asm_bsd_386.s
deleted file mode 100644
index e0fcd9b3..00000000
--- a/vendor/golang.org/x/sys/unix/asm_bsd_386.s
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (freebsd || netbsd || openbsd) && gc
-// +build freebsd netbsd openbsd
-// +build gc
-
-#include "textflag.h"
-
-// System call support for 386 BSD
-
-// Just jump to package syscall's implementation for all these functions.
-// The runtime may know about them.
-
-TEXT ·Syscall(SB),NOSPLIT,$0-28
- JMP syscall·Syscall(SB)
-
-TEXT ·Syscall6(SB),NOSPLIT,$0-40
- JMP syscall·Syscall6(SB)
-
-TEXT ·Syscall9(SB),NOSPLIT,$0-52
- JMP syscall·Syscall9(SB)
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-28
- JMP syscall·RawSyscall(SB)
-
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
- JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s
deleted file mode 100644
index 2b99c349..00000000
--- a/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (darwin || dragonfly || freebsd || netbsd || openbsd) && gc
-// +build darwin dragonfly freebsd netbsd openbsd
-// +build gc
-
-#include "textflag.h"
-
-// System call support for AMD64 BSD
-
-// Just jump to package syscall's implementation for all these functions.
-// The runtime may know about them.
-
-TEXT ·Syscall(SB),NOSPLIT,$0-56
- JMP syscall·Syscall(SB)
-
-TEXT ·Syscall6(SB),NOSPLIT,$0-80
- JMP syscall·Syscall6(SB)
-
-TEXT ·Syscall9(SB),NOSPLIT,$0-104
- JMP syscall·Syscall9(SB)
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-56
- JMP syscall·RawSyscall(SB)
-
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
- JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_arm.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s
deleted file mode 100644
index d702d4ad..00000000
--- a/vendor/golang.org/x/sys/unix/asm_bsd_arm.s
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (freebsd || netbsd || openbsd) && gc
-// +build freebsd netbsd openbsd
-// +build gc
-
-#include "textflag.h"
-
-// System call support for ARM BSD
-
-// Just jump to package syscall's implementation for all these functions.
-// The runtime may know about them.
-
-TEXT ·Syscall(SB),NOSPLIT,$0-28
- B syscall·Syscall(SB)
-
-TEXT ·Syscall6(SB),NOSPLIT,$0-40
- B syscall·Syscall6(SB)
-
-TEXT ·Syscall9(SB),NOSPLIT,$0-52
- B syscall·Syscall9(SB)
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-28
- B syscall·RawSyscall(SB)
-
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
- B syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s
deleted file mode 100644
index fe36a739..00000000
--- a/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (darwin || freebsd || netbsd || openbsd) && gc
-// +build darwin freebsd netbsd openbsd
-// +build gc
-
-#include "textflag.h"
-
-// System call support for ARM64 BSD
-
-// Just jump to package syscall's implementation for all these functions.
-// The runtime may know about them.
-
-TEXT ·Syscall(SB),NOSPLIT,$0-56
- JMP syscall·Syscall(SB)
-
-TEXT ·Syscall6(SB),NOSPLIT,$0-80
- JMP syscall·Syscall6(SB)
-
-TEXT ·Syscall9(SB),NOSPLIT,$0-104
- JMP syscall·Syscall9(SB)
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-56
- JMP syscall·RawSyscall(SB)
-
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
- JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s b/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s
deleted file mode 100644
index e5b9a848..00000000
--- a/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (darwin || freebsd || netbsd || openbsd) && gc
-// +build darwin freebsd netbsd openbsd
-// +build gc
-
-#include "textflag.h"
-
-//
-// System call support for ppc64, BSD
-//
-
-// Just jump to package syscall's implementation for all these functions.
-// The runtime may know about them.
-
-TEXT ·Syscall(SB),NOSPLIT,$0-56
- JMP syscall·Syscall(SB)
-
-TEXT ·Syscall6(SB),NOSPLIT,$0-80
- JMP syscall·Syscall6(SB)
-
-TEXT ·Syscall9(SB),NOSPLIT,$0-104
- JMP syscall·Syscall9(SB)
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-56
- JMP syscall·RawSyscall(SB)
-
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
- JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s b/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s
deleted file mode 100644
index d560019e..00000000
--- a/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (darwin || freebsd || netbsd || openbsd) && gc
-// +build darwin freebsd netbsd openbsd
-// +build gc
-
-#include "textflag.h"
-
-// System call support for RISCV64 BSD
-
-// Just jump to package syscall's implementation for all these functions.
-// The runtime may know about them.
-
-TEXT ·Syscall(SB),NOSPLIT,$0-56
- JMP syscall·Syscall(SB)
-
-TEXT ·Syscall6(SB),NOSPLIT,$0-80
- JMP syscall·Syscall6(SB)
-
-TEXT ·Syscall9(SB),NOSPLIT,$0-104
- JMP syscall·Syscall9(SB)
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-56
- JMP syscall·RawSyscall(SB)
-
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
- JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_386.s b/vendor/golang.org/x/sys/unix/asm_linux_386.s
deleted file mode 100644
index 8fd101d0..00000000
--- a/vendor/golang.org/x/sys/unix/asm_linux_386.s
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc
-// +build gc
-
-#include "textflag.h"
-
-//
-// System calls for 386, Linux
-//
-
-// See ../runtime/sys_linux_386.s for the reason why we always use int 0x80
-// instead of the glibc-specific "CALL 0x10(GS)".
-#define INVOKE_SYSCALL INT $0x80
-
-// Just jump to package syscall's implementation for all these functions.
-// The runtime may know about them.
-
-TEXT ·Syscall(SB),NOSPLIT,$0-28
- JMP syscall·Syscall(SB)
-
-TEXT ·Syscall6(SB),NOSPLIT,$0-40
- JMP syscall·Syscall6(SB)
-
-TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
- CALL runtime·entersyscall(SB)
- MOVL trap+0(FP), AX // syscall entry
- MOVL a1+4(FP), BX
- MOVL a2+8(FP), CX
- MOVL a3+12(FP), DX
- MOVL $0, SI
- MOVL $0, DI
- INVOKE_SYSCALL
- MOVL AX, r1+16(FP)
- MOVL DX, r2+20(FP)
- CALL runtime·exitsyscall(SB)
- RET
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-28
- JMP syscall·RawSyscall(SB)
-
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
- JMP syscall·RawSyscall6(SB)
-
-TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
- MOVL trap+0(FP), AX // syscall entry
- MOVL a1+4(FP), BX
- MOVL a2+8(FP), CX
- MOVL a3+12(FP), DX
- MOVL $0, SI
- MOVL $0, DI
- INVOKE_SYSCALL
- MOVL AX, r1+16(FP)
- MOVL DX, r2+20(FP)
- RET
-
-TEXT ·socketcall(SB),NOSPLIT,$0-36
- JMP syscall·socketcall(SB)
-
-TEXT ·rawsocketcall(SB),NOSPLIT,$0-36
- JMP syscall·rawsocketcall(SB)
-
-TEXT ·seek(SB),NOSPLIT,$0-28
- JMP syscall·seek(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s
deleted file mode 100644
index 7ed38e43..00000000
--- a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc
-// +build gc
-
-#include "textflag.h"
-
-//
-// System calls for AMD64, Linux
-//
-
-// Just jump to package syscall's implementation for all these functions.
-// The runtime may know about them.
-
-TEXT ·Syscall(SB),NOSPLIT,$0-56
- JMP syscall·Syscall(SB)
-
-TEXT ·Syscall6(SB),NOSPLIT,$0-80
- JMP syscall·Syscall6(SB)
-
-TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
- CALL runtime·entersyscall(SB)
- MOVQ a1+8(FP), DI
- MOVQ a2+16(FP), SI
- MOVQ a3+24(FP), DX
- MOVQ $0, R10
- MOVQ $0, R8
- MOVQ $0, R9
- MOVQ trap+0(FP), AX // syscall entry
- SYSCALL
- MOVQ AX, r1+32(FP)
- MOVQ DX, r2+40(FP)
- CALL runtime·exitsyscall(SB)
- RET
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-56
- JMP syscall·RawSyscall(SB)
-
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
- JMP syscall·RawSyscall6(SB)
-
-TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
- MOVQ a1+8(FP), DI
- MOVQ a2+16(FP), SI
- MOVQ a3+24(FP), DX
- MOVQ $0, R10
- MOVQ $0, R8
- MOVQ $0, R9
- MOVQ trap+0(FP), AX // syscall entry
- SYSCALL
- MOVQ AX, r1+32(FP)
- MOVQ DX, r2+40(FP)
- RET
-
-TEXT ·gettimeofday(SB),NOSPLIT,$0-16
- JMP syscall·gettimeofday(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm.s b/vendor/golang.org/x/sys/unix/asm_linux_arm.s
deleted file mode 100644
index 8ef1d514..00000000
--- a/vendor/golang.org/x/sys/unix/asm_linux_arm.s
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc
-// +build gc
-
-#include "textflag.h"
-
-//
-// System calls for arm, Linux
-//
-
-// Just jump to package syscall's implementation for all these functions.
-// The runtime may know about them.
-
-TEXT ·Syscall(SB),NOSPLIT,$0-28
- B syscall·Syscall(SB)
-
-TEXT ·Syscall6(SB),NOSPLIT,$0-40
- B syscall·Syscall6(SB)
-
-TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
- BL runtime·entersyscall(SB)
- MOVW trap+0(FP), R7
- MOVW a1+4(FP), R0
- MOVW a2+8(FP), R1
- MOVW a3+12(FP), R2
- MOVW $0, R3
- MOVW $0, R4
- MOVW $0, R5
- SWI $0
- MOVW R0, r1+16(FP)
- MOVW $0, R0
- MOVW R0, r2+20(FP)
- BL runtime·exitsyscall(SB)
- RET
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-28
- B syscall·RawSyscall(SB)
-
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
- B syscall·RawSyscall6(SB)
-
-TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
- MOVW trap+0(FP), R7 // syscall entry
- MOVW a1+4(FP), R0
- MOVW a2+8(FP), R1
- MOVW a3+12(FP), R2
- SWI $0
- MOVW R0, r1+16(FP)
- MOVW $0, R0
- MOVW R0, r2+20(FP)
- RET
-
-TEXT ·seek(SB),NOSPLIT,$0-28
- B syscall·seek(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s
deleted file mode 100644
index 98ae0276..00000000
--- a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux && arm64 && gc
-// +build linux
-// +build arm64
-// +build gc
-
-#include "textflag.h"
-
-// Just jump to package syscall's implementation for all these functions.
-// The runtime may know about them.
-
-TEXT ·Syscall(SB),NOSPLIT,$0-56
- B syscall·Syscall(SB)
-
-TEXT ·Syscall6(SB),NOSPLIT,$0-80
- B syscall·Syscall6(SB)
-
-TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
- BL runtime·entersyscall(SB)
- MOVD a1+8(FP), R0
- MOVD a2+16(FP), R1
- MOVD a3+24(FP), R2
- MOVD $0, R3
- MOVD $0, R4
- MOVD $0, R5
- MOVD trap+0(FP), R8 // syscall entry
- SVC
- MOVD R0, r1+32(FP) // r1
- MOVD R1, r2+40(FP) // r2
- BL runtime·exitsyscall(SB)
- RET
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-56
- B syscall·RawSyscall(SB)
-
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
- B syscall·RawSyscall6(SB)
-
-TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
- MOVD a1+8(FP), R0
- MOVD a2+16(FP), R1
- MOVD a3+24(FP), R2
- MOVD $0, R3
- MOVD $0, R4
- MOVD $0, R5
- MOVD trap+0(FP), R8 // syscall entry
- SVC
- MOVD R0, r1+32(FP)
- MOVD R1, r2+40(FP)
- RET
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_loong64.s b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s
deleted file mode 100644
index 56535728..00000000
--- a/vendor/golang.org/x/sys/unix/asm_linux_loong64.s
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux && loong64 && gc
-// +build linux
-// +build loong64
-// +build gc
-
-#include "textflag.h"
-
-
-// Just jump to package syscall's implementation for all these functions.
-// The runtime may know about them.
-
-TEXT ·Syscall(SB),NOSPLIT,$0-56
- JMP syscall·Syscall(SB)
-
-TEXT ·Syscall6(SB),NOSPLIT,$0-80
- JMP syscall·Syscall6(SB)
-
-TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
- JAL runtime·entersyscall(SB)
- MOVV a1+8(FP), R4
- MOVV a2+16(FP), R5
- MOVV a3+24(FP), R6
- MOVV R0, R7
- MOVV R0, R8
- MOVV R0, R9
- MOVV trap+0(FP), R11 // syscall entry
- SYSCALL
- MOVV R4, r1+32(FP)
- MOVV R0, r2+40(FP) // r2 is not used. Always set to 0
- JAL runtime·exitsyscall(SB)
- RET
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-56
- JMP syscall·RawSyscall(SB)
-
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
- JMP syscall·RawSyscall6(SB)
-
-TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
- MOVV a1+8(FP), R4
- MOVV a2+16(FP), R5
- MOVV a3+24(FP), R6
- MOVV R0, R7
- MOVV R0, R8
- MOVV R0, R9
- MOVV trap+0(FP), R11 // syscall entry
- SYSCALL
- MOVV R4, r1+32(FP)
- MOVV R0, r2+40(FP) // r2 is not used. Always set to 0
- RET
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s
deleted file mode 100644
index 21231d2c..00000000
--- a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux && (mips64 || mips64le) && gc
-// +build linux
-// +build mips64 mips64le
-// +build gc
-
-#include "textflag.h"
-
-//
-// System calls for mips64, Linux
-//
-
-// Just jump to package syscall's implementation for all these functions.
-// The runtime may know about them.
-
-TEXT ·Syscall(SB),NOSPLIT,$0-56
- JMP syscall·Syscall(SB)
-
-TEXT ·Syscall6(SB),NOSPLIT,$0-80
- JMP syscall·Syscall6(SB)
-
-TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
- JAL runtime·entersyscall(SB)
- MOVV a1+8(FP), R4
- MOVV a2+16(FP), R5
- MOVV a3+24(FP), R6
- MOVV R0, R7
- MOVV R0, R8
- MOVV R0, R9
- MOVV trap+0(FP), R2 // syscall entry
- SYSCALL
- MOVV R2, r1+32(FP)
- MOVV R3, r2+40(FP)
- JAL runtime·exitsyscall(SB)
- RET
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-56
- JMP syscall·RawSyscall(SB)
-
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
- JMP syscall·RawSyscall6(SB)
-
-TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
- MOVV a1+8(FP), R4
- MOVV a2+16(FP), R5
- MOVV a3+24(FP), R6
- MOVV R0, R7
- MOVV R0, R8
- MOVV R0, R9
- MOVV trap+0(FP), R2 // syscall entry
- SYSCALL
- MOVV R2, r1+32(FP)
- MOVV R3, r2+40(FP)
- RET
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s
deleted file mode 100644
index 6783b26c..00000000
--- a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux && (mips || mipsle) && gc
-// +build linux
-// +build mips mipsle
-// +build gc
-
-#include "textflag.h"
-
-//
-// System calls for mips, Linux
-//
-
-// Just jump to package syscall's implementation for all these functions.
-// The runtime may know about them.
-
-TEXT ·Syscall(SB),NOSPLIT,$0-28
- JMP syscall·Syscall(SB)
-
-TEXT ·Syscall6(SB),NOSPLIT,$0-40
- JMP syscall·Syscall6(SB)
-
-TEXT ·Syscall9(SB),NOSPLIT,$0-52
- JMP syscall·Syscall9(SB)
-
-TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
- JAL runtime·entersyscall(SB)
- MOVW a1+4(FP), R4
- MOVW a2+8(FP), R5
- MOVW a3+12(FP), R6
- MOVW R0, R7
- MOVW trap+0(FP), R2 // syscall entry
- SYSCALL
- MOVW R2, r1+16(FP) // r1
- MOVW R3, r2+20(FP) // r2
- JAL runtime·exitsyscall(SB)
- RET
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-28
- JMP syscall·RawSyscall(SB)
-
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
- JMP syscall·RawSyscall6(SB)
-
-TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
- MOVW a1+4(FP), R4
- MOVW a2+8(FP), R5
- MOVW a3+12(FP), R6
- MOVW trap+0(FP), R2 // syscall entry
- SYSCALL
- MOVW R2, r1+16(FP)
- MOVW R3, r2+20(FP)
- RET
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s
deleted file mode 100644
index 19d49893..00000000
--- a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux && (ppc64 || ppc64le) && gc
-// +build linux
-// +build ppc64 ppc64le
-// +build gc
-
-#include "textflag.h"
-
-//
-// System calls for ppc64, Linux
-//
-
-// Just jump to package syscall's implementation for all these functions.
-// The runtime may know about them.
-
-TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
- BL runtime·entersyscall(SB)
- MOVD a1+8(FP), R3
- MOVD a2+16(FP), R4
- MOVD a3+24(FP), R5
- MOVD R0, R6
- MOVD R0, R7
- MOVD R0, R8
- MOVD trap+0(FP), R9 // syscall entry
- SYSCALL R9
- MOVD R3, r1+32(FP)
- MOVD R4, r2+40(FP)
- BL runtime·exitsyscall(SB)
- RET
-
-TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
- MOVD a1+8(FP), R3
- MOVD a2+16(FP), R4
- MOVD a3+24(FP), R5
- MOVD R0, R6
- MOVD R0, R7
- MOVD R0, R8
- MOVD trap+0(FP), R9 // syscall entry
- SYSCALL R9
- MOVD R3, r1+32(FP)
- MOVD R4, r2+40(FP)
- RET
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s
deleted file mode 100644
index e42eb81d..00000000
--- a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build riscv64 && gc
-// +build riscv64
-// +build gc
-
-#include "textflag.h"
-
-//
-// System calls for linux/riscv64.
-//
-// Where available, just jump to package syscall's implementation of
-// these functions.
-
-TEXT ·Syscall(SB),NOSPLIT,$0-56
- JMP syscall·Syscall(SB)
-
-TEXT ·Syscall6(SB),NOSPLIT,$0-80
- JMP syscall·Syscall6(SB)
-
-TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
- CALL runtime·entersyscall(SB)
- MOV a1+8(FP), A0
- MOV a2+16(FP), A1
- MOV a3+24(FP), A2
- MOV trap+0(FP), A7 // syscall entry
- ECALL
- MOV A0, r1+32(FP) // r1
- MOV A1, r2+40(FP) // r2
- CALL runtime·exitsyscall(SB)
- RET
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-56
- JMP syscall·RawSyscall(SB)
-
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
- JMP syscall·RawSyscall6(SB)
-
-TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
- MOV a1+8(FP), A0
- MOV a2+16(FP), A1
- MOV a3+24(FP), A2
- MOV trap+0(FP), A7 // syscall entry
- ECALL
- MOV A0, r1+32(FP)
- MOV A1, r2+40(FP)
- RET
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s
deleted file mode 100644
index c46aab33..00000000
--- a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux && s390x && gc
-// +build linux
-// +build s390x
-// +build gc
-
-#include "textflag.h"
-
-//
-// System calls for s390x, Linux
-//
-
-// Just jump to package syscall's implementation for all these functions.
-// The runtime may know about them.
-
-TEXT ·Syscall(SB),NOSPLIT,$0-56
- BR syscall·Syscall(SB)
-
-TEXT ·Syscall6(SB),NOSPLIT,$0-80
- BR syscall·Syscall6(SB)
-
-TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
- BL runtime·entersyscall(SB)
- MOVD a1+8(FP), R2
- MOVD a2+16(FP), R3
- MOVD a3+24(FP), R4
- MOVD $0, R5
- MOVD $0, R6
- MOVD $0, R7
- MOVD trap+0(FP), R1 // syscall entry
- SYSCALL
- MOVD R2, r1+32(FP)
- MOVD R3, r2+40(FP)
- BL runtime·exitsyscall(SB)
- RET
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-56
- BR syscall·RawSyscall(SB)
-
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
- BR syscall·RawSyscall6(SB)
-
-TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
- MOVD a1+8(FP), R2
- MOVD a2+16(FP), R3
- MOVD a3+24(FP), R4
- MOVD $0, R5
- MOVD $0, R6
- MOVD $0, R7
- MOVD trap+0(FP), R1 // syscall entry
- SYSCALL
- MOVD R2, r1+32(FP)
- MOVD R3, r2+40(FP)
- RET
diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s
deleted file mode 100644
index 5e7a1169..00000000
--- a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc
-// +build gc
-
-#include "textflag.h"
-
-//
-// System call support for mips64, OpenBSD
-//
-
-// Just jump to package syscall's implementation for all these functions.
-// The runtime may know about them.
-
-TEXT ·Syscall(SB),NOSPLIT,$0-56
- JMP syscall·Syscall(SB)
-
-TEXT ·Syscall6(SB),NOSPLIT,$0-80
- JMP syscall·Syscall6(SB)
-
-TEXT ·Syscall9(SB),NOSPLIT,$0-104
- JMP syscall·Syscall9(SB)
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-56
- JMP syscall·RawSyscall(SB)
-
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
- JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s
deleted file mode 100644
index f8c5394c..00000000
--- a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc
-// +build gc
-
-#include "textflag.h"
-
-//
-// System calls for amd64, Solaris are implemented in runtime/syscall_solaris.go
-//
-
-TEXT ·sysvicall6(SB),NOSPLIT,$0-88
- JMP syscall·sysvicall6(SB)
-
-TEXT ·rawSysvicall6(SB),NOSPLIT,$0-88
- JMP syscall·rawSysvicall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s
deleted file mode 100644
index 3b54e185..00000000
--- a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s
+++ /dev/null
@@ -1,426 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build zos && s390x && gc
-// +build zos
-// +build s390x
-// +build gc
-
-#include "textflag.h"
-
-#define PSALAA 1208(R0)
-#define GTAB64(x) 80(x)
-#define LCA64(x) 88(x)
-#define CAA(x) 8(x)
-#define EDCHPXV(x) 1016(x) // in the CAA
-#define SAVSTACK_ASYNC(x) 336(x) // in the LCA
-
-// SS_*, where x=SAVSTACK_ASYNC
-#define SS_LE(x) 0(x)
-#define SS_GO(x) 8(x)
-#define SS_ERRNO(x) 16(x)
-#define SS_ERRNOJR(x) 20(x)
-
-#define LE_CALL BYTE $0x0D; BYTE $0x76; // BL R7, R6
-
-TEXT ·clearErrno(SB),NOSPLIT,$0-0
- BL addrerrno<>(SB)
- MOVD $0, 0(R3)
- RET
-
-// Returns the address of errno in R3.
-TEXT addrerrno<>(SB),NOSPLIT|NOFRAME,$0-0
- // Get library control area (LCA).
- MOVW PSALAA, R8
- MOVD LCA64(R8), R8
-
- // Get __errno FuncDesc.
- MOVD CAA(R8), R9
- MOVD EDCHPXV(R9), R9
- ADD $(0x156*16), R9
- LMG 0(R9), R5, R6
-
- // Switch to saved LE stack.
- MOVD SAVSTACK_ASYNC(R8), R9
- MOVD 0(R9), R4
- MOVD $0, 0(R9)
-
- // Call __errno function.
- LE_CALL
- NOPH
-
- // Switch back to Go stack.
- XOR R0, R0 // Restore R0 to $0.
- MOVD R4, 0(R9) // Save stack pointer.
- RET
-
-TEXT ·syscall_syscall(SB),NOSPLIT,$0-56
- BL runtime·entersyscall(SB)
- MOVD a1+8(FP), R1
- MOVD a2+16(FP), R2
- MOVD a3+24(FP), R3
-
- // Get library control area (LCA).
- MOVW PSALAA, R8
- MOVD LCA64(R8), R8
-
- // Get function.
- MOVD CAA(R8), R9
- MOVD EDCHPXV(R9), R9
- MOVD trap+0(FP), R5
- SLD $4, R5
- ADD R5, R9
- LMG 0(R9), R5, R6
-
- // Restore LE stack.
- MOVD SAVSTACK_ASYNC(R8), R9
- MOVD 0(R9), R4
- MOVD $0, 0(R9)
-
- // Call function.
- LE_CALL
- NOPH
- XOR R0, R0 // Restore R0 to $0.
- MOVD R4, 0(R9) // Save stack pointer.
-
- MOVD R3, r1+32(FP)
- MOVD R0, r2+40(FP)
- MOVD R0, err+48(FP)
- MOVW R3, R4
- CMP R4, $-1
- BNE done
- BL addrerrno<>(SB)
- MOVWZ 0(R3), R3
- MOVD R3, err+48(FP)
-done:
- BL runtime·exitsyscall(SB)
- RET
-
-TEXT ·syscall_rawsyscall(SB),NOSPLIT,$0-56
- MOVD a1+8(FP), R1
- MOVD a2+16(FP), R2
- MOVD a3+24(FP), R3
-
- // Get library control area (LCA).
- MOVW PSALAA, R8
- MOVD LCA64(R8), R8
-
- // Get function.
- MOVD CAA(R8), R9
- MOVD EDCHPXV(R9), R9
- MOVD trap+0(FP), R5
- SLD $4, R5
- ADD R5, R9
- LMG 0(R9), R5, R6
-
- // Restore LE stack.
- MOVD SAVSTACK_ASYNC(R8), R9
- MOVD 0(R9), R4
- MOVD $0, 0(R9)
-
- // Call function.
- LE_CALL
- NOPH
- XOR R0, R0 // Restore R0 to $0.
- MOVD R4, 0(R9) // Save stack pointer.
-
- MOVD R3, r1+32(FP)
- MOVD R0, r2+40(FP)
- MOVD R0, err+48(FP)
- MOVW R3, R4
- CMP R4, $-1
- BNE done
- BL addrerrno<>(SB)
- MOVWZ 0(R3), R3
- MOVD R3, err+48(FP)
-done:
- RET
-
-TEXT ·syscall_syscall6(SB),NOSPLIT,$0-80
- BL runtime·entersyscall(SB)
- MOVD a1+8(FP), R1
- MOVD a2+16(FP), R2
- MOVD a3+24(FP), R3
-
- // Get library control area (LCA).
- MOVW PSALAA, R8
- MOVD LCA64(R8), R8
-
- // Get function.
- MOVD CAA(R8), R9
- MOVD EDCHPXV(R9), R9
- MOVD trap+0(FP), R5
- SLD $4, R5
- ADD R5, R9
- LMG 0(R9), R5, R6
-
- // Restore LE stack.
- MOVD SAVSTACK_ASYNC(R8), R9
- MOVD 0(R9), R4
- MOVD $0, 0(R9)
-
- // Fill in parameter list.
- MOVD a4+32(FP), R12
- MOVD R12, (2176+24)(R4)
- MOVD a5+40(FP), R12
- MOVD R12, (2176+32)(R4)
- MOVD a6+48(FP), R12
- MOVD R12, (2176+40)(R4)
-
- // Call function.
- LE_CALL
- NOPH
- XOR R0, R0 // Restore R0 to $0.
- MOVD R4, 0(R9) // Save stack pointer.
-
- MOVD R3, r1+56(FP)
- MOVD R0, r2+64(FP)
- MOVD R0, err+72(FP)
- MOVW R3, R4
- CMP R4, $-1
- BNE done
- BL addrerrno<>(SB)
- MOVWZ 0(R3), R3
- MOVD R3, err+72(FP)
-done:
- BL runtime·exitsyscall(SB)
- RET
-
-TEXT ·syscall_rawsyscall6(SB),NOSPLIT,$0-80
- MOVD a1+8(FP), R1
- MOVD a2+16(FP), R2
- MOVD a3+24(FP), R3
-
- // Get library control area (LCA).
- MOVW PSALAA, R8
- MOVD LCA64(R8), R8
-
- // Get function.
- MOVD CAA(R8), R9
- MOVD EDCHPXV(R9), R9
- MOVD trap+0(FP), R5
- SLD $4, R5
- ADD R5, R9
- LMG 0(R9), R5, R6
-
- // Restore LE stack.
- MOVD SAVSTACK_ASYNC(R8), R9
- MOVD 0(R9), R4
- MOVD $0, 0(R9)
-
- // Fill in parameter list.
- MOVD a4+32(FP), R12
- MOVD R12, (2176+24)(R4)
- MOVD a5+40(FP), R12
- MOVD R12, (2176+32)(R4)
- MOVD a6+48(FP), R12
- MOVD R12, (2176+40)(R4)
-
- // Call function.
- LE_CALL
- NOPH
- XOR R0, R0 // Restore R0 to $0.
- MOVD R4, 0(R9) // Save stack pointer.
-
- MOVD R3, r1+56(FP)
- MOVD R0, r2+64(FP)
- MOVD R0, err+72(FP)
- MOVW R3, R4
- CMP R4, $-1
- BNE done
- BL ·rrno<>(SB)
- MOVWZ 0(R3), R3
- MOVD R3, err+72(FP)
-done:
- RET
-
-TEXT ·syscall_syscall9(SB),NOSPLIT,$0
- BL runtime·entersyscall(SB)
- MOVD a1+8(FP), R1
- MOVD a2+16(FP), R2
- MOVD a3+24(FP), R3
-
- // Get library control area (LCA).
- MOVW PSALAA, R8
- MOVD LCA64(R8), R8
-
- // Get function.
- MOVD CAA(R8), R9
- MOVD EDCHPXV(R9), R9
- MOVD trap+0(FP), R5
- SLD $4, R5
- ADD R5, R9
- LMG 0(R9), R5, R6
-
- // Restore LE stack.
- MOVD SAVSTACK_ASYNC(R8), R9
- MOVD 0(R9), R4
- MOVD $0, 0(R9)
-
- // Fill in parameter list.
- MOVD a4+32(FP), R12
- MOVD R12, (2176+24)(R4)
- MOVD a5+40(FP), R12
- MOVD R12, (2176+32)(R4)
- MOVD a6+48(FP), R12
- MOVD R12, (2176+40)(R4)
- MOVD a7+56(FP), R12
- MOVD R12, (2176+48)(R4)
- MOVD a8+64(FP), R12
- MOVD R12, (2176+56)(R4)
- MOVD a9+72(FP), R12
- MOVD R12, (2176+64)(R4)
-
- // Call function.
- LE_CALL
- NOPH
- XOR R0, R0 // Restore R0 to $0.
- MOVD R4, 0(R9) // Save stack pointer.
-
- MOVD R3, r1+80(FP)
- MOVD R0, r2+88(FP)
- MOVD R0, err+96(FP)
- MOVW R3, R4
- CMP R4, $-1
- BNE done
- BL addrerrno<>(SB)
- MOVWZ 0(R3), R3
- MOVD R3, err+96(FP)
-done:
- BL runtime·exitsyscall(SB)
- RET
-
-TEXT ·syscall_rawsyscall9(SB),NOSPLIT,$0
- MOVD a1+8(FP), R1
- MOVD a2+16(FP), R2
- MOVD a3+24(FP), R3
-
- // Get library control area (LCA).
- MOVW PSALAA, R8
- MOVD LCA64(R8), R8
-
- // Get function.
- MOVD CAA(R8), R9
- MOVD EDCHPXV(R9), R9
- MOVD trap+0(FP), R5
- SLD $4, R5
- ADD R5, R9
- LMG 0(R9), R5, R6
-
- // Restore LE stack.
- MOVD SAVSTACK_ASYNC(R8), R9
- MOVD 0(R9), R4
- MOVD $0, 0(R9)
-
- // Fill in parameter list.
- MOVD a4+32(FP), R12
- MOVD R12, (2176+24)(R4)
- MOVD a5+40(FP), R12
- MOVD R12, (2176+32)(R4)
- MOVD a6+48(FP), R12
- MOVD R12, (2176+40)(R4)
- MOVD a7+56(FP), R12
- MOVD R12, (2176+48)(R4)
- MOVD a8+64(FP), R12
- MOVD R12, (2176+56)(R4)
- MOVD a9+72(FP), R12
- MOVD R12, (2176+64)(R4)
-
- // Call function.
- LE_CALL
- NOPH
- XOR R0, R0 // Restore R0 to $0.
- MOVD R4, 0(R9) // Save stack pointer.
-
- MOVD R3, r1+80(FP)
- MOVD R0, r2+88(FP)
- MOVD R0, err+96(FP)
- MOVW R3, R4
- CMP R4, $-1
- BNE done
- BL addrerrno<>(SB)
- MOVWZ 0(R3), R3
- MOVD R3, err+96(FP)
-done:
- RET
-
-// func svcCall(fnptr unsafe.Pointer, argv *unsafe.Pointer, dsa *uint64)
-TEXT ·svcCall(SB),NOSPLIT,$0
- BL runtime·save_g(SB) // Save g and stack pointer
- MOVW PSALAA, R8
- MOVD LCA64(R8), R8
- MOVD SAVSTACK_ASYNC(R8), R9
- MOVD R15, 0(R9)
-
- MOVD argv+8(FP), R1 // Move function arguments into registers
- MOVD dsa+16(FP), g
- MOVD fnptr+0(FP), R15
-
- BYTE $0x0D // Branch to function
- BYTE $0xEF
-
- BL runtime·load_g(SB) // Restore g and stack pointer
- MOVW PSALAA, R8
- MOVD LCA64(R8), R8
- MOVD SAVSTACK_ASYNC(R8), R9
- MOVD 0(R9), R15
-
- RET
-
-// func svcLoad(name *byte) unsafe.Pointer
-TEXT ·svcLoad(SB),NOSPLIT,$0
- MOVD R15, R2 // Save go stack pointer
- MOVD name+0(FP), R0 // Move SVC args into registers
- MOVD $0x80000000, R1
- MOVD $0, R15
- BYTE $0x0A // SVC 08 LOAD
- BYTE $0x08
- MOVW R15, R3 // Save return code from SVC
- MOVD R2, R15 // Restore go stack pointer
- CMP R3, $0 // Check SVC return code
- BNE error
-
- MOVD $-2, R3 // Reset last bit of entry point to zero
- AND R0, R3
- MOVD R3, addr+8(FP) // Return entry point returned by SVC
- CMP R0, R3 // Check if last bit of entry point was set
- BNE done
-
- MOVD R15, R2 // Save go stack pointer
- MOVD $0, R15 // Move SVC args into registers (entry point still in r0 from SVC 08)
- BYTE $0x0A // SVC 09 DELETE
- BYTE $0x09
- MOVD R2, R15 // Restore go stack pointer
-
-error:
- MOVD $0, addr+8(FP) // Return 0 on failure
-done:
- XOR R0, R0 // Reset r0 to 0
- RET
-
-// func svcUnload(name *byte, fnptr unsafe.Pointer) int64
-TEXT ·svcUnload(SB),NOSPLIT,$0
- MOVD R15, R2 // Save go stack pointer
- MOVD name+0(FP), R0 // Move SVC args into registers
- MOVD addr+8(FP), R15
- BYTE $0x0A // SVC 09
- BYTE $0x09
- XOR R0, R0 // Reset r0 to 0
- MOVD R15, R1 // Save SVC return code
- MOVD R2, R15 // Restore go stack pointer
- MOVD R1, rc+0(FP) // Return SVC return code
- RET
-
-// func gettid() uint64
-TEXT ·gettid(SB), NOSPLIT, $0
- // Get library control area (LCA).
- MOVW PSALAA, R8
- MOVD LCA64(R8), R8
-
- // Get CEECAATHDID
- MOVD CAA(R8), R9
- MOVD 0x3D0(R9), R9
- MOVD R9, ret+0(FP)
-
- RET
diff --git a/vendor/golang.org/x/sys/unix/bluetooth_linux.go b/vendor/golang.org/x/sys/unix/bluetooth_linux.go
deleted file mode 100644
index a178a614..00000000
--- a/vendor/golang.org/x/sys/unix/bluetooth_linux.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Bluetooth sockets and messages
-
-package unix
-
-// Bluetooth Protocols
-const (
- BTPROTO_L2CAP = 0
- BTPROTO_HCI = 1
- BTPROTO_SCO = 2
- BTPROTO_RFCOMM = 3
- BTPROTO_BNEP = 4
- BTPROTO_CMTP = 5
- BTPROTO_HIDP = 6
- BTPROTO_AVDTP = 7
-)
-
-const (
- HCI_CHANNEL_RAW = 0
- HCI_CHANNEL_USER = 1
- HCI_CHANNEL_MONITOR = 2
- HCI_CHANNEL_CONTROL = 3
- HCI_CHANNEL_LOGGING = 4
-)
-
-// Socketoption Level
-const (
- SOL_BLUETOOTH = 0x112
- SOL_HCI = 0x0
- SOL_L2CAP = 0x6
- SOL_RFCOMM = 0x12
- SOL_SCO = 0x11
-)
diff --git a/vendor/golang.org/x/sys/unix/cap_freebsd.go b/vendor/golang.org/x/sys/unix/cap_freebsd.go
deleted file mode 100644
index 0b7c6adb..00000000
--- a/vendor/golang.org/x/sys/unix/cap_freebsd.go
+++ /dev/null
@@ -1,196 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build freebsd
-// +build freebsd
-
-package unix
-
-import (
- "errors"
- "fmt"
-)
-
-// Go implementation of C mostly found in /usr/src/sys/kern/subr_capability.c
-
-const (
- // This is the version of CapRights this package understands. See C implementation for parallels.
- capRightsGoVersion = CAP_RIGHTS_VERSION_00
- capArSizeMin = CAP_RIGHTS_VERSION_00 + 2
- capArSizeMax = capRightsGoVersion + 2
-)
-
-var (
- bit2idx = []int{
- -1, 0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1,
- 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- }
-)
-
-func capidxbit(right uint64) int {
- return int((right >> 57) & 0x1f)
-}
-
-func rightToIndex(right uint64) (int, error) {
- idx := capidxbit(right)
- if idx < 0 || idx >= len(bit2idx) {
- return -2, fmt.Errorf("index for right 0x%x out of range", right)
- }
- return bit2idx[idx], nil
-}
-
-func caprver(right uint64) int {
- return int(right >> 62)
-}
-
-func capver(rights *CapRights) int {
- return caprver(rights.Rights[0])
-}
-
-func caparsize(rights *CapRights) int {
- return capver(rights) + 2
-}
-
-// CapRightsSet sets the permissions in setrights in rights.
-func CapRightsSet(rights *CapRights, setrights []uint64) error {
- // This is essentially a copy of cap_rights_vset()
- if capver(rights) != CAP_RIGHTS_VERSION_00 {
- return fmt.Errorf("bad rights version %d", capver(rights))
- }
-
- n := caparsize(rights)
- if n < capArSizeMin || n > capArSizeMax {
- return errors.New("bad rights size")
- }
-
- for _, right := range setrights {
- if caprver(right) != CAP_RIGHTS_VERSION_00 {
- return errors.New("bad right version")
- }
- i, err := rightToIndex(right)
- if err != nil {
- return err
- }
- if i >= n {
- return errors.New("index overflow")
- }
- if capidxbit(rights.Rights[i]) != capidxbit(right) {
- return errors.New("index mismatch")
- }
- rights.Rights[i] |= right
- if capidxbit(rights.Rights[i]) != capidxbit(right) {
- return errors.New("index mismatch (after assign)")
- }
- }
-
- return nil
-}
-
-// CapRightsClear clears the permissions in clearrights from rights.
-func CapRightsClear(rights *CapRights, clearrights []uint64) error {
- // This is essentially a copy of cap_rights_vclear()
- if capver(rights) != CAP_RIGHTS_VERSION_00 {
- return fmt.Errorf("bad rights version %d", capver(rights))
- }
-
- n := caparsize(rights)
- if n < capArSizeMin || n > capArSizeMax {
- return errors.New("bad rights size")
- }
-
- for _, right := range clearrights {
- if caprver(right) != CAP_RIGHTS_VERSION_00 {
- return errors.New("bad right version")
- }
- i, err := rightToIndex(right)
- if err != nil {
- return err
- }
- if i >= n {
- return errors.New("index overflow")
- }
- if capidxbit(rights.Rights[i]) != capidxbit(right) {
- return errors.New("index mismatch")
- }
- rights.Rights[i] &= ^(right & 0x01FFFFFFFFFFFFFF)
- if capidxbit(rights.Rights[i]) != capidxbit(right) {
- return errors.New("index mismatch (after assign)")
- }
- }
-
- return nil
-}
-
-// CapRightsIsSet checks whether all the permissions in setrights are present in rights.
-func CapRightsIsSet(rights *CapRights, setrights []uint64) (bool, error) {
- // This is essentially a copy of cap_rights_is_vset()
- if capver(rights) != CAP_RIGHTS_VERSION_00 {
- return false, fmt.Errorf("bad rights version %d", capver(rights))
- }
-
- n := caparsize(rights)
- if n < capArSizeMin || n > capArSizeMax {
- return false, errors.New("bad rights size")
- }
-
- for _, right := range setrights {
- if caprver(right) != CAP_RIGHTS_VERSION_00 {
- return false, errors.New("bad right version")
- }
- i, err := rightToIndex(right)
- if err != nil {
- return false, err
- }
- if i >= n {
- return false, errors.New("index overflow")
- }
- if capidxbit(rights.Rights[i]) != capidxbit(right) {
- return false, errors.New("index mismatch")
- }
- if (rights.Rights[i] & right) != right {
- return false, nil
- }
- }
-
- return true, nil
-}
-
-func capright(idx uint64, bit uint64) uint64 {
- return ((1 << (57 + idx)) | bit)
-}
-
-// CapRightsInit returns a pointer to an initialised CapRights structure filled with rights.
-// See man cap_rights_init(3) and rights(4).
-func CapRightsInit(rights []uint64) (*CapRights, error) {
- var r CapRights
- r.Rights[0] = (capRightsGoVersion << 62) | capright(0, 0)
- r.Rights[1] = capright(1, 0)
-
- err := CapRightsSet(&r, rights)
- if err != nil {
- return nil, err
- }
- return &r, nil
-}
-
-// CapRightsLimit reduces the operations permitted on fd to at most those contained in rights.
-// The capability rights on fd can never be increased by CapRightsLimit.
-// See man cap_rights_limit(2) and rights(4).
-func CapRightsLimit(fd uintptr, rights *CapRights) error {
- return capRightsLimit(int(fd), rights)
-}
-
-// CapRightsGet returns a CapRights structure containing the operations permitted on fd.
-// See man cap_rights_get(3) and rights(4).
-func CapRightsGet(fd uintptr) (*CapRights, error) {
- r, err := CapRightsInit(nil)
- if err != nil {
- return nil, err
- }
- err = capRightsGet(capRightsGoVersion, int(fd), r)
- if err != nil {
- return nil, err
- }
- return r, nil
-}
diff --git a/vendor/golang.org/x/sys/unix/constants.go b/vendor/golang.org/x/sys/unix/constants.go
deleted file mode 100644
index 394a3965..00000000
--- a/vendor/golang.org/x/sys/unix/constants.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
-
-package unix
-
-const (
- R_OK = 0x4
- W_OK = 0x2
- X_OK = 0x1
-)
diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go
deleted file mode 100644
index 65a99850..00000000
--- a/vendor/golang.org/x/sys/unix/dev_aix_ppc.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix && ppc
-// +build aix,ppc
-
-// Functions to access/create device major and minor numbers matching the
-// encoding used by AIX.
-
-package unix
-
-// Major returns the major component of a Linux device number.
-func Major(dev uint64) uint32 {
- return uint32((dev >> 16) & 0xffff)
-}
-
-// Minor returns the minor component of a Linux device number.
-func Minor(dev uint64) uint32 {
- return uint32(dev & 0xffff)
-}
-
-// Mkdev returns a Linux device number generated from the given major and minor
-// components.
-func Mkdev(major, minor uint32) uint64 {
- return uint64(((major) << 16) | (minor))
-}
diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go
deleted file mode 100644
index 8fc08ad0..00000000
--- a/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix && ppc64
-// +build aix,ppc64
-
-// Functions to access/create device major and minor numbers matching the
-// encoding used AIX.
-
-package unix
-
-// Major returns the major component of a Linux device number.
-func Major(dev uint64) uint32 {
- return uint32((dev & 0x3fffffff00000000) >> 32)
-}
-
-// Minor returns the minor component of a Linux device number.
-func Minor(dev uint64) uint32 {
- return uint32((dev & 0x00000000ffffffff) >> 0)
-}
-
-// Mkdev returns a Linux device number generated from the given major and minor
-// components.
-func Mkdev(major, minor uint32) uint64 {
- var DEVNO64 uint64
- DEVNO64 = 0x8000000000000000
- return ((uint64(major) << 32) | (uint64(minor) & 0x00000000FFFFFFFF) | DEVNO64)
-}
diff --git a/vendor/golang.org/x/sys/unix/dev_darwin.go b/vendor/golang.org/x/sys/unix/dev_darwin.go
deleted file mode 100644
index 8d1dc0fa..00000000
--- a/vendor/golang.org/x/sys/unix/dev_darwin.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Functions to access/create device major and minor numbers matching the
-// encoding used in Darwin's sys/types.h header.
-
-package unix
-
-// Major returns the major component of a Darwin device number.
-func Major(dev uint64) uint32 {
- return uint32((dev >> 24) & 0xff)
-}
-
-// Minor returns the minor component of a Darwin device number.
-func Minor(dev uint64) uint32 {
- return uint32(dev & 0xffffff)
-}
-
-// Mkdev returns a Darwin device number generated from the given major and minor
-// components.
-func Mkdev(major, minor uint32) uint64 {
- return (uint64(major) << 24) | uint64(minor)
-}
diff --git a/vendor/golang.org/x/sys/unix/dev_dragonfly.go b/vendor/golang.org/x/sys/unix/dev_dragonfly.go
deleted file mode 100644
index 8502f202..00000000
--- a/vendor/golang.org/x/sys/unix/dev_dragonfly.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Functions to access/create device major and minor numbers matching the
-// encoding used in Dragonfly's sys/types.h header.
-//
-// The information below is extracted and adapted from sys/types.h:
-//
-// Minor gives a cookie instead of an index since in order to avoid changing the
-// meanings of bits 0-15 or wasting time and space shifting bits 16-31 for
-// devices that don't use them.
-
-package unix
-
-// Major returns the major component of a DragonFlyBSD device number.
-func Major(dev uint64) uint32 {
- return uint32((dev >> 8) & 0xff)
-}
-
-// Minor returns the minor component of a DragonFlyBSD device number.
-func Minor(dev uint64) uint32 {
- return uint32(dev & 0xffff00ff)
-}
-
-// Mkdev returns a DragonFlyBSD device number generated from the given major and
-// minor components.
-func Mkdev(major, minor uint32) uint64 {
- return (uint64(major) << 8) | uint64(minor)
-}
diff --git a/vendor/golang.org/x/sys/unix/dev_freebsd.go b/vendor/golang.org/x/sys/unix/dev_freebsd.go
deleted file mode 100644
index eba3b4bd..00000000
--- a/vendor/golang.org/x/sys/unix/dev_freebsd.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Functions to access/create device major and minor numbers matching the
-// encoding used in FreeBSD's sys/types.h header.
-//
-// The information below is extracted and adapted from sys/types.h:
-//
-// Minor gives a cookie instead of an index since in order to avoid changing the
-// meanings of bits 0-15 or wasting time and space shifting bits 16-31 for
-// devices that don't use them.
-
-package unix
-
-// Major returns the major component of a FreeBSD device number.
-func Major(dev uint64) uint32 {
- return uint32((dev >> 8) & 0xff)
-}
-
-// Minor returns the minor component of a FreeBSD device number.
-func Minor(dev uint64) uint32 {
- return uint32(dev & 0xffff00ff)
-}
-
-// Mkdev returns a FreeBSD device number generated from the given major and
-// minor components.
-func Mkdev(major, minor uint32) uint64 {
- return (uint64(major) << 8) | uint64(minor)
-}
diff --git a/vendor/golang.org/x/sys/unix/dev_linux.go b/vendor/golang.org/x/sys/unix/dev_linux.go
deleted file mode 100644
index d165d6f3..00000000
--- a/vendor/golang.org/x/sys/unix/dev_linux.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Functions to access/create device major and minor numbers matching the
-// encoding used by the Linux kernel and glibc.
-//
-// The information below is extracted and adapted from bits/sysmacros.h in the
-// glibc sources:
-//
-// dev_t in glibc is 64-bit, with 32-bit major and minor numbers. glibc's
-// default encoding is MMMM Mmmm mmmM MMmm, where M is a hex digit of the major
-// number and m is a hex digit of the minor number. This is backward compatible
-// with legacy systems where dev_t is 16 bits wide, encoded as MMmm. It is also
-// backward compatible with the Linux kernel, which for some architectures uses
-// 32-bit dev_t, encoded as mmmM MMmm.
-
-package unix
-
-// Major returns the major component of a Linux device number.
-func Major(dev uint64) uint32 {
- major := uint32((dev & 0x00000000000fff00) >> 8)
- major |= uint32((dev & 0xfffff00000000000) >> 32)
- return major
-}
-
-// Minor returns the minor component of a Linux device number.
-func Minor(dev uint64) uint32 {
- minor := uint32((dev & 0x00000000000000ff) >> 0)
- minor |= uint32((dev & 0x00000ffffff00000) >> 12)
- return minor
-}
-
-// Mkdev returns a Linux device number generated from the given major and minor
-// components.
-func Mkdev(major, minor uint32) uint64 {
- dev := (uint64(major) & 0x00000fff) << 8
- dev |= (uint64(major) & 0xfffff000) << 32
- dev |= (uint64(minor) & 0x000000ff) << 0
- dev |= (uint64(minor) & 0xffffff00) << 12
- return dev
-}
diff --git a/vendor/golang.org/x/sys/unix/dev_netbsd.go b/vendor/golang.org/x/sys/unix/dev_netbsd.go
deleted file mode 100644
index b4a203d0..00000000
--- a/vendor/golang.org/x/sys/unix/dev_netbsd.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Functions to access/create device major and minor numbers matching the
-// encoding used in NetBSD's sys/types.h header.
-
-package unix
-
-// Major returns the major component of a NetBSD device number.
-func Major(dev uint64) uint32 {
- return uint32((dev & 0x000fff00) >> 8)
-}
-
-// Minor returns the minor component of a NetBSD device number.
-func Minor(dev uint64) uint32 {
- minor := uint32((dev & 0x000000ff) >> 0)
- minor |= uint32((dev & 0xfff00000) >> 12)
- return minor
-}
-
-// Mkdev returns a NetBSD device number generated from the given major and minor
-// components.
-func Mkdev(major, minor uint32) uint64 {
- dev := (uint64(major) << 8) & 0x000fff00
- dev |= (uint64(minor) << 12) & 0xfff00000
- dev |= (uint64(minor) << 0) & 0x000000ff
- return dev
-}
diff --git a/vendor/golang.org/x/sys/unix/dev_openbsd.go b/vendor/golang.org/x/sys/unix/dev_openbsd.go
deleted file mode 100644
index f3430c42..00000000
--- a/vendor/golang.org/x/sys/unix/dev_openbsd.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Functions to access/create device major and minor numbers matching the
-// encoding used in OpenBSD's sys/types.h header.
-
-package unix
-
-// Major returns the major component of an OpenBSD device number.
-func Major(dev uint64) uint32 {
- return uint32((dev & 0x0000ff00) >> 8)
-}
-
-// Minor returns the minor component of an OpenBSD device number.
-func Minor(dev uint64) uint32 {
- minor := uint32((dev & 0x000000ff) >> 0)
- minor |= uint32((dev & 0xffff0000) >> 8)
- return minor
-}
-
-// Mkdev returns an OpenBSD device number generated from the given major and minor
-// components.
-func Mkdev(major, minor uint32) uint64 {
- dev := (uint64(major) << 8) & 0x0000ff00
- dev |= (uint64(minor) << 8) & 0xffff0000
- dev |= (uint64(minor) << 0) & 0x000000ff
- return dev
-}
diff --git a/vendor/golang.org/x/sys/unix/dev_zos.go b/vendor/golang.org/x/sys/unix/dev_zos.go
deleted file mode 100644
index a388e59a..00000000
--- a/vendor/golang.org/x/sys/unix/dev_zos.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build zos && s390x
-// +build zos,s390x
-
-// Functions to access/create device major and minor numbers matching the
-// encoding used by z/OS.
-//
-// The information below is extracted and adapted from macros.
-
-package unix
-
-// Major returns the major component of a z/OS device number.
-func Major(dev uint64) uint32 {
- return uint32((dev >> 16) & 0x0000FFFF)
-}
-
-// Minor returns the minor component of a z/OS device number.
-func Minor(dev uint64) uint32 {
- return uint32(dev & 0x0000FFFF)
-}
-
-// Mkdev returns a z/OS device number generated from the given major and minor
-// components.
-func Mkdev(major, minor uint32) uint64 {
- return (uint64(major) << 16) | uint64(minor)
-}
diff --git a/vendor/golang.org/x/sys/unix/dirent.go b/vendor/golang.org/x/sys/unix/dirent.go
deleted file mode 100644
index 2499f977..00000000
--- a/vendor/golang.org/x/sys/unix/dirent.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
-
-package unix
-
-import "unsafe"
-
-// readInt returns the size-bytes unsigned integer in native byte order at offset off.
-func readInt(b []byte, off, size uintptr) (u uint64, ok bool) {
- if len(b) < int(off+size) {
- return 0, false
- }
- if isBigEndian {
- return readIntBE(b[off:], size), true
- }
- return readIntLE(b[off:], size), true
-}
-
-func readIntBE(b []byte, size uintptr) uint64 {
- switch size {
- case 1:
- return uint64(b[0])
- case 2:
- _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[1]) | uint64(b[0])<<8
- case 4:
- _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24
- case 8:
- _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
- uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
- default:
- panic("syscall: readInt with unsupported size")
- }
-}
-
-func readIntLE(b []byte, size uintptr) uint64 {
- switch size {
- case 1:
- return uint64(b[0])
- case 2:
- _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[0]) | uint64(b[1])<<8
- case 4:
- _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24
- case 8:
- _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
- uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
- default:
- panic("syscall: readInt with unsupported size")
- }
-}
-
-// ParseDirent parses up to max directory entries in buf,
-// appending the names to names. It returns the number of
-// bytes consumed from buf, the number of entries added
-// to names, and the new names slice.
-func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) {
- origlen := len(buf)
- count = 0
- for max != 0 && len(buf) > 0 {
- reclen, ok := direntReclen(buf)
- if !ok || reclen > uint64(len(buf)) {
- return origlen, count, names
- }
- rec := buf[:reclen]
- buf = buf[reclen:]
- ino, ok := direntIno(rec)
- if !ok {
- break
- }
- if ino == 0 { // File absent in directory.
- continue
- }
- const namoff = uint64(unsafe.Offsetof(Dirent{}.Name))
- namlen, ok := direntNamlen(rec)
- if !ok || namoff+namlen > uint64(len(rec)) {
- break
- }
- name := rec[namoff : namoff+namlen]
- for i, c := range name {
- if c == 0 {
- name = name[:i]
- break
- }
- }
- // Check for useless names before allocating a string.
- if string(name) == "." || string(name) == ".." {
- continue
- }
- max--
- count++
- names = append(names, string(name))
- }
- return origlen - len(buf), count, names
-}
diff --git a/vendor/golang.org/x/sys/unix/endian_big.go b/vendor/golang.org/x/sys/unix/endian_big.go
deleted file mode 100644
index a5202655..00000000
--- a/vendor/golang.org/x/sys/unix/endian_big.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-//
-//go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64
-// +build armbe arm64be m68k mips mips64 mips64p32 ppc ppc64 s390 s390x shbe sparc sparc64
-
-package unix
-
-const isBigEndian = true
diff --git a/vendor/golang.org/x/sys/unix/endian_little.go b/vendor/golang.org/x/sys/unix/endian_little.go
deleted file mode 100644
index b0f2bc4a..00000000
--- a/vendor/golang.org/x/sys/unix/endian_little.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-//
-//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh
-// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh
-
-package unix
-
-const isBigEndian = false
diff --git a/vendor/golang.org/x/sys/unix/env_unix.go b/vendor/golang.org/x/sys/unix/env_unix.go
deleted file mode 100644
index 29ccc4d1..00000000
--- a/vendor/golang.org/x/sys/unix/env_unix.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
-
-// Unix environment variables.
-
-package unix
-
-import "syscall"
-
-func Getenv(key string) (value string, found bool) {
- return syscall.Getenv(key)
-}
-
-func Setenv(key, value string) error {
- return syscall.Setenv(key, value)
-}
-
-func Clearenv() {
- syscall.Clearenv()
-}
-
-func Environ() []string {
- return syscall.Environ()
-}
-
-func Unsetenv(key string) error {
- return syscall.Unsetenv(key)
-}
diff --git a/vendor/golang.org/x/sys/unix/epoll_zos.go b/vendor/golang.org/x/sys/unix/epoll_zos.go
deleted file mode 100644
index cedaf7e0..00000000
--- a/vendor/golang.org/x/sys/unix/epoll_zos.go
+++ /dev/null
@@ -1,221 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build zos && s390x
-// +build zos,s390x
-
-package unix
-
-import (
- "sync"
-)
-
-// This file simulates epoll on z/OS using poll.
-
-// Analogous to epoll_event on Linux.
-// TODO(neeilan): Pad is because the Linux kernel expects a 96-bit struct. We never pass this to the kernel; remove?
-type EpollEvent struct {
- Events uint32
- Fd int32
- Pad int32
-}
-
-const (
- EPOLLERR = 0x8
- EPOLLHUP = 0x10
- EPOLLIN = 0x1
- EPOLLMSG = 0x400
- EPOLLOUT = 0x4
- EPOLLPRI = 0x2
- EPOLLRDBAND = 0x80
- EPOLLRDNORM = 0x40
- EPOLLWRBAND = 0x200
- EPOLLWRNORM = 0x100
- EPOLL_CTL_ADD = 0x1
- EPOLL_CTL_DEL = 0x2
- EPOLL_CTL_MOD = 0x3
- // The following constants are part of the epoll API, but represent
- // currently unsupported functionality on z/OS.
- // EPOLL_CLOEXEC = 0x80000
- // EPOLLET = 0x80000000
- // EPOLLONESHOT = 0x40000000
- // EPOLLRDHUP = 0x2000 // Typically used with edge-triggered notis
- // EPOLLEXCLUSIVE = 0x10000000 // Exclusive wake-up mode
- // EPOLLWAKEUP = 0x20000000 // Relies on Linux's BLOCK_SUSPEND capability
-)
-
-// TODO(neeilan): We can eliminate these epToPoll / pToEpoll calls by using identical mask values for POLL/EPOLL
-// constants where possible The lower 16 bits of epoll events (uint32) can fit any system poll event (int16).
-
-// epToPollEvt converts epoll event field to poll equivalent.
-// In epoll, Events is a 32-bit field, while poll uses 16 bits.
-func epToPollEvt(events uint32) int16 {
- var ep2p = map[uint32]int16{
- EPOLLIN: POLLIN,
- EPOLLOUT: POLLOUT,
- EPOLLHUP: POLLHUP,
- EPOLLPRI: POLLPRI,
- EPOLLERR: POLLERR,
- }
-
- var pollEvts int16 = 0
- for epEvt, pEvt := range ep2p {
- if (events & epEvt) != 0 {
- pollEvts |= pEvt
- }
- }
-
- return pollEvts
-}
-
-// pToEpollEvt converts 16 bit poll event bitfields to 32-bit epoll event fields.
-func pToEpollEvt(revents int16) uint32 {
- var p2ep = map[int16]uint32{
- POLLIN: EPOLLIN,
- POLLOUT: EPOLLOUT,
- POLLHUP: EPOLLHUP,
- POLLPRI: EPOLLPRI,
- POLLERR: EPOLLERR,
- }
-
- var epollEvts uint32 = 0
- for pEvt, epEvt := range p2ep {
- if (revents & pEvt) != 0 {
- epollEvts |= epEvt
- }
- }
-
- return epollEvts
-}
-
-// Per-process epoll implementation.
-type epollImpl struct {
- mu sync.Mutex
- epfd2ep map[int]*eventPoll
- nextEpfd int
-}
-
-// eventPoll holds a set of file descriptors being watched by the process. A process can have multiple epoll instances.
-// On Linux, this is an in-kernel data structure accessed through a fd.
-type eventPoll struct {
- mu sync.Mutex
- fds map[int]*EpollEvent
-}
-
-// epoll impl for this process.
-var impl epollImpl = epollImpl{
- epfd2ep: make(map[int]*eventPoll),
- nextEpfd: 0,
-}
-
-func (e *epollImpl) epollcreate(size int) (epfd int, err error) {
- e.mu.Lock()
- defer e.mu.Unlock()
- epfd = e.nextEpfd
- e.nextEpfd++
-
- e.epfd2ep[epfd] = &eventPoll{
- fds: make(map[int]*EpollEvent),
- }
- return epfd, nil
-}
-
-func (e *epollImpl) epollcreate1(flag int) (fd int, err error) {
- return e.epollcreate(4)
-}
-
-func (e *epollImpl) epollctl(epfd int, op int, fd int, event *EpollEvent) (err error) {
- e.mu.Lock()
- defer e.mu.Unlock()
-
- ep, ok := e.epfd2ep[epfd]
- if !ok {
-
- return EBADF
- }
-
- switch op {
- case EPOLL_CTL_ADD:
- // TODO(neeilan): When we make epfds and fds disjoint, detect epoll
- // loops here (instances watching each other) and return ELOOP.
- if _, ok := ep.fds[fd]; ok {
- return EEXIST
- }
- ep.fds[fd] = event
- case EPOLL_CTL_MOD:
- if _, ok := ep.fds[fd]; !ok {
- return ENOENT
- }
- ep.fds[fd] = event
- case EPOLL_CTL_DEL:
- if _, ok := ep.fds[fd]; !ok {
- return ENOENT
- }
- delete(ep.fds, fd)
-
- }
- return nil
-}
-
-// Must be called while holding ep.mu
-func (ep *eventPoll) getFds() []int {
- fds := make([]int, len(ep.fds))
- for fd := range ep.fds {
- fds = append(fds, fd)
- }
- return fds
-}
-
-func (e *epollImpl) epollwait(epfd int, events []EpollEvent, msec int) (n int, err error) {
- e.mu.Lock() // in [rare] case of concurrent epollcreate + epollwait
- ep, ok := e.epfd2ep[epfd]
-
- if !ok {
- e.mu.Unlock()
- return 0, EBADF
- }
-
- pollfds := make([]PollFd, 4)
- for fd, epollevt := range ep.fds {
- pollfds = append(pollfds, PollFd{Fd: int32(fd), Events: epToPollEvt(epollevt.Events)})
- }
- e.mu.Unlock()
-
- n, err = Poll(pollfds, msec)
- if err != nil {
- return n, err
- }
-
- i := 0
- for _, pFd := range pollfds {
- if pFd.Revents != 0 {
- events[i] = EpollEvent{Fd: pFd.Fd, Events: pToEpollEvt(pFd.Revents)}
- i++
- }
-
- if i == n {
- break
- }
- }
-
- return n, nil
-}
-
-func EpollCreate(size int) (fd int, err error) {
- return impl.epollcreate(size)
-}
-
-func EpollCreate1(flag int) (fd int, err error) {
- return impl.epollcreate1(flag)
-}
-
-func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) {
- return impl.epollctl(epfd, op, fd, event)
-}
-
-// Because EpollWait mutates events, the caller is expected to coordinate
-// concurrent access if calling with the same epfd from multiple goroutines.
-func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
- return impl.epollwait(epfd, events, msec)
-}
diff --git a/vendor/golang.org/x/sys/unix/fcntl.go b/vendor/golang.org/x/sys/unix/fcntl.go
deleted file mode 100644
index e9b99125..00000000
--- a/vendor/golang.org/x/sys/unix/fcntl.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build dragonfly || freebsd || linux || netbsd || openbsd
-// +build dragonfly freebsd linux netbsd openbsd
-
-package unix
-
-import "unsafe"
-
-// fcntl64Syscall is usually SYS_FCNTL, but is overridden on 32-bit Linux
-// systems by fcntl_linux_32bit.go to be SYS_FCNTL64.
-var fcntl64Syscall uintptr = SYS_FCNTL
-
-func fcntl(fd int, cmd, arg int) (int, error) {
- valptr, _, errno := Syscall(fcntl64Syscall, uintptr(fd), uintptr(cmd), uintptr(arg))
- var err error
- if errno != 0 {
- err = errno
- }
- return int(valptr), err
-}
-
-// FcntlInt performs a fcntl syscall on fd with the provided command and argument.
-func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
- return fcntl(int(fd), cmd, arg)
-}
-
-// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command.
-func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error {
- _, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(unsafe.Pointer(lk)))
- if errno == 0 {
- return nil
- }
- return errno
-}
diff --git a/vendor/golang.org/x/sys/unix/fcntl_darwin.go b/vendor/golang.org/x/sys/unix/fcntl_darwin.go
deleted file mode 100644
index a9911c7c..00000000
--- a/vendor/golang.org/x/sys/unix/fcntl_darwin.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package unix
-
-import "unsafe"
-
-// FcntlInt performs a fcntl syscall on fd with the provided command and argument.
-func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
- return fcntl(int(fd), cmd, arg)
-}
-
-// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command.
-func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error {
- _, err := fcntl(int(fd), cmd, int(uintptr(unsafe.Pointer(lk))))
- return err
-}
-
-// FcntlFstore performs a fcntl syscall for the F_PREALLOCATE command.
-func FcntlFstore(fd uintptr, cmd int, fstore *Fstore_t) error {
- _, err := fcntl(int(fd), cmd, int(uintptr(unsafe.Pointer(fstore))))
- return err
-}
diff --git a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go
deleted file mode 100644
index 29d44808..00000000
--- a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (linux && 386) || (linux && arm) || (linux && mips) || (linux && mipsle) || (linux && ppc)
-// +build linux,386 linux,arm linux,mips linux,mipsle linux,ppc
-
-package unix
-
-func init() {
- // On 32-bit Linux systems, the fcntl syscall that matches Go's
- // Flock_t type is SYS_FCNTL64, not SYS_FCNTL.
- fcntl64Syscall = SYS_FCNTL64
-}
diff --git a/vendor/golang.org/x/sys/unix/fdset.go b/vendor/golang.org/x/sys/unix/fdset.go
deleted file mode 100644
index a8068f94..00000000
--- a/vendor/golang.org/x/sys/unix/fdset.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
-
-package unix
-
-// Set adds fd to the set fds.
-func (fds *FdSet) Set(fd int) {
- fds.Bits[fd/NFDBITS] |= (1 << (uintptr(fd) % NFDBITS))
-}
-
-// Clear removes fd from the set fds.
-func (fds *FdSet) Clear(fd int) {
- fds.Bits[fd/NFDBITS] &^= (1 << (uintptr(fd) % NFDBITS))
-}
-
-// IsSet returns whether fd is in the set fds.
-func (fds *FdSet) IsSet(fd int) bool {
- return fds.Bits[fd/NFDBITS]&(1<<(uintptr(fd)%NFDBITS)) != 0
-}
-
-// Zero clears the set fds.
-func (fds *FdSet) Zero() {
- for i := range fds.Bits {
- fds.Bits[i] = 0
- }
-}
diff --git a/vendor/golang.org/x/sys/unix/fstatfs_zos.go b/vendor/golang.org/x/sys/unix/fstatfs_zos.go
deleted file mode 100644
index e377cc9f..00000000
--- a/vendor/golang.org/x/sys/unix/fstatfs_zos.go
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build zos && s390x
-// +build zos,s390x
-
-package unix
-
-import (
- "unsafe"
-)
-
-// This file simulates fstatfs on z/OS using fstatvfs and w_getmntent.
-
-func Fstatfs(fd int, stat *Statfs_t) (err error) {
- var stat_v Statvfs_t
- err = Fstatvfs(fd, &stat_v)
- if err == nil {
- // populate stat
- stat.Type = 0
- stat.Bsize = stat_v.Bsize
- stat.Blocks = stat_v.Blocks
- stat.Bfree = stat_v.Bfree
- stat.Bavail = stat_v.Bavail
- stat.Files = stat_v.Files
- stat.Ffree = stat_v.Ffree
- stat.Fsid = stat_v.Fsid
- stat.Namelen = stat_v.Namemax
- stat.Frsize = stat_v.Frsize
- stat.Flags = stat_v.Flag
- for passn := 0; passn < 5; passn++ {
- switch passn {
- case 0:
- err = tryGetmntent64(stat)
- break
- case 1:
- err = tryGetmntent128(stat)
- break
- case 2:
- err = tryGetmntent256(stat)
- break
- case 3:
- err = tryGetmntent512(stat)
- break
- case 4:
- err = tryGetmntent1024(stat)
- break
- default:
- break
- }
- //proceed to return if: err is nil (found), err is nonnil but not ERANGE (another error occurred)
- if err == nil || err != nil && err != ERANGE {
- break
- }
- }
- }
- return err
-}
-
-func tryGetmntent64(stat *Statfs_t) (err error) {
- var mnt_ent_buffer struct {
- header W_Mnth
- filesys_info [64]W_Mntent
- }
- var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer))
- fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size)
- if err != nil {
- return err
- }
- err = ERANGE //return ERANGE if no match is found in this batch
- for i := 0; i < fs_count; i++ {
- if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) {
- stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0])
- err = nil
- break
- }
- }
- return err
-}
-
-func tryGetmntent128(stat *Statfs_t) (err error) {
- var mnt_ent_buffer struct {
- header W_Mnth
- filesys_info [128]W_Mntent
- }
- var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer))
- fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size)
- if err != nil {
- return err
- }
- err = ERANGE //return ERANGE if no match is found in this batch
- for i := 0; i < fs_count; i++ {
- if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) {
- stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0])
- err = nil
- break
- }
- }
- return err
-}
-
-func tryGetmntent256(stat *Statfs_t) (err error) {
- var mnt_ent_buffer struct {
- header W_Mnth
- filesys_info [256]W_Mntent
- }
- var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer))
- fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size)
- if err != nil {
- return err
- }
- err = ERANGE //return ERANGE if no match is found in this batch
- for i := 0; i < fs_count; i++ {
- if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) {
- stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0])
- err = nil
- break
- }
- }
- return err
-}
-
-func tryGetmntent512(stat *Statfs_t) (err error) {
- var mnt_ent_buffer struct {
- header W_Mnth
- filesys_info [512]W_Mntent
- }
- var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer))
- fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size)
- if err != nil {
- return err
- }
- err = ERANGE //return ERANGE if no match is found in this batch
- for i := 0; i < fs_count; i++ {
- if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) {
- stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0])
- err = nil
- break
- }
- }
- return err
-}
-
-func tryGetmntent1024(stat *Statfs_t) (err error) {
- var mnt_ent_buffer struct {
- header W_Mnth
- filesys_info [1024]W_Mntent
- }
- var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer))
- fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size)
- if err != nil {
- return err
- }
- err = ERANGE //return ERANGE if no match is found in this batch
- for i := 0; i < fs_count; i++ {
- if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) {
- stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0])
- err = nil
- break
- }
- }
- return err
-}
diff --git a/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/golang.org/x/sys/unix/gccgo.go
deleted file mode 100644
index b06f52d7..00000000
--- a/vendor/golang.org/x/sys/unix/gccgo.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gccgo && !aix && !hurd
-// +build gccgo,!aix,!hurd
-
-package unix
-
-import "syscall"
-
-// We can't use the gc-syntax .s files for gccgo. On the plus side
-// much of the functionality can be written directly in Go.
-
-func realSyscallNoError(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r uintptr)
-
-func realSyscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r, errno uintptr)
-
-func SyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) {
- syscall.Entersyscall()
- r := realSyscallNoError(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
- syscall.Exitsyscall()
- return r, 0
-}
-
-func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
- syscall.Entersyscall()
- r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
- syscall.Exitsyscall()
- return r, 0, syscall.Errno(errno)
-}
-
-func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) {
- syscall.Entersyscall()
- r, errno := realSyscall(trap, a1, a2, a3, a4, a5, a6, 0, 0, 0)
- syscall.Exitsyscall()
- return r, 0, syscall.Errno(errno)
-}
-
-func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) {
- syscall.Entersyscall()
- r, errno := realSyscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9)
- syscall.Exitsyscall()
- return r, 0, syscall.Errno(errno)
-}
-
-func RawSyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) {
- r := realSyscallNoError(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
- return r, 0
-}
-
-func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
- r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
- return r, 0, syscall.Errno(errno)
-}
-
-func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) {
- r, errno := realSyscall(trap, a1, a2, a3, a4, a5, a6, 0, 0, 0)
- return r, 0, syscall.Errno(errno)
-}
diff --git a/vendor/golang.org/x/sys/unix/gccgo_c.c b/vendor/golang.org/x/sys/unix/gccgo_c.c
deleted file mode 100644
index f98a1c54..00000000
--- a/vendor/golang.org/x/sys/unix/gccgo_c.c
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gccgo && !aix && !hurd
-// +build gccgo,!aix,!hurd
-
-#include
-#include
-#include
-
-#define _STRINGIFY2_(x) #x
-#define _STRINGIFY_(x) _STRINGIFY2_(x)
-#define GOSYM_PREFIX _STRINGIFY_(__USER_LABEL_PREFIX__)
-
-// Call syscall from C code because the gccgo support for calling from
-// Go to C does not support varargs functions.
-
-struct ret {
- uintptr_t r;
- uintptr_t err;
-};
-
-struct ret gccgoRealSyscall(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
- __asm__(GOSYM_PREFIX GOPKGPATH ".realSyscall");
-
-struct ret
-gccgoRealSyscall(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
-{
- struct ret r;
-
- errno = 0;
- r.r = syscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9);
- r.err = errno;
- return r;
-}
-
-uintptr_t gccgoRealSyscallNoError(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
- __asm__(GOSYM_PREFIX GOPKGPATH ".realSyscallNoError");
-
-uintptr_t
-gccgoRealSyscallNoError(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
-{
- return syscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9);
-}
diff --git a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
deleted file mode 100644
index e60e49a3..00000000
--- a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gccgo && linux && amd64
-// +build gccgo,linux,amd64
-
-package unix
-
-import "syscall"
-
-//extern gettimeofday
-func realGettimeofday(*Timeval, *byte) int32
-
-func gettimeofday(tv *Timeval) (err syscall.Errno) {
- r := realGettimeofday(tv, nil)
- if r < 0 {
- return syscall.GetErrno()
- }
- return 0
-}
diff --git a/vendor/golang.org/x/sys/unix/ifreq_linux.go b/vendor/golang.org/x/sys/unix/ifreq_linux.go
deleted file mode 100644
index 15721a51..00000000
--- a/vendor/golang.org/x/sys/unix/ifreq_linux.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux
-// +build linux
-
-package unix
-
-import (
- "unsafe"
-)
-
-// Helpers for dealing with ifreq since it contains a union and thus requires a
-// lot of unsafe.Pointer casts to use properly.
-
-// An Ifreq is a type-safe wrapper around the raw ifreq struct. An Ifreq
-// contains an interface name and a union of arbitrary data which can be
-// accessed using the Ifreq's methods. To create an Ifreq, use the NewIfreq
-// function.
-//
-// Use the Name method to access the stored interface name. The union data
-// fields can be get and set using the following methods:
-// - Uint16/SetUint16: flags
-// - Uint32/SetUint32: ifindex, metric, mtu
-type Ifreq struct{ raw ifreq }
-
-// NewIfreq creates an Ifreq with the input network interface name after
-// validating the name does not exceed IFNAMSIZ-1 (trailing NULL required)
-// bytes.
-func NewIfreq(name string) (*Ifreq, error) {
- // Leave room for terminating NULL byte.
- if len(name) >= IFNAMSIZ {
- return nil, EINVAL
- }
-
- var ifr ifreq
- copy(ifr.Ifrn[:], name)
-
- return &Ifreq{raw: ifr}, nil
-}
-
-// TODO(mdlayher): get/set methods for hardware address sockaddr, char array, etc.
-
-// Name returns the interface name associated with the Ifreq.
-func (ifr *Ifreq) Name() string {
- return ByteSliceToString(ifr.raw.Ifrn[:])
-}
-
-// According to netdevice(7), only AF_INET addresses are returned for numerous
-// sockaddr ioctls. For convenience, we expose these as Inet4Addr since the Port
-// field and other data is always empty.
-
-// Inet4Addr returns the Ifreq union data from an embedded sockaddr as a C
-// in_addr/Go []byte (4-byte IPv4 address) value. If the sockaddr family is not
-// AF_INET, an error is returned.
-func (ifr *Ifreq) Inet4Addr() ([]byte, error) {
- raw := *(*RawSockaddrInet4)(unsafe.Pointer(&ifr.raw.Ifru[:SizeofSockaddrInet4][0]))
- if raw.Family != AF_INET {
- // Cannot safely interpret raw.Addr bytes as an IPv4 address.
- return nil, EINVAL
- }
-
- return raw.Addr[:], nil
-}
-
-// SetInet4Addr sets a C in_addr/Go []byte (4-byte IPv4 address) value in an
-// embedded sockaddr within the Ifreq's union data. v must be 4 bytes in length
-// or an error will be returned.
-func (ifr *Ifreq) SetInet4Addr(v []byte) error {
- if len(v) != 4 {
- return EINVAL
- }
-
- var addr [4]byte
- copy(addr[:], v)
-
- ifr.clear()
- *(*RawSockaddrInet4)(
- unsafe.Pointer(&ifr.raw.Ifru[:SizeofSockaddrInet4][0]),
- ) = RawSockaddrInet4{
- // Always set IP family as ioctls would require it anyway.
- Family: AF_INET,
- Addr: addr,
- }
-
- return nil
-}
-
-// Uint16 returns the Ifreq union data as a C short/Go uint16 value.
-func (ifr *Ifreq) Uint16() uint16 {
- return *(*uint16)(unsafe.Pointer(&ifr.raw.Ifru[:2][0]))
-}
-
-// SetUint16 sets a C short/Go uint16 value as the Ifreq's union data.
-func (ifr *Ifreq) SetUint16(v uint16) {
- ifr.clear()
- *(*uint16)(unsafe.Pointer(&ifr.raw.Ifru[:2][0])) = v
-}
-
-// Uint32 returns the Ifreq union data as a C int/Go uint32 value.
-func (ifr *Ifreq) Uint32() uint32 {
- return *(*uint32)(unsafe.Pointer(&ifr.raw.Ifru[:4][0]))
-}
-
-// SetUint32 sets a C int/Go uint32 value as the Ifreq's union data.
-func (ifr *Ifreq) SetUint32(v uint32) {
- ifr.clear()
- *(*uint32)(unsafe.Pointer(&ifr.raw.Ifru[:4][0])) = v
-}
-
-// clear zeroes the ifreq's union field to prevent trailing garbage data from
-// being sent to the kernel if an ifreq is reused.
-func (ifr *Ifreq) clear() {
- for i := range ifr.raw.Ifru {
- ifr.raw.Ifru[i] = 0
- }
-}
-
-// TODO(mdlayher): export as IfreqData? For now we can provide helpers such as
-// IoctlGetEthtoolDrvinfo which use these APIs under the hood.
-
-// An ifreqData is an Ifreq which carries pointer data. To produce an ifreqData,
-// use the Ifreq.withData method.
-type ifreqData struct {
- name [IFNAMSIZ]byte
- // A type separate from ifreq is required in order to comply with the
- // unsafe.Pointer rules since the "pointer-ness" of data would not be
- // preserved if it were cast into the byte array of a raw ifreq.
- data unsafe.Pointer
- // Pad to the same size as ifreq.
- _ [len(ifreq{}.Ifru) - SizeofPtr]byte
-}
-
-// withData produces an ifreqData with the pointer p set for ioctls which require
-// arbitrary pointer data.
-func (ifr Ifreq) withData(p unsafe.Pointer) ifreqData {
- return ifreqData{
- name: ifr.raw.Ifrn,
- data: p,
- }
-}
diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go
deleted file mode 100644
index 0d12c085..00000000
--- a/vendor/golang.org/x/sys/unix/ioctl_linux.go
+++ /dev/null
@@ -1,233 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package unix
-
-import "unsafe"
-
-// IoctlRetInt performs an ioctl operation specified by req on a device
-// associated with opened file descriptor fd, and returns a non-negative
-// integer that is returned by the ioctl syscall.
-func IoctlRetInt(fd int, req uint) (int, error) {
- ret, _, err := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), 0)
- if err != 0 {
- return 0, err
- }
- return int(ret), nil
-}
-
-func IoctlGetUint32(fd int, req uint) (uint32, error) {
- var value uint32
- err := ioctlPtr(fd, req, unsafe.Pointer(&value))
- return value, err
-}
-
-func IoctlGetRTCTime(fd int) (*RTCTime, error) {
- var value RTCTime
- err := ioctlPtr(fd, RTC_RD_TIME, unsafe.Pointer(&value))
- return &value, err
-}
-
-func IoctlSetRTCTime(fd int, value *RTCTime) error {
- return ioctlPtr(fd, RTC_SET_TIME, unsafe.Pointer(value))
-}
-
-func IoctlGetRTCWkAlrm(fd int) (*RTCWkAlrm, error) {
- var value RTCWkAlrm
- err := ioctlPtr(fd, RTC_WKALM_RD, unsafe.Pointer(&value))
- return &value, err
-}
-
-func IoctlSetRTCWkAlrm(fd int, value *RTCWkAlrm) error {
- return ioctlPtr(fd, RTC_WKALM_SET, unsafe.Pointer(value))
-}
-
-// IoctlGetEthtoolDrvinfo fetches ethtool driver information for the network
-// device specified by ifname.
-func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) {
- ifr, err := NewIfreq(ifname)
- if err != nil {
- return nil, err
- }
-
- value := EthtoolDrvinfo{Cmd: ETHTOOL_GDRVINFO}
- ifrd := ifr.withData(unsafe.Pointer(&value))
-
- err = ioctlIfreqData(fd, SIOCETHTOOL, &ifrd)
- return &value, err
-}
-
-// IoctlGetWatchdogInfo fetches information about a watchdog device from the
-// Linux watchdog API. For more information, see:
-// https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html.
-func IoctlGetWatchdogInfo(fd int) (*WatchdogInfo, error) {
- var value WatchdogInfo
- err := ioctlPtr(fd, WDIOC_GETSUPPORT, unsafe.Pointer(&value))
- return &value, err
-}
-
-// IoctlWatchdogKeepalive issues a keepalive ioctl to a watchdog device. For
-// more information, see:
-// https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html.
-func IoctlWatchdogKeepalive(fd int) error {
- // arg is ignored and not a pointer, so ioctl is fine instead of ioctlPtr.
- return ioctl(fd, WDIOC_KEEPALIVE, 0)
-}
-
-// IoctlFileCloneRange performs an FICLONERANGE ioctl operation to clone the
-// range of data conveyed in value to the file associated with the file
-// descriptor destFd. See the ioctl_ficlonerange(2) man page for details.
-func IoctlFileCloneRange(destFd int, value *FileCloneRange) error {
- return ioctlPtr(destFd, FICLONERANGE, unsafe.Pointer(value))
-}
-
-// IoctlFileClone performs an FICLONE ioctl operation to clone the entire file
-// associated with the file description srcFd to the file associated with the
-// file descriptor destFd. See the ioctl_ficlone(2) man page for details.
-func IoctlFileClone(destFd, srcFd int) error {
- return ioctl(destFd, FICLONE, uintptr(srcFd))
-}
-
-type FileDedupeRange struct {
- Src_offset uint64
- Src_length uint64
- Reserved1 uint16
- Reserved2 uint32
- Info []FileDedupeRangeInfo
-}
-
-type FileDedupeRangeInfo struct {
- Dest_fd int64
- Dest_offset uint64
- Bytes_deduped uint64
- Status int32
- Reserved uint32
-}
-
-// IoctlFileDedupeRange performs an FIDEDUPERANGE ioctl operation to share the
-// range of data conveyed in value from the file associated with the file
-// descriptor srcFd to the value.Info destinations. See the
-// ioctl_fideduperange(2) man page for details.
-func IoctlFileDedupeRange(srcFd int, value *FileDedupeRange) error {
- buf := make([]byte, SizeofRawFileDedupeRange+
- len(value.Info)*SizeofRawFileDedupeRangeInfo)
- rawrange := (*RawFileDedupeRange)(unsafe.Pointer(&buf[0]))
- rawrange.Src_offset = value.Src_offset
- rawrange.Src_length = value.Src_length
- rawrange.Dest_count = uint16(len(value.Info))
- rawrange.Reserved1 = value.Reserved1
- rawrange.Reserved2 = value.Reserved2
-
- for i := range value.Info {
- rawinfo := (*RawFileDedupeRangeInfo)(unsafe.Pointer(
- uintptr(unsafe.Pointer(&buf[0])) + uintptr(SizeofRawFileDedupeRange) +
- uintptr(i*SizeofRawFileDedupeRangeInfo)))
- rawinfo.Dest_fd = value.Info[i].Dest_fd
- rawinfo.Dest_offset = value.Info[i].Dest_offset
- rawinfo.Bytes_deduped = value.Info[i].Bytes_deduped
- rawinfo.Status = value.Info[i].Status
- rawinfo.Reserved = value.Info[i].Reserved
- }
-
- err := ioctlPtr(srcFd, FIDEDUPERANGE, unsafe.Pointer(&buf[0]))
-
- // Output
- for i := range value.Info {
- rawinfo := (*RawFileDedupeRangeInfo)(unsafe.Pointer(
- uintptr(unsafe.Pointer(&buf[0])) + uintptr(SizeofRawFileDedupeRange) +
- uintptr(i*SizeofRawFileDedupeRangeInfo)))
- value.Info[i].Dest_fd = rawinfo.Dest_fd
- value.Info[i].Dest_offset = rawinfo.Dest_offset
- value.Info[i].Bytes_deduped = rawinfo.Bytes_deduped
- value.Info[i].Status = rawinfo.Status
- value.Info[i].Reserved = rawinfo.Reserved
- }
-
- return err
-}
-
-func IoctlHIDGetDesc(fd int, value *HIDRawReportDescriptor) error {
- return ioctlPtr(fd, HIDIOCGRDESC, unsafe.Pointer(value))
-}
-
-func IoctlHIDGetRawInfo(fd int) (*HIDRawDevInfo, error) {
- var value HIDRawDevInfo
- err := ioctlPtr(fd, HIDIOCGRAWINFO, unsafe.Pointer(&value))
- return &value, err
-}
-
-func IoctlHIDGetRawName(fd int) (string, error) {
- var value [_HIDIOCGRAWNAME_LEN]byte
- err := ioctlPtr(fd, _HIDIOCGRAWNAME, unsafe.Pointer(&value[0]))
- return ByteSliceToString(value[:]), err
-}
-
-func IoctlHIDGetRawPhys(fd int) (string, error) {
- var value [_HIDIOCGRAWPHYS_LEN]byte
- err := ioctlPtr(fd, _HIDIOCGRAWPHYS, unsafe.Pointer(&value[0]))
- return ByteSliceToString(value[:]), err
-}
-
-func IoctlHIDGetRawUniq(fd int) (string, error) {
- var value [_HIDIOCGRAWUNIQ_LEN]byte
- err := ioctlPtr(fd, _HIDIOCGRAWUNIQ, unsafe.Pointer(&value[0]))
- return ByteSliceToString(value[:]), err
-}
-
-// IoctlIfreq performs an ioctl using an Ifreq structure for input and/or
-// output. See the netdevice(7) man page for details.
-func IoctlIfreq(fd int, req uint, value *Ifreq) error {
- // It is possible we will add more fields to *Ifreq itself later to prevent
- // misuse, so pass the raw *ifreq directly.
- return ioctlPtr(fd, req, unsafe.Pointer(&value.raw))
-}
-
-// TODO(mdlayher): export if and when IfreqData is exported.
-
-// ioctlIfreqData performs an ioctl using an ifreqData structure for input
-// and/or output. See the netdevice(7) man page for details.
-func ioctlIfreqData(fd int, req uint, value *ifreqData) error {
- // The memory layout of IfreqData (type-safe) and ifreq (not type-safe) are
- // identical so pass *IfreqData directly.
- return ioctlPtr(fd, req, unsafe.Pointer(value))
-}
-
-// IoctlKCMClone attaches a new file descriptor to a multiplexor by cloning an
-// existing KCM socket, returning a structure containing the file descriptor of
-// the new socket.
-func IoctlKCMClone(fd int) (*KCMClone, error) {
- var info KCMClone
- if err := ioctlPtr(fd, SIOCKCMCLONE, unsafe.Pointer(&info)); err != nil {
- return nil, err
- }
-
- return &info, nil
-}
-
-// IoctlKCMAttach attaches a TCP socket and associated BPF program file
-// descriptor to a multiplexor.
-func IoctlKCMAttach(fd int, info KCMAttach) error {
- return ioctlPtr(fd, SIOCKCMATTACH, unsafe.Pointer(&info))
-}
-
-// IoctlKCMUnattach unattaches a TCP socket file descriptor from a multiplexor.
-func IoctlKCMUnattach(fd int, info KCMUnattach) error {
- return ioctlPtr(fd, SIOCKCMUNATTACH, unsafe.Pointer(&info))
-}
-
-// IoctlLoopGetStatus64 gets the status of the loop device associated with the
-// file descriptor fd using the LOOP_GET_STATUS64 operation.
-func IoctlLoopGetStatus64(fd int) (*LoopInfo64, error) {
- var value LoopInfo64
- if err := ioctlPtr(fd, LOOP_GET_STATUS64, unsafe.Pointer(&value)); err != nil {
- return nil, err
- }
- return &value, nil
-}
-
-// IoctlLoopSetStatus64 sets the status of the loop device associated with the
-// file descriptor fd using the LOOP_SET_STATUS64 operation.
-func IoctlLoopSetStatus64(fd int, value *LoopInfo64) error {
- return ioctlPtr(fd, LOOP_SET_STATUS64, unsafe.Pointer(value))
-}
diff --git a/vendor/golang.org/x/sys/unix/ioctl_signed.go b/vendor/golang.org/x/sys/unix/ioctl_signed.go
deleted file mode 100644
index 7def9580..00000000
--- a/vendor/golang.org/x/sys/unix/ioctl_signed.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || solaris
-// +build aix solaris
-
-package unix
-
-import (
- "unsafe"
-)
-
-// ioctl itself should not be exposed directly, but additional get/set
-// functions for specific types are permissible.
-
-// IoctlSetInt performs an ioctl operation which sets an integer value
-// on fd, using the specified request number.
-func IoctlSetInt(fd int, req int, value int) error {
- return ioctl(fd, req, uintptr(value))
-}
-
-// IoctlSetPointerInt performs an ioctl operation which sets an
-// integer value on fd, using the specified request number. The ioctl
-// argument is called with a pointer to the integer value, rather than
-// passing the integer value directly.
-func IoctlSetPointerInt(fd int, req int, value int) error {
- v := int32(value)
- return ioctlPtr(fd, req, unsafe.Pointer(&v))
-}
-
-// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument.
-//
-// To change fd's window size, the req argument should be TIOCSWINSZ.
-func IoctlSetWinsize(fd int, req int, value *Winsize) error {
- // TODO: if we get the chance, remove the req parameter and
- // hardcode TIOCSWINSZ.
- return ioctlPtr(fd, req, unsafe.Pointer(value))
-}
-
-// IoctlSetTermios performs an ioctl on fd with a *Termios.
-//
-// The req value will usually be TCSETA or TIOCSETA.
-func IoctlSetTermios(fd int, req int, value *Termios) error {
- // TODO: if we get the chance, remove the req parameter.
- return ioctlPtr(fd, req, unsafe.Pointer(value))
-}
-
-// IoctlGetInt performs an ioctl operation which gets an integer value
-// from fd, using the specified request number.
-//
-// A few ioctl requests use the return value as an output parameter;
-// for those, IoctlRetInt should be used instead of this function.
-func IoctlGetInt(fd int, req int) (int, error) {
- var value int
- err := ioctlPtr(fd, req, unsafe.Pointer(&value))
- return value, err
-}
-
-func IoctlGetWinsize(fd int, req int) (*Winsize, error) {
- var value Winsize
- err := ioctlPtr(fd, req, unsafe.Pointer(&value))
- return &value, err
-}
-
-func IoctlGetTermios(fd int, req int) (*Termios, error) {
- var value Termios
- err := ioctlPtr(fd, req, unsafe.Pointer(&value))
- return &value, err
-}
diff --git a/vendor/golang.org/x/sys/unix/ioctl_unsigned.go b/vendor/golang.org/x/sys/unix/ioctl_unsigned.go
deleted file mode 100644
index 649913d1..00000000
--- a/vendor/golang.org/x/sys/unix/ioctl_unsigned.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd
-// +build darwin dragonfly freebsd hurd linux netbsd openbsd
-
-package unix
-
-import (
- "unsafe"
-)
-
-// ioctl itself should not be exposed directly, but additional get/set
-// functions for specific types are permissible.
-
-// IoctlSetInt performs an ioctl operation which sets an integer value
-// on fd, using the specified request number.
-func IoctlSetInt(fd int, req uint, value int) error {
- return ioctl(fd, req, uintptr(value))
-}
-
-// IoctlSetPointerInt performs an ioctl operation which sets an
-// integer value on fd, using the specified request number. The ioctl
-// argument is called with a pointer to the integer value, rather than
-// passing the integer value directly.
-func IoctlSetPointerInt(fd int, req uint, value int) error {
- v := int32(value)
- return ioctlPtr(fd, req, unsafe.Pointer(&v))
-}
-
-// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument.
-//
-// To change fd's window size, the req argument should be TIOCSWINSZ.
-func IoctlSetWinsize(fd int, req uint, value *Winsize) error {
- // TODO: if we get the chance, remove the req parameter and
- // hardcode TIOCSWINSZ.
- return ioctlPtr(fd, req, unsafe.Pointer(value))
-}
-
-// IoctlSetTermios performs an ioctl on fd with a *Termios.
-//
-// The req value will usually be TCSETA or TIOCSETA.
-func IoctlSetTermios(fd int, req uint, value *Termios) error {
- // TODO: if we get the chance, remove the req parameter.
- return ioctlPtr(fd, req, unsafe.Pointer(value))
-}
-
-// IoctlGetInt performs an ioctl operation which gets an integer value
-// from fd, using the specified request number.
-//
-// A few ioctl requests use the return value as an output parameter;
-// for those, IoctlRetInt should be used instead of this function.
-func IoctlGetInt(fd int, req uint) (int, error) {
- var value int
- err := ioctlPtr(fd, req, unsafe.Pointer(&value))
- return value, err
-}
-
-func IoctlGetWinsize(fd int, req uint) (*Winsize, error) {
- var value Winsize
- err := ioctlPtr(fd, req, unsafe.Pointer(&value))
- return &value, err
-}
-
-func IoctlGetTermios(fd int, req uint) (*Termios, error) {
- var value Termios
- err := ioctlPtr(fd, req, unsafe.Pointer(&value))
- return &value, err
-}
diff --git a/vendor/golang.org/x/sys/unix/ioctl_zos.go b/vendor/golang.org/x/sys/unix/ioctl_zos.go
deleted file mode 100644
index cdc21bf7..00000000
--- a/vendor/golang.org/x/sys/unix/ioctl_zos.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build zos && s390x
-// +build zos,s390x
-
-package unix
-
-import (
- "runtime"
- "unsafe"
-)
-
-// ioctl itself should not be exposed directly, but additional get/set
-// functions for specific types are permissible.
-
-// IoctlSetInt performs an ioctl operation which sets an integer value
-// on fd, using the specified request number.
-func IoctlSetInt(fd int, req int, value int) error {
- return ioctl(fd, req, uintptr(value))
-}
-
-// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument.
-//
-// To change fd's window size, the req argument should be TIOCSWINSZ.
-func IoctlSetWinsize(fd int, req int, value *Winsize) error {
- // TODO: if we get the chance, remove the req parameter and
- // hardcode TIOCSWINSZ.
- return ioctlPtr(fd, req, unsafe.Pointer(value))
-}
-
-// IoctlSetTermios performs an ioctl on fd with a *Termios.
-//
-// The req value is expected to be TCSETS, TCSETSW, or TCSETSF
-func IoctlSetTermios(fd int, req int, value *Termios) error {
- if (req != TCSETS) && (req != TCSETSW) && (req != TCSETSF) {
- return ENOSYS
- }
- err := Tcsetattr(fd, int(req), value)
- runtime.KeepAlive(value)
- return err
-}
-
-// IoctlGetInt performs an ioctl operation which gets an integer value
-// from fd, using the specified request number.
-//
-// A few ioctl requests use the return value as an output parameter;
-// for those, IoctlRetInt should be used instead of this function.
-func IoctlGetInt(fd int, req int) (int, error) {
- var value int
- err := ioctlPtr(fd, req, unsafe.Pointer(&value))
- return value, err
-}
-
-func IoctlGetWinsize(fd int, req int) (*Winsize, error) {
- var value Winsize
- err := ioctlPtr(fd, req, unsafe.Pointer(&value))
- return &value, err
-}
-
-// IoctlGetTermios performs an ioctl on fd with a *Termios.
-//
-// The req value is expected to be TCGETS
-func IoctlGetTermios(fd int, req int) (*Termios, error) {
- var value Termios
- if req != TCGETS {
- return &value, ENOSYS
- }
- err := Tcgetattr(fd, &value)
- return &value, err
-}
diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh
deleted file mode 100644
index e6f31d37..00000000
--- a/vendor/golang.org/x/sys/unix/mkall.sh
+++ /dev/null
@@ -1,249 +0,0 @@
-#!/usr/bin/env bash
-# Copyright 2009 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-# This script runs or (given -n) prints suggested commands to generate files for
-# the Architecture/OS specified by the GOARCH and GOOS environment variables.
-# See README.md for more information about how the build system works.
-
-GOOSARCH="${GOOS}_${GOARCH}"
-
-# defaults
-mksyscall="go run mksyscall.go"
-mkerrors="./mkerrors.sh"
-zerrors="zerrors_$GOOSARCH.go"
-mksysctl=""
-zsysctl="zsysctl_$GOOSARCH.go"
-mksysnum=
-mktypes=
-mkasm=
-run="sh"
-cmd=""
-
-case "$1" in
--syscalls)
- for i in zsyscall*go
- do
- # Run the command line that appears in the first line
- # of the generated file to regenerate it.
- sed 1q $i | sed 's;^// ;;' | sh > _$i && gofmt < _$i > $i
- rm _$i
- done
- exit 0
- ;;
--n)
- run="cat"
- cmd="echo"
- shift
-esac
-
-case "$#" in
-0)
- ;;
-*)
- echo 'usage: mkall.sh [-n]' 1>&2
- exit 2
-esac
-
-if [[ "$GOOS" = "linux" ]]; then
- # Use the Docker-based build system
- # Files generated through docker (use $cmd so you can Ctl-C the build or run)
- $cmd docker build --tag generate:$GOOS $GOOS
- $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && pwd):/build generate:$GOOS
- exit
-fi
-
-GOOSARCH_in=syscall_$GOOSARCH.go
-case "$GOOSARCH" in
-_* | *_ | _)
- echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2
- exit 1
- ;;
-aix_ppc)
- mkerrors="$mkerrors -maix32"
- mksyscall="go run mksyscall_aix_ppc.go -aix"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-aix_ppc64)
- mkerrors="$mkerrors -maix64"
- mksyscall="go run mksyscall_aix_ppc64.go -aix"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-darwin_amd64)
- mkerrors="$mkerrors -m64"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- mkasm="go run mkasm.go"
- ;;
-darwin_arm64)
- mkerrors="$mkerrors -m64"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- mkasm="go run mkasm.go"
- ;;
-dragonfly_amd64)
- mkerrors="$mkerrors -m64"
- mksyscall="go run mksyscall.go -dragonfly"
- mksysnum="go run mksysnum.go 'https://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master'"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-freebsd_386)
- mkerrors="$mkerrors -m32"
- mksyscall="go run mksyscall.go -l32"
- mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-freebsd_amd64)
- mkerrors="$mkerrors -m64"
- mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-freebsd_arm)
- mkerrors="$mkerrors"
- mksyscall="go run mksyscall.go -l32 -arm"
- mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'"
- # Let the type of C char be signed for making the bare syscall
- # API consistent across platforms.
- mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
- ;;
-freebsd_arm64)
- mkerrors="$mkerrors -m64"
- mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
- ;;
-freebsd_riscv64)
- mkerrors="$mkerrors -m64"
- mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
- ;;
-netbsd_386)
- mkerrors="$mkerrors -m32"
- mksyscall="go run mksyscall.go -l32 -netbsd"
- mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-netbsd_amd64)
- mkerrors="$mkerrors -m64"
- mksyscall="go run mksyscall.go -netbsd"
- mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-netbsd_arm)
- mkerrors="$mkerrors"
- mksyscall="go run mksyscall.go -l32 -netbsd -arm"
- mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'"
- # Let the type of C char be signed for making the bare syscall
- # API consistent across platforms.
- mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
- ;;
-netbsd_arm64)
- mkerrors="$mkerrors -m64"
- mksyscall="go run mksyscall.go -netbsd"
- mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-openbsd_386)
- mkasm="go run mkasm.go"
- mkerrors="$mkerrors -m32"
- mksyscall="go run mksyscall.go -l32 -openbsd -libc"
- mksysctl="go run mksysctl_openbsd.go"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-openbsd_amd64)
- mkasm="go run mkasm.go"
- mkerrors="$mkerrors -m64"
- mksyscall="go run mksyscall.go -openbsd -libc"
- mksysctl="go run mksysctl_openbsd.go"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-openbsd_arm)
- mkasm="go run mkasm.go"
- mkerrors="$mkerrors"
- mksyscall="go run mksyscall.go -l32 -openbsd -arm -libc"
- mksysctl="go run mksysctl_openbsd.go"
- # Let the type of C char be signed for making the bare syscall
- # API consistent across platforms.
- mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
- ;;
-openbsd_arm64)
- mkasm="go run mkasm.go"
- mkerrors="$mkerrors -m64"
- mksyscall="go run mksyscall.go -openbsd -libc"
- mksysctl="go run mksysctl_openbsd.go"
- # Let the type of C char be signed for making the bare syscall
- # API consistent across platforms.
- mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
- ;;
-openbsd_mips64)
- mkasm="go run mkasm.go"
- mkerrors="$mkerrors -m64"
- mksyscall="go run mksyscall.go -openbsd -libc"
- mksysctl="go run mksysctl_openbsd.go"
- # Let the type of C char be signed for making the bare syscall
- # API consistent across platforms.
- mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
- ;;
-openbsd_ppc64)
- mkasm="go run mkasm.go"
- mkerrors="$mkerrors -m64"
- mksyscall="go run mksyscall.go -openbsd -libc"
- mksysctl="go run mksysctl_openbsd.go"
- # Let the type of C char be signed for making the bare syscall
- # API consistent across platforms.
- mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
- ;;
-openbsd_riscv64)
- mkasm="go run mkasm.go"
- mkerrors="$mkerrors -m64"
- mksyscall="go run mksyscall.go -openbsd -libc"
- mksysctl="go run mksysctl_openbsd.go"
- # Let the type of C char be signed for making the bare syscall
- # API consistent across platforms.
- mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
- ;;
-solaris_amd64)
- mksyscall="go run mksyscall_solaris.go"
- mkerrors="$mkerrors -m64"
- mksysnum=
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-illumos_amd64)
- mksyscall="go run mksyscall_solaris.go"
- mkerrors=
- mksysnum=
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-*)
- echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2
- exit 1
- ;;
-esac
-
-(
- if [ -n "$mkerrors" ]; then echo "$mkerrors |gofmt >$zerrors"; fi
- case "$GOOS" in
- *)
- syscall_goos="syscall_$GOOS.go"
- case "$GOOS" in
- darwin | dragonfly | freebsd | netbsd | openbsd)
- syscall_goos="syscall_bsd.go $syscall_goos"
- ;;
- esac
- if [ -n "$mksyscall" ]; then
- if [ "$GOOSARCH" == "aix_ppc64" ]; then
- # aix/ppc64 script generates files instead of writing to stdin.
- echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in && gofmt -w zsyscall_$GOOSARCH.go && gofmt -w zsyscall_"$GOOSARCH"_gccgo.go && gofmt -w zsyscall_"$GOOSARCH"_gc.go " ;
- elif [ "$GOOS" == "illumos" ]; then
- # illumos code generation requires a --illumos switch
- echo "$mksyscall -illumos -tags illumos,$GOARCH syscall_illumos.go |gofmt > zsyscall_illumos_$GOARCH.go";
- # illumos implies solaris, so solaris code generation is also required
- echo "$mksyscall -tags solaris,$GOARCH syscall_solaris.go syscall_solaris_$GOARCH.go |gofmt >zsyscall_solaris_$GOARCH.go";
- else
- echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go";
- fi
- fi
- esac
- if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi
- if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi
- if [ -n "$mktypes" ]; then echo "$mktypes types_$GOOS.go | go run mkpost.go > ztypes_$GOOSARCH.go"; fi
- if [ -n "$mkasm" ]; then echo "$mkasm $GOOS $GOARCH"; fi
-) | $run
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
deleted file mode 100644
index 47fa6a7e..00000000
--- a/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ /dev/null
@@ -1,784 +0,0 @@
-#!/usr/bin/env bash
-# Copyright 2009 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-# Generate Go code listing errors and other #defined constant
-# values (ENAMETOOLONG etc.), by asking the preprocessor
-# about the definitions.
-
-unset LANG
-export LC_ALL=C
-export LC_CTYPE=C
-
-if test -z "$GOARCH" -o -z "$GOOS"; then
- echo 1>&2 "GOARCH or GOOS not defined in environment"
- exit 1
-fi
-
-# Check that we are using the new build system if we should
-if [[ "$GOOS" = "linux" ]] && [[ "$GOLANG_SYS_BUILD" != "docker" ]]; then
- echo 1>&2 "In the Docker based build system, mkerrors should not be called directly."
- echo 1>&2 "See README.md"
- exit 1
-fi
-
-if [[ "$GOOS" = "aix" ]]; then
- CC=${CC:-gcc}
-else
- CC=${CC:-cc}
-fi
-
-if [[ "$GOOS" = "solaris" ]]; then
- # Assumes GNU versions of utilities in PATH.
- export PATH=/usr/gnu/bin:$PATH
-fi
-
-uname=$(uname)
-
-includes_AIX='
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#define AF_LOCAL AF_UNIX
-'
-
-includes_Darwin='
-#define _DARWIN_C_SOURCE
-#define KERNEL 1
-#define _DARWIN_USE_64_BIT_INODE
-#define __APPLE_USE_RFC_3542
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-// for backwards compatibility because moved TIOCREMOTE to Kernel.framework after MacOSX12.0.sdk.
-#define TIOCREMOTE 0x80047469
-'
-
-includes_DragonFly='
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-'
-
-includes_FreeBSD='
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#if __FreeBSD__ >= 10
-#define IFT_CARP 0xf8 // IFT_CARP is deprecated in FreeBSD 10
-#undef SIOCAIFADDR
-#define SIOCAIFADDR _IOW(105, 26, struct oifaliasreq) // ifaliasreq contains if_data
-#undef SIOCSIFPHYADDR
-#define SIOCSIFPHYADDR _IOW(105, 70, struct oifaliasreq) // ifaliasreq contains if_data
-#endif
-'
-
-includes_Linux='
-#define _LARGEFILE_SOURCE
-#define _LARGEFILE64_SOURCE
-#ifndef __LP64__
-#define _FILE_OFFSET_BITS 64
-#endif
-#define _GNU_SOURCE
-
-// is broken on powerpc64, as it fails to include definitions of
-// these structures. We just include them copied from .
-#if defined(__powerpc__)
-struct sgttyb {
- char sg_ispeed;
- char sg_ospeed;
- char sg_erase;
- char sg_kill;
- short sg_flags;
-};
-
-struct tchars {
- char t_intrc;
- char t_quitc;
- char t_startc;
- char t_stopc;
- char t_eofc;
- char t_brkc;
-};
-
-struct ltchars {
- char t_suspc;
- char t_dsuspc;
- char t_rprntc;
- char t_flushc;
- char t_werasc;
- char t_lnextc;
-};
-#endif
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include