
remotecache: support SSL with redis (#18511)

* update go-redis lib from v2 -> v5
* add ssl option to the redis connection string
fixes #18498
Kyle Brandt committed 6 years ago
Commit f689b60426
61 files changed, 8202 additions, 4734 deletions
  1. conf/defaults.ini (+1, -1)
  2. conf/sample.ini (+1, -1)
  3. docs/sources/installation/configuration.md (+18, -3)
  4. go.mod (+2, -3)
  5. go.sum (+2, -4)
  6. pkg/infra/remotecache/redis_storage.go (+24, -4)
  7. pkg/infra/remotecache/redis_storage_test.go (+42, -7)
  8. vendor/gopkg.in/bufio.v1/Makefile (+0, -2)
  9. vendor/gopkg.in/bufio.v1/README.md (+0, -4)
  10. vendor/gopkg.in/bufio.v1/buffer.go (+0, -413)
  11. vendor/gopkg.in/bufio.v1/bufio.go (+0, -728)
  12. vendor/gopkg.in/redis.v2/LICENSE (+0, -27)
  13. vendor/gopkg.in/redis.v2/Makefile (+0, -3)
  14. vendor/gopkg.in/redis.v2/README.md (+0, -46)
  15. vendor/gopkg.in/redis.v2/command.go (+0, -597)
  16. vendor/gopkg.in/redis.v2/commands.go (+0, -1246)
  17. vendor/gopkg.in/redis.v2/error.go (+0, -23)
  18. vendor/gopkg.in/redis.v2/multi.go (+0, -138)
  19. vendor/gopkg.in/redis.v2/parser.go (+0, -262)
  20. vendor/gopkg.in/redis.v2/pipeline.go (+0, -91)
  21. vendor/gopkg.in/redis.v2/pool.go (+0, -405)
  22. vendor/gopkg.in/redis.v2/pubsub.go (+0, -134)
  23. vendor/gopkg.in/redis.v2/rate_limit.go (+0, -53)
  24. vendor/gopkg.in/redis.v2/redis.go (+0, -231)
  25. vendor/gopkg.in/redis.v2/sentinel.go (+0, -291)
  26. vendor/gopkg.in/redis.v5/.gitignore (+2, -0)
  27. vendor/gopkg.in/redis.v5/LICENSE (+2, -4)
  28. vendor/gopkg.in/redis.v5/Makefile (+19, -0)
  29. vendor/gopkg.in/redis.v5/README.md (+136, -0)
  30. vendor/gopkg.in/redis.v5/cluster.go (+940, -0)
  31. vendor/gopkg.in/redis.v5/command.go (+956, -0)
  32. vendor/gopkg.in/redis.v5/commands.go (+2078, -0)
  33. vendor/gopkg.in/redis.v5/doc.go (+0, -0)
  34. vendor/gopkg.in/redis.v5/internal/consistenthash/consistenthash.go (+81, -0)
  35. vendor/gopkg.in/redis.v5/internal/errors.go (+75, -0)
  36. vendor/gopkg.in/redis.v5/internal/hashtag/hashtag.go (+73, -0)
  37. vendor/gopkg.in/redis.v5/internal/log.go (+15, -0)
  38. vendor/gopkg.in/redis.v5/internal/pool/conn.go (+78, -0)
  39. vendor/gopkg.in/redis.v5/internal/pool/pool.go (+354, -0)
  40. vendor/gopkg.in/redis.v5/internal/pool/pool_single.go (+47, -0)
  41. vendor/gopkg.in/redis.v5/internal/pool/pool_sticky.go (+119, -0)
  42. vendor/gopkg.in/redis.v5/internal/proto/reader.go (+334, -0)
  43. vendor/gopkg.in/redis.v5/internal/proto/scan.go (+131, -0)
  44. vendor/gopkg.in/redis.v5/internal/proto/write_buffer.go (+105, -0)
  45. vendor/gopkg.in/redis.v5/internal/safe.go (+7, -0)
  46. vendor/gopkg.in/redis.v5/internal/unsafe.go (+14, -0)
  47. vendor/gopkg.in/redis.v5/internal/util.go (+47, -0)
  48. vendor/gopkg.in/redis.v5/iterator.go (+73, -0)
  49. vendor/gopkg.in/redis.v5/options.go (+185, -0)
  50. vendor/gopkg.in/redis.v5/parser.go (+400, -0)
  51. vendor/gopkg.in/redis.v5/pipeline.go (+88, -0)
  52. vendor/gopkg.in/redis.v5/pubsub.go (+311, -0)
  53. vendor/gopkg.in/redis.v5/redis.go (+378, -0)
  54. vendor/gopkg.in/redis.v5/redis_context.go (+35, -0)
  55. vendor/gopkg.in/redis.v5/redis_no_context.go (+15, -0)
  56. vendor/gopkg.in/redis.v5/result.go (+140, -0)
  57. vendor/gopkg.in/redis.v5/ring.go (+420, -0)
  58. vendor/gopkg.in/redis.v5/script.go (+13, -9)
  59. vendor/gopkg.in/redis.v5/sentinel.go (+335, -0)
  60. vendor/gopkg.in/redis.v5/tx.go (+99, -0)
  61. vendor/modules.txt (+7, -4)

+ 1 - 1
conf/defaults.ini

@@ -116,7 +116,7 @@ type = database
 
 # cache connectionstring options
 # database: will use Grafana primary database.
-# redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=0`. Only addr is required.
+# redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=0,ssl=false`. Only addr is required. ssl may be 'true', 'false', or 'insecure'.
 # memcache: 127.0.0.1:11211
 connstr =
 

+ 1 - 1
conf/sample.ini

@@ -112,7 +112,7 @@
 
 # cache connectionstring options
 # database: will use Grafana primary database.
-# redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=0`. Only addr is required.
+# redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=0,ssl=false`. Only addr is required. ssl may be 'true', 'false', or 'insecure'.
 # memcache: 127.0.0.1:11211
 ;connstr =
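
The `connstr` documented in both ini files above is a comma-separated list of `key=value` options, of which only `addr` is required. As a minimal illustrative sketch (not code from this commit, though `parseRedisConnStr` below uses the same `strings.Split`/`SplitN` approach), the string can be tokenized like this:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical connstr value, matching the example in defaults.ini/sample.ini.
	connStr := "addr=127.0.0.1:6379,pool_size=100,db=0,ssl=false"
	for _, pair := range strings.Split(connStr, ",") {
		kv := strings.SplitN(pair, "=", 2)
		if len(kv) != 2 {
			fmt.Printf("malformed option %q\n", pair)
			continue
		}
		fmt.Printf("key=%q value=%q\n", kv[0], kv[1])
	}
}
```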
 

+ 18 - 3
docs/sources/installation/configuration.md

@@ -281,9 +281,24 @@ Either `redis`, `memcached` or `database` default is `database`
 
 ### connstr
 
-The remote cache connection string. Leave empty when using `database` since it will use the primary database.
-Redis example config: `addr=127.0.0.1:6379,pool_size=100,db=grafana`
-Memcache example: `127.0.0.1:11211`
+The remote cache connection string. The format depends on the `type` of the remote cache.
+
+#### Database
+
+Leave empty when using `database` since it will use the primary database.
+
+#### Redis
+
+Example connstr: `addr=127.0.0.1:6379,pool_size=100,db=0,ssl=false`
+
+- `addr` is the host `:` port of the redis server.
+- `pool_size` (optional) is the number of underlying connections that can be made to redis.
+- `db` (optional) is the number identifier of the redis database you want to use.
+- `ssl` (optional) specifies whether SSL should be used to connect to the redis server. The value may be `true`, `false`, or `insecure`. Setting the value to `insecure` skips verification of the certificate chain and hostname when making the connection.
+
+#### Memcache
+
+Example connstr: `127.0.0.1:11211`
 
 <hr />
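
To make the `ssl` values concrete, the sketch below (assumed, not part of the commit; the helper name `tlsConfigFor` and the example address are hypothetical) shows how each allowed value maps onto the `TLSConfig` field of `redis.Options` in gopkg.in/redis.v5, mirroring what `parseRedisConnStr` does in the storage change further down:

```go
package main

import (
	"crypto/tls"
	"fmt"
	"strings"

	redis "gopkg.in/redis.v5"
)

// tlsConfigFor maps the ssl option to a TLS configuration:
//   "false"    -> no TLS
//   "true"     -> TLS, with ServerName taken from the host part of addr
//   "insecure" -> TLS without certificate chain and hostname verification
func tlsConfigFor(ssl, addr string) (*tls.Config, error) {
	switch ssl {
	case "false":
		return nil, nil
	case "true":
		return &tls.Config{ServerName: strings.Split(addr, ":")[0]}, nil
	case "insecure":
		return &tls.Config{InsecureSkipVerify: true}, nil
	default:
		return nil, fmt.Errorf("ssl must be 'true', 'false', or 'insecure', got %q", ssl)
	}
}

func main() {
	addr := "redis.example.com:6379" // hypothetical server
	tlsCfg, err := tlsConfigFor("true", addr)
	if err != nil {
		panic(err)
	}
	opts := &redis.Options{Network: "tcp", Addr: addr, TLSConfig: tlsCfg}
	fmt.Printf("dialing %s with ServerName=%q\n", opts.Addr, opts.TLSConfig.ServerName)
}
```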
 

+ 2 - 3
go.mod

@@ -52,7 +52,7 @@ require (
 	github.com/onsi/gomega v1.5.0 // indirect
 	github.com/opentracing/opentracing-go v1.1.0
 	github.com/patrickmn/go-cache v2.1.0+incompatible
-	github.com/pkg/errors v0.8.1 // indirect
+	github.com/pkg/errors v0.8.1
 	github.com/prometheus/client_golang v0.9.2
 	github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90
 	github.com/prometheus/common v0.2.0
@@ -80,12 +80,11 @@ require (
 	golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373
 	gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
 	gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect
-	gopkg.in/bufio.v1 v1.0.0-20140618132640-567b2bfa514e // indirect
 	gopkg.in/ini.v1 v1.42.0
 	gopkg.in/ldap.v3 v3.0.2
 	gopkg.in/macaron.v1 v1.3.2
 	gopkg.in/mail.v2 v2.3.1
-	gopkg.in/redis.v2 v2.3.2
+	gopkg.in/redis.v5 v5.2.9
 	gopkg.in/square/go-jose.v2 v2.3.0
 	gopkg.in/yaml.v2 v2.2.2
 )

+ 2 - 4
go.sum

@@ -269,8 +269,6 @@ gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gG
 gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk=
 gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM=
 gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
-gopkg.in/bufio.v1 v1.0.0-20140618132640-567b2bfa514e h1:wGA78yza6bu/mWcc4QfBuIEHEtc06xdiU0X8sY36yUU=
-gopkg.in/bufio.v1 v1.0.0-20140618132640-567b2bfa514e/go.mod h1:xsQCaysVCudhrYTfzYWe577fCe7Ceci+6qjO2Rdc0Z4=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -284,8 +282,8 @@ gopkg.in/macaron.v1 v1.3.2 h1:AvWIaPmwBUA87/OWzePkoxeaw6YJWDfBt1pDFPBnLf8=
 gopkg.in/macaron.v1 v1.3.2/go.mod h1:PrsiawTWAGZs6wFbT5hlr7SQ2Ns9h7cUVtcUu4lQOVo=
 gopkg.in/mail.v2 v2.3.1 h1:WYFn/oANrAGP2C0dcV6/pbkPzv8yGzqTjPmTeO7qoXk=
 gopkg.in/mail.v2 v2.3.1/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw=
-gopkg.in/redis.v2 v2.3.2 h1:GPVIIB/JnL1wvfULefy3qXmPu1nfNu2d0yA09FHgwfs=
-gopkg.in/redis.v2 v2.3.2/go.mod h1:4wl9PJ/CqzeHk3LVq1hNLHH8krm3+AXEgut4jVc++LU=
+gopkg.in/redis.v5 v5.2.9 h1:MNZYOLPomQzZMfpN3ZtD1uyJ2IDonTTlxYiV/pEApiw=
+gopkg.in/redis.v5 v5.2.9/go.mod h1:6gtv0/+A4iM08kdRfocWYB3bLX2tebpNtfKlFT6H4mY=
 gopkg.in/square/go-jose.v2 v2.3.0 h1:nLzhkFyl5bkblqYBoiWJUt5JkWOzmiaBtCxdJAqJd3U=
 gopkg.in/square/go-jose.v2 v2.3.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
 gopkg.in/stretchr/testify.v1 v1.2.2/go.mod h1:QI5V/q6UbPmuhtm10CaFZxED9NreB8PnFYN9JcR6TxU=

+ 24 - 4
pkg/infra/remotecache/redis_storage.go

@@ -1,6 +1,7 @@
 package remotecache
 
 import (
+	"crypto/tls"
 	"fmt"
 	"strconv"
 	"strings"
@@ -8,7 +9,7 @@ import (
 
 	"github.com/grafana/grafana/pkg/setting"
 	"github.com/grafana/grafana/pkg/util/errutil"
-	redis "gopkg.in/redis.v2"
+	redis "gopkg.in/redis.v5"
 )
 
 const redisCacheType = "redis"
@@ -21,6 +22,7 @@ type redisStorage struct {
 func parseRedisConnStr(connStr string) (*redis.Options, error) {
 	keyValueCSV := strings.Split(connStr, ",")
 	options := &redis.Options{Network: "tcp"}
+	setTLSIsTrue := false
 	for _, rawKeyValue := range keyValueCSV {
 		keyValueTuple := strings.SplitN(rawKeyValue, "=", 2)
 		if len(keyValueTuple) != 2 {
@@ -38,7 +40,7 @@ func parseRedisConnStr(connStr string) (*redis.Options, error) {
 		case "password":
 			options.Password = connVal
 		case "db":
-			i, err := strconv.ParseInt(connVal, 10, 64)
+			i, err := strconv.Atoi(connVal)
 			if err != nil {
 				return nil, errutil.Wrap("value for db in redis connection string must be a number", err)
 			}
@@ -49,9 +51,27 @@ func parseRedisConnStr(connStr string) (*redis.Options, error) {
 				return nil, errutil.Wrap("value for pool_size in redis connection string must be a number", err)
 			}
 			options.PoolSize = i
+		case "ssl":
+			if connVal != "true" && connVal != "false" && connVal != "insecure" {
+				return nil, fmt.Errorf("ssl must be set to 'true', 'false', or 'insecure' when present")
+			}
+			if connVal == "true" {
+				setTLSIsTrue = true // Needs addr already parsed, so set later
+			}
+			if connVal == "insecure" {
+				options.TLSConfig = &tls.Config{InsecureSkipVerify: true}
+			}
 		default:
-			return nil, fmt.Errorf("unrecorgnized option '%v' in redis connection string", connVal)
+			return nil, fmt.Errorf("unrecognized option '%v' in redis connection string", connKey)
+		}
+	}
+	if setTLSIsTrue {
+		// Get hostname from the Addr property and set it on the configuration for TLS
+		sp := strings.Split(options.Addr, ":")
+		if len(sp) < 1 {
+			return nil, fmt.Errorf("unable to get hostname from the addr field, expected host:port, got '%v'", options.Addr)
 		}
+		options.TLSConfig = &tls.Config{ServerName: sp[0]}
 	}
 	return options, nil
 }
@@ -71,7 +91,7 @@ func (s *redisStorage) Set(key string, val interface{}, expires time.Duration) e
 	if err != nil {
 		return err
 	}
-	status := s.c.SetEx(key, expires, string(value))
+	status := s.c.Set(key, string(value), expires)
 	return status.Err()
 }
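
A note on the `SetEx` to `Set` change in the hunk above: gopkg.in/redis.v2 used `SetEx(key, expiration, value)`, while gopkg.in/redis.v5 folds the expiration into `Set(key, value, expiration)`. A minimal usage sketch against the v5 API (the address and key are hypothetical, not taken from Grafana code):

```go
package main

import (
	"fmt"
	"time"

	redis "gopkg.in/redis.v5"
)

func main() {
	// Hypothetical local redis; in Grafana these options come from parseRedisConnStr.
	client := redis.NewClient(&redis.Options{
		Network: "tcp",
		Addr:    "127.0.0.1:6379",
	})

	// v5 signature: Set(key string, value interface{}, expiration time.Duration).
	// A zero expiration means the key never expires.
	if err := client.Set("cache:example", "payload", 5*time.Minute).Err(); err != nil {
		fmt.Println("set failed:", err)
		return
	}

	val, err := client.Get("cache:example").Result()
	if err != nil {
		fmt.Println("get failed:", err)
		return
	}
	fmt.Println("got:", val)
}
```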
 

+ 42 - 7
pkg/infra/remotecache/redis_storage_test.go

@@ -1,11 +1,12 @@
 package remotecache
 
 import (
+	"crypto/tls"
 	"fmt"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
-	redis "gopkg.in/redis.v2"
+	redis "gopkg.in/redis.v5"
 )
 
 func Test_parseRedisConnStr(t *testing.T) {
@@ -15,13 +16,14 @@ func Test_parseRedisConnStr(t *testing.T) {
 		ShouldErr     bool
 	}{
 		"all redis options should parse": {
-			"addr=127.0.0.1:6379,pool_size=100,db=1,password=grafanaRocks",
+			"addr=127.0.0.1:6379,pool_size=100,db=1,password=grafanaRocks,ssl=false",
 			&redis.Options{
-				Addr:     "127.0.0.1:6379",
-				PoolSize: 100,
-				DB:       1,
-				Password: "grafanaRocks",
-				Network:  "tcp",
+				Addr:      "127.0.0.1:6379",
+				PoolSize:  100,
+				DB:        1,
+				Password:  "grafanaRocks",
+				Network:   "tcp",
+				TLSConfig: nil,
 			},
 			false,
 		},
@@ -34,6 +36,39 @@ func Test_parseRedisConnStr(t *testing.T) {
 			},
 			false,
 		},
+		"ssl set to true should result in default TLS configuration with tls set to addr's host": {
+			"addr=grafana.com:6379,ssl=true",
+			&redis.Options{
+				Addr:      "grafana.com:6379",
+				Network:   "tcp",
+				TLSConfig: &tls.Config{ServerName: "grafana.com"},
+			},
+			false,
+		},
+		"ssl to insecure should result in TLS configuration with InsecureSkipVerify": {
+			"addr=127.0.0.1:6379,ssl=insecure",
+			&redis.Options{
+				Addr:      "127.0.0.1:6379",
+				Network:   "tcp",
+				TLSConfig: &tls.Config{InsecureSkipVerify: true},
+			},
+			false,
+		},
+		"invalid SSL option should err": {
+			"addr=127.0.0.1:6379,ssl=dragons",
+			nil,
+			true,
+		},
+		"invalid pool_size value should err": {
+			"addr=127.0.0.1:6379,pool_size=seven",
+			nil,
+			true,
+		},
+		"invalid db value should err": {
+			"addr=127.0.0.1:6379,db=seven",
+			nil,
+			true,
+		},
 		"trailing comma should err": {
 			"addr=127.0.0.1:6379,pool_size=100,",
 			nil,

+ 0 - 2
vendor/gopkg.in/bufio.v1/Makefile

@@ -1,2 +0,0 @@
-all:
-	go test gopkg.in/bufio.v1

+ 0 - 4
vendor/gopkg.in/bufio.v1/README.md

@@ -1,4 +0,0 @@
-bufio
-=====
-
-This is a fork of the http://golang.org/pkg/bufio/ package. It adds `ReadN` method that allows reading next `n` bytes from the internal buffer without allocating intermediate buffer. This method works just like the [Buffer.Next](http://golang.org/pkg/bytes/#Buffer.Next) method, but has slightly different signature.

+ 0 - 413
vendor/gopkg.in/bufio.v1/buffer.go

@@ -1,413 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package bufio
-
-// Simple byte buffer for marshaling data.
-
-import (
-	"bytes"
-	"errors"
-	"io"
-	"unicode/utf8"
-)
-
-// A Buffer is a variable-sized buffer of bytes with Read and Write methods.
-// The zero value for Buffer is an empty buffer ready to use.
-type Buffer struct {
-	buf       []byte            // contents are the bytes buf[off : len(buf)]
-	off       int               // read at &buf[off], write at &buf[len(buf)]
-	runeBytes [utf8.UTFMax]byte // avoid allocation of slice on each WriteByte or Rune
-	bootstrap [64]byte          // memory to hold first slice; helps small buffers (Printf) avoid allocation.
-	lastRead  readOp            // last read operation, so that Unread* can work correctly.
-}
-
-// The readOp constants describe the last action performed on
-// the buffer, so that UnreadRune and UnreadByte can
-// check for invalid usage.
-type readOp int
-
-const (
-	opInvalid  readOp = iota // Non-read operation.
-	opReadRune               // Read rune.
-	opRead                   // Any other read operation.
-)
-
-// ErrTooLarge is passed to panic if memory cannot be allocated to store data in a buffer.
-var ErrTooLarge = errors.New("bytes.Buffer: too large")
-
-// Bytes returns a slice of the contents of the unread portion of the buffer;
-// len(b.Bytes()) == b.Len().  If the caller changes the contents of the
-// returned slice, the contents of the buffer will change provided there
-// are no intervening method calls on the Buffer.
-func (b *Buffer) Bytes() []byte { return b.buf[b.off:] }
-
-// String returns the contents of the unread portion of the buffer
-// as a string.  If the Buffer is a nil pointer, it returns "<nil>".
-func (b *Buffer) String() string {
-	if b == nil {
-		// Special case, useful in debugging.
-		return "<nil>"
-	}
-	return string(b.buf[b.off:])
-}
-
-// Len returns the number of bytes of the unread portion of the buffer;
-// b.Len() == len(b.Bytes()).
-func (b *Buffer) Len() int { return len(b.buf) - b.off }
-
-// Truncate discards all but the first n unread bytes from the buffer.
-// It panics if n is negative or greater than the length of the buffer.
-func (b *Buffer) Truncate(n int) {
-	b.lastRead = opInvalid
-	switch {
-	case n < 0 || n > b.Len():
-		panic("bytes.Buffer: truncation out of range")
-	case n == 0:
-		// Reuse buffer space.
-		b.off = 0
-	}
-	b.buf = b.buf[0 : b.off+n]
-}
-
-// Reset resets the buffer so it has no content.
-// b.Reset() is the same as b.Truncate(0).
-func (b *Buffer) Reset() { b.Truncate(0) }
-
-// grow grows the buffer to guarantee space for n more bytes.
-// It returns the index where bytes should be written.
-// If the buffer can't grow it will panic with ErrTooLarge.
-func (b *Buffer) grow(n int) int {
-	m := b.Len()
-	// If buffer is empty, reset to recover space.
-	if m == 0 && b.off != 0 {
-		b.Truncate(0)
-	}
-	if len(b.buf)+n > cap(b.buf) {
-		var buf []byte
-		if b.buf == nil && n <= len(b.bootstrap) {
-			buf = b.bootstrap[0:]
-		} else if m+n <= cap(b.buf)/2 {
-			// We can slide things down instead of allocating a new
-			// slice. We only need m+n <= cap(b.buf) to slide, but
-			// we instead let capacity get twice as large so we
-			// don't spend all our time copying.
-			copy(b.buf[:], b.buf[b.off:])
-			buf = b.buf[:m]
-		} else {
-			// not enough space anywhere
-			buf = makeSlice(2*cap(b.buf) + n)
-			copy(buf, b.buf[b.off:])
-		}
-		b.buf = buf
-		b.off = 0
-	}
-	b.buf = b.buf[0 : b.off+m+n]
-	return b.off + m
-}
-
-// Grow grows the buffer's capacity, if necessary, to guarantee space for
-// another n bytes. After Grow(n), at least n bytes can be written to the
-// buffer without another allocation.
-// If n is negative, Grow will panic.
-// If the buffer can't grow it will panic with ErrTooLarge.
-func (b *Buffer) Grow(n int) {
-	if n < 0 {
-		panic("bytes.Buffer.Grow: negative count")
-	}
-	m := b.grow(n)
-	b.buf = b.buf[0:m]
-}
-
-// Write appends the contents of p to the buffer, growing the buffer as
-// needed. The return value n is the length of p; err is always nil. If the
-// buffer becomes too large, Write will panic with ErrTooLarge.
-func (b *Buffer) Write(p []byte) (n int, err error) {
-	b.lastRead = opInvalid
-	m := b.grow(len(p))
-	return copy(b.buf[m:], p), nil
-}
-
-// WriteString appends the contents of s to the buffer, growing the buffer as
-// needed. The return value n is the length of s; err is always nil. If the
-// buffer becomes too large, WriteString will panic with ErrTooLarge.
-func (b *Buffer) WriteString(s string) (n int, err error) {
-	b.lastRead = opInvalid
-	m := b.grow(len(s))
-	return copy(b.buf[m:], s), nil
-}
-
-// MinRead is the minimum slice size passed to a Read call by
-// Buffer.ReadFrom.  As long as the Buffer has at least MinRead bytes beyond
-// what is required to hold the contents of r, ReadFrom will not grow the
-// underlying buffer.
-const MinRead = 512
-
-// ReadFrom reads data from r until EOF and appends it to the buffer, growing
-// the buffer as needed. The return value n is the number of bytes read. Any
-// error except io.EOF encountered during the read is also returned. If the
-// buffer becomes too large, ReadFrom will panic with ErrTooLarge.
-func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) {
-	b.lastRead = opInvalid
-	// If buffer is empty, reset to recover space.
-	if b.off >= len(b.buf) {
-		b.Truncate(0)
-	}
-	for {
-		if free := cap(b.buf) - len(b.buf); free < MinRead {
-			// not enough space at end
-			newBuf := b.buf
-			if b.off+free < MinRead {
-				// not enough space using beginning of buffer;
-				// double buffer capacity
-				newBuf = makeSlice(2*cap(b.buf) + MinRead)
-			}
-			copy(newBuf, b.buf[b.off:])
-			b.buf = newBuf[:len(b.buf)-b.off]
-			b.off = 0
-		}
-		m, e := r.Read(b.buf[len(b.buf):cap(b.buf)])
-		b.buf = b.buf[0 : len(b.buf)+m]
-		n += int64(m)
-		if e == io.EOF {
-			break
-		}
-		if e != nil {
-			return n, e
-		}
-	}
-	return n, nil // err is EOF, so return nil explicitly
-}
-
-// makeSlice allocates a slice of size n. If the allocation fails, it panics
-// with ErrTooLarge.
-func makeSlice(n int) []byte {
-	// If the make fails, give a known error.
-	defer func() {
-		if recover() != nil {
-			panic(ErrTooLarge)
-		}
-	}()
-	return make([]byte, n)
-}
-
-// WriteTo writes data to w until the buffer is drained or an error occurs.
-// The return value n is the number of bytes written; it always fits into an
-// int, but it is int64 to match the io.WriterTo interface. Any error
-// encountered during the write is also returned.
-func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {
-	b.lastRead = opInvalid
-	if b.off < len(b.buf) {
-		nBytes := b.Len()
-		m, e := w.Write(b.buf[b.off:])
-		if m > nBytes {
-			panic("bytes.Buffer.WriteTo: invalid Write count")
-		}
-		b.off += m
-		n = int64(m)
-		if e != nil {
-			return n, e
-		}
-		// all bytes should have been written, by definition of
-		// Write method in io.Writer
-		if m != nBytes {
-			return n, io.ErrShortWrite
-		}
-	}
-	// Buffer is now empty; reset.
-	b.Truncate(0)
-	return
-}
-
-// WriteByte appends the byte c to the buffer, growing the buffer as needed.
-// The returned error is always nil, but is included to match bufio.Writer's
-// WriteByte. If the buffer becomes too large, WriteByte will panic with
-// ErrTooLarge.
-func (b *Buffer) WriteByte(c byte) error {
-	b.lastRead = opInvalid
-	m := b.grow(1)
-	b.buf[m] = c
-	return nil
-}
-
-// WriteRune appends the UTF-8 encoding of Unicode code point r to the
-// buffer, returning its length and an error, which is always nil but is
-// included to match bufio.Writer's WriteRune. The buffer is grown as needed;
-// if it becomes too large, WriteRune will panic with ErrTooLarge.
-func (b *Buffer) WriteRune(r rune) (n int, err error) {
-	if r < utf8.RuneSelf {
-		b.WriteByte(byte(r))
-		return 1, nil
-	}
-	n = utf8.EncodeRune(b.runeBytes[0:], r)
-	b.Write(b.runeBytes[0:n])
-	return n, nil
-}
-
-// Read reads the next len(p) bytes from the buffer or until the buffer
-// is drained.  The return value n is the number of bytes read.  If the
-// buffer has no data to return, err is io.EOF (unless len(p) is zero);
-// otherwise it is nil.
-func (b *Buffer) Read(p []byte) (n int, err error) {
-	b.lastRead = opInvalid
-	if b.off >= len(b.buf) {
-		// Buffer is empty, reset to recover space.
-		b.Truncate(0)
-		if len(p) == 0 {
-			return
-		}
-		return 0, io.EOF
-	}
-	n = copy(p, b.buf[b.off:])
-	b.off += n
-	if n > 0 {
-		b.lastRead = opRead
-	}
-	return
-}
-
-// Next returns a slice containing the next n bytes from the buffer,
-// advancing the buffer as if the bytes had been returned by Read.
-// If there are fewer than n bytes in the buffer, Next returns the entire buffer.
-// The slice is only valid until the next call to a read or write method.
-func (b *Buffer) Next(n int) []byte {
-	b.lastRead = opInvalid
-	m := b.Len()
-	if n > m {
-		n = m
-	}
-	data := b.buf[b.off : b.off+n]
-	b.off += n
-	if n > 0 {
-		b.lastRead = opRead
-	}
-	return data
-}
-
-// ReadByte reads and returns the next byte from the buffer.
-// If no byte is available, it returns error io.EOF.
-func (b *Buffer) ReadByte() (c byte, err error) {
-	b.lastRead = opInvalid
-	if b.off >= len(b.buf) {
-		// Buffer is empty, reset to recover space.
-		b.Truncate(0)
-		return 0, io.EOF
-	}
-	c = b.buf[b.off]
-	b.off++
-	b.lastRead = opRead
-	return c, nil
-}
-
-// ReadRune reads and returns the next UTF-8-encoded
-// Unicode code point from the buffer.
-// If no bytes are available, the error returned is io.EOF.
-// If the bytes are an erroneous UTF-8 encoding, it
-// consumes one byte and returns U+FFFD, 1.
-func (b *Buffer) ReadRune() (r rune, size int, err error) {
-	b.lastRead = opInvalid
-	if b.off >= len(b.buf) {
-		// Buffer is empty, reset to recover space.
-		b.Truncate(0)
-		return 0, 0, io.EOF
-	}
-	b.lastRead = opReadRune
-	c := b.buf[b.off]
-	if c < utf8.RuneSelf {
-		b.off++
-		return rune(c), 1, nil
-	}
-	r, n := utf8.DecodeRune(b.buf[b.off:])
-	b.off += n
-	return r, n, nil
-}
-
-// UnreadRune unreads the last rune returned by ReadRune.
-// If the most recent read or write operation on the buffer was
-// not a ReadRune, UnreadRune returns an error.  (In this regard
-// it is stricter than UnreadByte, which will unread the last byte
-// from any read operation.)
-func (b *Buffer) UnreadRune() error {
-	if b.lastRead != opReadRune {
-		return errors.New("bytes.Buffer: UnreadRune: previous operation was not ReadRune")
-	}
-	b.lastRead = opInvalid
-	if b.off > 0 {
-		_, n := utf8.DecodeLastRune(b.buf[0:b.off])
-		b.off -= n
-	}
-	return nil
-}
-
-// UnreadByte unreads the last byte returned by the most recent
-// read operation.  If write has happened since the last read, UnreadByte
-// returns an error.
-func (b *Buffer) UnreadByte() error {
-	if b.lastRead != opReadRune && b.lastRead != opRead {
-		return errors.New("bytes.Buffer: UnreadByte: previous operation was not a read")
-	}
-	b.lastRead = opInvalid
-	if b.off > 0 {
-		b.off--
-	}
-	return nil
-}
-
-// ReadBytes reads until the first occurrence of delim in the input,
-// returning a slice containing the data up to and including the delimiter.
-// If ReadBytes encounters an error before finding a delimiter,
-// it returns the data read before the error and the error itself (often io.EOF).
-// ReadBytes returns err != nil if and only if the returned data does not end in
-// delim.
-func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) {
-	slice, err := b.readSlice(delim)
-	// return a copy of slice. The buffer's backing array may
-	// be overwritten by later calls.
-	line = append(line, slice...)
-	return
-}
-
-// readSlice is like ReadBytes but returns a reference to internal buffer data.
-func (b *Buffer) readSlice(delim byte) (line []byte, err error) {
-	i := bytes.IndexByte(b.buf[b.off:], delim)
-	end := b.off + i + 1
-	if i < 0 {
-		end = len(b.buf)
-		err = io.EOF
-	}
-	line = b.buf[b.off:end]
-	b.off = end
-	b.lastRead = opRead
-	return line, err
-}
-
-// ReadString reads until the first occurrence of delim in the input,
-// returning a string containing the data up to and including the delimiter.
-// If ReadString encounters an error before finding a delimiter,
-// it returns the data read before the error and the error itself (often io.EOF).
-// ReadString returns err != nil if and only if the returned data does not end
-// in delim.
-func (b *Buffer) ReadString(delim byte) (line string, err error) {
-	slice, err := b.readSlice(delim)
-	return string(slice), err
-}
-
-// NewBuffer creates and initializes a new Buffer using buf as its initial
-// contents.  It is intended to prepare a Buffer to read existing data.  It
-// can also be used to size the internal buffer for writing. To do that,
-// buf should have the desired capacity but a length of zero.
-//
-// In most cases, new(Buffer) (or just declaring a Buffer variable) is
-// sufficient to initialize a Buffer.
-func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} }
-
-// NewBufferString creates and initializes a new Buffer using string s as its
-// initial contents. It is intended to prepare a buffer to read an existing
-// string.
-//
-// In most cases, new(Buffer) (or just declaring a Buffer variable) is
-// sufficient to initialize a Buffer.
-func NewBufferString(s string) *Buffer {
-	return &Buffer{buf: []byte(s)}
-}

+ 0 - 728
vendor/gopkg.in/bufio.v1/bufio.go

@@ -1,728 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package bufio implements buffered I/O.  It wraps an io.Reader or io.Writer
-// object, creating another object (Reader or Writer) that also implements
-// the interface but provides buffering and some help for textual I/O.
-package bufio
-
-import (
-	"bytes"
-	"errors"
-	"io"
-	"unicode/utf8"
-)
-
-const (
-	defaultBufSize = 4096
-)
-
-var (
-	ErrInvalidUnreadByte = errors.New("bufio: invalid use of UnreadByte")
-	ErrInvalidUnreadRune = errors.New("bufio: invalid use of UnreadRune")
-	ErrBufferFull        = errors.New("bufio: buffer full")
-	ErrNegativeCount     = errors.New("bufio: negative count")
-)
-
-// Buffered input.
-
-// Reader implements buffering for an io.Reader object.
-type Reader struct {
-	buf          []byte
-	rd           io.Reader
-	r, w         int
-	err          error
-	lastByte     int
-	lastRuneSize int
-}
-
-const minReadBufferSize = 16
-const maxConsecutiveEmptyReads = 100
-
-// NewReaderSize returns a new Reader whose buffer has at least the specified
-// size. If the argument io.Reader is already a Reader with large enough
-// size, it returns the underlying Reader.
-func NewReaderSize(rd io.Reader, size int) *Reader {
-	// Is it already a Reader?
-	b, ok := rd.(*Reader)
-	if ok && len(b.buf) >= size {
-		return b
-	}
-	if size < minReadBufferSize {
-		size = minReadBufferSize
-	}
-	r := new(Reader)
-	r.reset(make([]byte, size), rd)
-	return r
-}
-
-// NewReader returns a new Reader whose buffer has the default size.
-func NewReader(rd io.Reader) *Reader {
-	return NewReaderSize(rd, defaultBufSize)
-}
-
-// Reset discards any buffered data, resets all state, and switches
-// the buffered reader to read from r.
-func (b *Reader) Reset(r io.Reader) {
-	b.reset(b.buf, r)
-}
-
-func (b *Reader) reset(buf []byte, r io.Reader) {
-	*b = Reader{
-		buf:          buf,
-		rd:           r,
-		lastByte:     -1,
-		lastRuneSize: -1,
-	}
-}
-
-var errNegativeRead = errors.New("bufio: reader returned negative count from Read")
-
-// fill reads a new chunk into the buffer.
-func (b *Reader) fill() {
-	// Slide existing data to beginning.
-	if b.r > 0 {
-		copy(b.buf, b.buf[b.r:b.w])
-		b.w -= b.r
-		b.r = 0
-	}
-
-	if b.w >= len(b.buf) {
-		panic("bufio: tried to fill full buffer")
-	}
-
-	// Read new data: try a limited number of times.
-	for i := maxConsecutiveEmptyReads; i > 0; i-- {
-		n, err := b.rd.Read(b.buf[b.w:])
-		if n < 0 {
-			panic(errNegativeRead)
-		}
-		b.w += n
-		if err != nil {
-			b.err = err
-			return
-		}
-		if n > 0 {
-			return
-		}
-	}
-	b.err = io.ErrNoProgress
-}
-
-func (b *Reader) readErr() error {
-	err := b.err
-	b.err = nil
-	return err
-}
-
-// Peek returns the next n bytes without advancing the reader. The bytes stop
-// being valid at the next read call. If Peek returns fewer than n bytes, it
-// also returns an error explaining why the read is short. The error is
-// ErrBufferFull if n is larger than b's buffer size.
-func (b *Reader) Peek(n int) ([]byte, error) {
-	if n < 0 {
-		return nil, ErrNegativeCount
-	}
-	if n > len(b.buf) {
-		return nil, ErrBufferFull
-	}
-	// 0 <= n <= len(b.buf)
-	for b.w-b.r < n && b.err == nil {
-		b.fill() // b.w-b.r < len(b.buf) => buffer is not full
-	}
-	m := b.w - b.r
-	if m > n {
-		m = n
-	}
-	var err error
-	if m < n {
-		err = b.readErr()
-		if err == nil {
-			err = ErrBufferFull
-		}
-	}
-	return b.buf[b.r : b.r+m], err
-}
-
-// Read reads data into p.
-// It returns the number of bytes read into p.
-// It calls Read at most once on the underlying Reader,
-// hence n may be less than len(p).
-// At EOF, the count will be zero and err will be io.EOF.
-func (b *Reader) Read(p []byte) (n int, err error) {
-	n = len(p)
-	if n == 0 {
-		return 0, b.readErr()
-	}
-	if b.r == b.w {
-		if b.err != nil {
-			return 0, b.readErr()
-		}
-		if len(p) >= len(b.buf) {
-			// Large read, empty buffer.
-			// Read directly into p to avoid copy.
-			n, b.err = b.rd.Read(p)
-			if n < 0 {
-				panic(errNegativeRead)
-			}
-			if n > 0 {
-				b.lastByte = int(p[n-1])
-				b.lastRuneSize = -1
-			}
-			return n, b.readErr()
-		}
-		b.fill() // buffer is empty
-		if b.w == b.r {
-			return 0, b.readErr()
-		}
-	}
-
-	if n > b.w-b.r {
-		n = b.w - b.r
-	}
-	copy(p[0:n], b.buf[b.r:])
-	b.r += n
-	b.lastByte = int(b.buf[b.r-1])
-	b.lastRuneSize = -1
-	return n, nil
-}
-
-// ReadByte reads and returns a single byte.
-// If no byte is available, returns an error.
-func (b *Reader) ReadByte() (c byte, err error) {
-	b.lastRuneSize = -1
-	for b.r == b.w {
-		if b.err != nil {
-			return 0, b.readErr()
-		}
-		b.fill() // buffer is empty
-	}
-	c = b.buf[b.r]
-	b.r++
-	b.lastByte = int(c)
-	return c, nil
-}
-
-// UnreadByte unreads the last byte.  Only the most recently read byte can be unread.
-func (b *Reader) UnreadByte() error {
-	if b.lastByte < 0 || b.r == 0 && b.w > 0 {
-		return ErrInvalidUnreadByte
-	}
-	// b.r > 0 || b.w == 0
-	if b.r > 0 {
-		b.r--
-	} else {
-		// b.r == 0 && b.w == 0
-		b.w = 1
-	}
-	b.buf[b.r] = byte(b.lastByte)
-	b.lastByte = -1
-	b.lastRuneSize = -1
-	return nil
-}
-
-// ReadRune reads a single UTF-8 encoded Unicode character and returns the
-// rune and its size in bytes. If the encoded rune is invalid, it consumes one byte
-// and returns unicode.ReplacementChar (U+FFFD) with a size of 1.
-func (b *Reader) ReadRune() (r rune, size int, err error) {
-	for b.r+utf8.UTFMax > b.w && !utf8.FullRune(b.buf[b.r:b.w]) && b.err == nil && b.w-b.r < len(b.buf) {
-		b.fill() // b.w-b.r < len(buf) => buffer is not full
-	}
-	b.lastRuneSize = -1
-	if b.r == b.w {
-		return 0, 0, b.readErr()
-	}
-	r, size = rune(b.buf[b.r]), 1
-	if r >= 0x80 {
-		r, size = utf8.DecodeRune(b.buf[b.r:b.w])
-	}
-	b.r += size
-	b.lastByte = int(b.buf[b.r-1])
-	b.lastRuneSize = size
-	return r, size, nil
-}
-
-// UnreadRune unreads the last rune.  If the most recent read operation on
-// the buffer was not a ReadRune, UnreadRune returns an error.  (In this
-// regard it is stricter than UnreadByte, which will unread the last byte
-// from any read operation.)
-func (b *Reader) UnreadRune() error {
-	if b.lastRuneSize < 0 || b.r < b.lastRuneSize {
-		return ErrInvalidUnreadRune
-	}
-	b.r -= b.lastRuneSize
-	b.lastByte = -1
-	b.lastRuneSize = -1
-	return nil
-}
-
-// Buffered returns the number of bytes that can be read from the current buffer.
-func (b *Reader) Buffered() int { return b.w - b.r }
-
-// ReadSlice reads until the first occurrence of delim in the input,
-// returning a slice pointing at the bytes in the buffer.
-// The bytes stop being valid at the next read.
-// If ReadSlice encounters an error before finding a delimiter,
-// it returns all the data in the buffer and the error itself (often io.EOF).
-// ReadSlice fails with error ErrBufferFull if the buffer fills without a delim.
-// Because the data returned from ReadSlice will be overwritten
-// by the next I/O operation, most clients should use
-// ReadBytes or ReadString instead.
-// ReadSlice returns err != nil if and only if line does not end in delim.
-func (b *Reader) ReadSlice(delim byte) (line []byte, err error) {
-	for {
-		// Search buffer.
-		if i := bytes.IndexByte(b.buf[b.r:b.w], delim); i >= 0 {
-			line = b.buf[b.r : b.r+i+1]
-			b.r += i + 1
-			break
-		}
-
-		// Pending error?
-		if b.err != nil {
-			line = b.buf[b.r:b.w]
-			b.r = b.w
-			err = b.readErr()
-			break
-		}
-
-		// Buffer full?
-		if n := b.Buffered(); n >= len(b.buf) {
-			b.r = b.w
-			line = b.buf
-			err = ErrBufferFull
-			break
-		}
-
-		b.fill() // buffer is not full
-	}
-
-	// Handle last byte, if any.
-	if i := len(line) - 1; i >= 0 {
-		b.lastByte = int(line[i])
-	}
-
-	return
-}
-
-// ReadN tries to read exactly n bytes.
-// The bytes stop being valid at the next read call.
-// If ReadN encounters an error before reading n bytes,
-// it returns all the data in the buffer and the error itself (often io.EOF).
-// ReadN fails with error ErrBufferFull if the buffer fills
-// without reading N bytes.
-// Because the data returned from ReadN will be overwritten
-// by the next I/O operation, most clients should use
-// ReadBytes or ReadString instead.
-func (b *Reader) ReadN(n int) ([]byte, error) {
-	for b.Buffered() < n {
-		if b.err != nil {
-			buf := b.buf[b.r:b.w]
-			b.r = b.w
-			return buf, b.readErr()
-		}
-
-		// Buffer is full?
-		if b.Buffered() >= len(b.buf) {
-			b.r = b.w
-			return b.buf, ErrBufferFull
-		}
-
-		b.fill()
-	}
-	buf := b.buf[b.r : b.r+n]
-	b.r += n
-	return buf, nil
-}
-
-// ReadLine is a low-level line-reading primitive. Most callers should use
-// ReadBytes('\n') or ReadString('\n') instead or use a Scanner.
-//
-// ReadLine tries to return a single line, not including the end-of-line bytes.
-// If the line was too long for the buffer then isPrefix is set and the
-// beginning of the line is returned. The rest of the line will be returned
-// from future calls. isPrefix will be false when returning the last fragment
-// of the line. The returned buffer is only valid until the next call to
-// ReadLine. ReadLine either returns a non-nil line or it returns an error,
-// never both.
-//
-// The text returned from ReadLine does not include the line end ("\r\n" or "\n").
-// No indication or error is given if the input ends without a final line end.
-// Calling UnreadByte after ReadLine will always unread the last byte read
-// (possibly a character belonging to the line end) even if that byte is not
-// part of the line returned by ReadLine.
-func (b *Reader) ReadLine() (line []byte, isPrefix bool, err error) {
-	line, err = b.ReadSlice('\n')
-	if err == ErrBufferFull {
-		// Handle the case where "\r\n" straddles the buffer.
-		if len(line) > 0 && line[len(line)-1] == '\r' {
-			// Put the '\r' back on buf and drop it from line.
-			// Let the next call to ReadLine check for "\r\n".
-			if b.r == 0 {
-				// should be unreachable
-				panic("bufio: tried to rewind past start of buffer")
-			}
-			b.r--
-			line = line[:len(line)-1]
-		}
-		return line, true, nil
-	}
-
-	if len(line) == 0 {
-		if err != nil {
-			line = nil
-		}
-		return
-	}
-	err = nil
-
-	if line[len(line)-1] == '\n' {
-		drop := 1
-		if len(line) > 1 && line[len(line)-2] == '\r' {
-			drop = 2
-		}
-		line = line[:len(line)-drop]
-	}
-	return
-}
-
-// ReadBytes reads until the first occurrence of delim in the input,
-// returning a slice containing the data up to and including the delimiter.
-// If ReadBytes encounters an error before finding a delimiter,
-// it returns the data read before the error and the error itself (often io.EOF).
-// ReadBytes returns err != nil if and only if the returned data does not end in
-// delim.
-// For simple uses, a Scanner may be more convenient.
-func (b *Reader) ReadBytes(delim byte) (line []byte, err error) {
-	// Use ReadSlice to look for array,
-	// accumulating full buffers.
-	var frag []byte
-	var full [][]byte
-	err = nil
-
-	for {
-		var e error
-		frag, e = b.ReadSlice(delim)
-		if e == nil { // got final fragment
-			break
-		}
-		if e != ErrBufferFull { // unexpected error
-			err = e
-			break
-		}
-
-		// Make a copy of the buffer.
-		buf := make([]byte, len(frag))
-		copy(buf, frag)
-		full = append(full, buf)
-	}
-
-	// Allocate new buffer to hold the full pieces and the fragment.
-	n := 0
-	for i := range full {
-		n += len(full[i])
-	}
-	n += len(frag)
-
-	// Copy full pieces and fragment in.
-	buf := make([]byte, n)
-	n = 0
-	for i := range full {
-		n += copy(buf[n:], full[i])
-	}
-	copy(buf[n:], frag)
-	return buf, err
-}
-
-// ReadString reads until the first occurrence of delim in the input,
-// returning a string containing the data up to and including the delimiter.
-// If ReadString encounters an error before finding a delimiter,
-// it returns the data read before the error and the error itself (often io.EOF).
-// ReadString returns err != nil if and only if the returned data does not end in
-// delim.
-// For simple uses, a Scanner may be more convenient.
-func (b *Reader) ReadString(delim byte) (line string, err error) {
-	bytes, err := b.ReadBytes(delim)
-	line = string(bytes)
-	return line, err
-}
-
-// WriteTo implements io.WriterTo.
-func (b *Reader) WriteTo(w io.Writer) (n int64, err error) {
-	n, err = b.writeBuf(w)
-	if err != nil {
-		return
-	}
-
-	if r, ok := b.rd.(io.WriterTo); ok {
-		m, err := r.WriteTo(w)
-		n += m
-		return n, err
-	}
-
-	if w, ok := w.(io.ReaderFrom); ok {
-		m, err := w.ReadFrom(b.rd)
-		n += m
-		return n, err
-	}
-
-	if b.w-b.r < len(b.buf) {
-		b.fill() // buffer not full
-	}
-
-	for b.r < b.w {
-		// b.r < b.w => buffer is not empty
-		m, err := b.writeBuf(w)
-		n += m
-		if err != nil {
-			return n, err
-		}
-		b.fill() // buffer is empty
-	}
-
-	if b.err == io.EOF {
-		b.err = nil
-	}
-
-	return n, b.readErr()
-}
-
-// writeBuf writes the Reader's buffer to the writer.
-func (b *Reader) writeBuf(w io.Writer) (int64, error) {
-	n, err := w.Write(b.buf[b.r:b.w])
-	if n < b.r-b.w {
-		panic(errors.New("bufio: writer did not write all data"))
-	}
-	b.r += n
-	return int64(n), err
-}
-
-// buffered output
-
-// Writer implements buffering for an io.Writer object.
-// If an error occurs writing to a Writer, no more data will be
-// accepted and all subsequent writes will return the error.
-// After all data has been written, the client should call the
-// Flush method to guarantee all data has been forwarded to
-// the underlying io.Writer.
-type Writer struct {
-	err error
-	buf []byte
-	n   int
-	wr  io.Writer
-}
-
-// NewWriterSize returns a new Writer whose buffer has at least the specified
-// size. If the argument io.Writer is already a Writer with large enough
-// size, it returns the underlying Writer.
-func NewWriterSize(w io.Writer, size int) *Writer {
-	// Is it already a Writer?
-	b, ok := w.(*Writer)
-	if ok && len(b.buf) >= size {
-		return b
-	}
-	if size <= 0 {
-		size = defaultBufSize
-	}
-	return &Writer{
-		buf: make([]byte, size),
-		wr:  w,
-	}
-}
-
-// NewWriter returns a new Writer whose buffer has the default size.
-func NewWriter(w io.Writer) *Writer {
-	return NewWriterSize(w, defaultBufSize)
-}
-
-// Reset discards any unflushed buffered data, clears any error, and
-// resets b to write its output to w.
-func (b *Writer) Reset(w io.Writer) {
-	b.err = nil
-	b.n = 0
-	b.wr = w
-}
-
-// Flush writes any buffered data to the underlying io.Writer.
-func (b *Writer) Flush() error {
-	err := b.flush()
-	return err
-}
-
-func (b *Writer) flush() error {
-	if b.err != nil {
-		return b.err
-	}
-	if b.n == 0 {
-		return nil
-	}
-	n, err := b.wr.Write(b.buf[0:b.n])
-	if n < b.n && err == nil {
-		err = io.ErrShortWrite
-	}
-	if err != nil {
-		if n > 0 && n < b.n {
-			copy(b.buf[0:b.n-n], b.buf[n:b.n])
-		}
-		b.n -= n
-		b.err = err
-		return err
-	}
-	b.n = 0
-	return nil
-}
-
-// Available returns how many bytes are unused in the buffer.
-func (b *Writer) Available() int { return len(b.buf) - b.n }
-
-// Buffered returns the number of bytes that have been written into the current buffer.
-func (b *Writer) Buffered() int { return b.n }
-
-// Write writes the contents of p into the buffer.
-// It returns the number of bytes written.
-// If nn < len(p), it also returns an error explaining
-// why the write is short.
-func (b *Writer) Write(p []byte) (nn int, err error) {
-	for len(p) > b.Available() && b.err == nil {
-		var n int
-		if b.Buffered() == 0 {
-			// Large write, empty buffer.
-			// Write directly from p to avoid copy.
-			n, b.err = b.wr.Write(p)
-		} else {
-			n = copy(b.buf[b.n:], p)
-			b.n += n
-			b.flush()
-		}
-		nn += n
-		p = p[n:]
-	}
-	if b.err != nil {
-		return nn, b.err
-	}
-	n := copy(b.buf[b.n:], p)
-	b.n += n
-	nn += n
-	return nn, nil
-}
-
-// WriteByte writes a single byte.
-func (b *Writer) WriteByte(c byte) error {
-	if b.err != nil {
-		return b.err
-	}
-	if b.Available() <= 0 && b.flush() != nil {
-		return b.err
-	}
-	b.buf[b.n] = c
-	b.n++
-	return nil
-}
-
-// WriteRune writes a single Unicode code point, returning
-// the number of bytes written and any error.
-func (b *Writer) WriteRune(r rune) (size int, err error) {
-	if r < utf8.RuneSelf {
-		err = b.WriteByte(byte(r))
-		if err != nil {
-			return 0, err
-		}
-		return 1, nil
-	}
-	if b.err != nil {
-		return 0, b.err
-	}
-	n := b.Available()
-	if n < utf8.UTFMax {
-		if b.flush(); b.err != nil {
-			return 0, b.err
-		}
-		n = b.Available()
-		if n < utf8.UTFMax {
-			// Can only happen if buffer is silly small.
-			return b.WriteString(string(r))
-		}
-	}
-	size = utf8.EncodeRune(b.buf[b.n:], r)
-	b.n += size
-	return size, nil
-}
-
-// WriteString writes a string.
-// It returns the number of bytes written.
-// If the count is less than len(s), it also returns an error explaining
-// why the write is short.
-func (b *Writer) WriteString(s string) (int, error) {
-	nn := 0
-	for len(s) > b.Available() && b.err == nil {
-		n := copy(b.buf[b.n:], s)
-		b.n += n
-		nn += n
-		s = s[n:]
-		b.flush()
-	}
-	if b.err != nil {
-		return nn, b.err
-	}
-	n := copy(b.buf[b.n:], s)
-	b.n += n
-	nn += n
-	return nn, nil
-}
-
-// ReadFrom implements io.ReaderFrom.
-func (b *Writer) ReadFrom(r io.Reader) (n int64, err error) {
-	if b.Buffered() == 0 {
-		if w, ok := b.wr.(io.ReaderFrom); ok {
-			return w.ReadFrom(r)
-		}
-	}
-	var m int
-	for {
-		if b.Available() == 0 {
-			if err1 := b.flush(); err1 != nil {
-				return n, err1
-			}
-		}
-		nr := 0
-		for nr < maxConsecutiveEmptyReads {
-			m, err = r.Read(b.buf[b.n:])
-			if m != 0 || err != nil {
-				break
-			}
-			nr++
-		}
-		if nr == maxConsecutiveEmptyReads {
-			return n, io.ErrNoProgress
-		}
-		b.n += m
-		n += int64(m)
-		if err != nil {
-			break
-		}
-	}
-	if err == io.EOF {
-		// If we filled the buffer exactly, flush pre-emptively.
-		if b.Available() == 0 {
-			err = b.flush()
-		} else {
-			err = nil
-		}
-	}
-	return n, err
-}
-
-// buffered input and output
-
-// ReadWriter stores pointers to a Reader and a Writer.
-// It implements io.ReadWriter.
-type ReadWriter struct {
-	*Reader
-	*Writer
-}
-
-// NewReadWriter allocates a new ReadWriter that dispatches to r and w.
-func NewReadWriter(r *Reader, w *Writer) *ReadWriter {
-	return &ReadWriter{r, w}
-}

+ 0 - 27
vendor/gopkg.in/redis.v2/LICENSE

@@ -1,27 +0,0 @@
-Copyright (c) 2012 The Redis Go Client Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-   * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-   * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-   * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 0 - 3
vendor/gopkg.in/redis.v2/Makefile

@@ -1,3 +0,0 @@
-all:
-	go test gopkg.in/redis.v2 -cpu=1,2,4
-	go test gopkg.in/redis.v2 -short -race

+ 0 - 46
vendor/gopkg.in/redis.v2/README.md

@@ -1,46 +0,0 @@
-Redis client for Golang [![Build Status](https://travis-ci.org/go-redis/redis.png?branch=master)](https://travis-ci.org/go-redis/redis)
-=======================
-
-Supports:
-
-- Redis 2.8 commands except QUIT, MONITOR, SLOWLOG and SYNC.
-- Pub/sub.
-- Transactions.
-- Pipelining.
-- Connection pool.
-- TLS connections.
-- Thread safety.
-- Timeouts.
-- Redis Sentinel.
-
-API docs: http://godoc.org/gopkg.in/redis.v2.
-Examples: http://godoc.org/gopkg.in/redis.v2#pkg-examples.
-
-Installation
-------------
-
-Install:
-
-    go get gopkg.in/redis.v2
-
-Look and feel
--------------
-
-Some corner cases:
-
-    SORT list LIMIT 0 2 ASC
-    vals, err := client.Sort("list", redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()
-
-    ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
-    vals, err := client.ZRangeByScoreWithScores("zset", redis.ZRangeByScore{
-        Min: "-inf",
-        Max: "+inf",
-        Offset: 0,
-        Count: 2,
-    }).Result()
-
-    ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
-    vals, err := client.ZInterStore("out", redis.ZStore{Weights: []int64{2, 3}}, "zset1", "zset2").Result()
-
-    EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
-    vals, err := client.Eval("return {KEYS[1],ARGV[1]}", []string{"key"}, []string{"hello"}).Result()

+ 0 - 597
vendor/gopkg.in/redis.v2/command.go

@@ -1,597 +0,0 @@
-package redis
-
-import (
-	"fmt"
-	"strconv"
-	"strings"
-	"time"
-
-	"gopkg.in/bufio.v1"
-)
-
-var (
-	_ Cmder = (*Cmd)(nil)
-	_ Cmder = (*SliceCmd)(nil)
-	_ Cmder = (*StatusCmd)(nil)
-	_ Cmder = (*IntCmd)(nil)
-	_ Cmder = (*DurationCmd)(nil)
-	_ Cmder = (*BoolCmd)(nil)
-	_ Cmder = (*StringCmd)(nil)
-	_ Cmder = (*FloatCmd)(nil)
-	_ Cmder = (*StringSliceCmd)(nil)
-	_ Cmder = (*BoolSliceCmd)(nil)
-	_ Cmder = (*StringStringMapCmd)(nil)
-	_ Cmder = (*ZSliceCmd)(nil)
-	_ Cmder = (*ScanCmd)(nil)
-)
-
-type Cmder interface {
-	args() []string
-	parseReply(*bufio.Reader) error
-	setErr(error)
-
-	writeTimeout() *time.Duration
-	readTimeout() *time.Duration
-
-	Err() error
-	String() string
-}
-
-func setCmdsErr(cmds []Cmder, e error) {
-	for _, cmd := range cmds {
-		cmd.setErr(e)
-	}
-}
-
-func cmdString(cmd Cmder, val interface{}) string {
-	s := strings.Join(cmd.args(), " ")
-	if err := cmd.Err(); err != nil {
-		return s + ": " + err.Error()
-	}
-	if val != nil {
-		return s + ": " + fmt.Sprint(val)
-	}
-	return s
-
-}
-
-//------------------------------------------------------------------------------
-
-type baseCmd struct {
-	_args []string
-
-	err error
-
-	_writeTimeout, _readTimeout *time.Duration
-}
-
-func newBaseCmd(args ...string) *baseCmd {
-	return &baseCmd{
-		_args: args,
-	}
-}
-
-func (cmd *baseCmd) Err() error {
-	if cmd.err != nil {
-		return cmd.err
-	}
-	return nil
-}
-
-func (cmd *baseCmd) args() []string {
-	return cmd._args
-}
-
-func (cmd *baseCmd) readTimeout() *time.Duration {
-	return cmd._readTimeout
-}
-
-func (cmd *baseCmd) setReadTimeout(d time.Duration) {
-	cmd._readTimeout = &d
-}
-
-func (cmd *baseCmd) writeTimeout() *time.Duration {
-	return cmd._writeTimeout
-}
-
-func (cmd *baseCmd) setWriteTimeout(d time.Duration) {
-	cmd._writeTimeout = &d
-}
-
-func (cmd *baseCmd) setErr(e error) {
-	cmd.err = e
-}
-
-//------------------------------------------------------------------------------
-
-type Cmd struct {
-	*baseCmd
-
-	val interface{}
-}
-
-func NewCmd(args ...string) *Cmd {
-	return &Cmd{
-		baseCmd: newBaseCmd(args...),
-	}
-}
-
-func (cmd *Cmd) Val() interface{} {
-	return cmd.val
-}
-
-func (cmd *Cmd) Result() (interface{}, error) {
-	return cmd.val, cmd.err
-}
-
-func (cmd *Cmd) String() string {
-	return cmdString(cmd, cmd.val)
-}
-
-func (cmd *Cmd) parseReply(rd *bufio.Reader) error {
-	cmd.val, cmd.err = parseReply(rd, parseSlice)
-	return cmd.err
-}
-
-//------------------------------------------------------------------------------
-
-type SliceCmd struct {
-	*baseCmd
-
-	val []interface{}
-}
-
-func NewSliceCmd(args ...string) *SliceCmd {
-	return &SliceCmd{
-		baseCmd: newBaseCmd(args...),
-	}
-}
-
-func (cmd *SliceCmd) Val() []interface{} {
-	return cmd.val
-}
-
-func (cmd *SliceCmd) Result() ([]interface{}, error) {
-	return cmd.val, cmd.err
-}
-
-func (cmd *SliceCmd) String() string {
-	return cmdString(cmd, cmd.val)
-}
-
-func (cmd *SliceCmd) parseReply(rd *bufio.Reader) error {
-	v, err := parseReply(rd, parseSlice)
-	if err != nil {
-		cmd.err = err
-		return err
-	}
-	cmd.val = v.([]interface{})
-	return nil
-}
-
-//------------------------------------------------------------------------------
-
-type StatusCmd struct {
-	*baseCmd
-
-	val string
-}
-
-func NewStatusCmd(args ...string) *StatusCmd {
-	return &StatusCmd{
-		baseCmd: newBaseCmd(args...),
-	}
-}
-
-func (cmd *StatusCmd) Val() string {
-	return cmd.val
-}
-
-func (cmd *StatusCmd) Result() (string, error) {
-	return cmd.val, cmd.err
-}
-
-func (cmd *StatusCmd) String() string {
-	return cmdString(cmd, cmd.val)
-}
-
-func (cmd *StatusCmd) parseReply(rd *bufio.Reader) error {
-	v, err := parseReply(rd, nil)
-	if err != nil {
-		cmd.err = err
-		return err
-	}
-	cmd.val = v.(string)
-	return nil
-}
-
-//------------------------------------------------------------------------------
-
-type IntCmd struct {
-	*baseCmd
-
-	val int64
-}
-
-func NewIntCmd(args ...string) *IntCmd {
-	return &IntCmd{
-		baseCmd: newBaseCmd(args...),
-	}
-}
-
-func (cmd *IntCmd) Val() int64 {
-	return cmd.val
-}
-
-func (cmd *IntCmd) Result() (int64, error) {
-	return cmd.val, cmd.err
-}
-
-func (cmd *IntCmd) String() string {
-	return cmdString(cmd, cmd.val)
-}
-
-func (cmd *IntCmd) parseReply(rd *bufio.Reader) error {
-	v, err := parseReply(rd, nil)
-	if err != nil {
-		cmd.err = err
-		return err
-	}
-	cmd.val = v.(int64)
-	return nil
-}
-
-//------------------------------------------------------------------------------
-
-type DurationCmd struct {
-	*baseCmd
-
-	val       time.Duration
-	precision time.Duration
-}
-
-func NewDurationCmd(precision time.Duration, args ...string) *DurationCmd {
-	return &DurationCmd{
-		baseCmd:   newBaseCmd(args...),
-		precision: precision,
-	}
-}
-
-func (cmd *DurationCmd) Val() time.Duration {
-	return cmd.val
-}
-
-func (cmd *DurationCmd) Result() (time.Duration, error) {
-	return cmd.val, cmd.err
-}
-
-func (cmd *DurationCmd) String() string {
-	return cmdString(cmd, cmd.val)
-}
-
-func (cmd *DurationCmd) parseReply(rd *bufio.Reader) error {
-	v, err := parseReply(rd, nil)
-	if err != nil {
-		cmd.err = err
-		return err
-	}
-	cmd.val = time.Duration(v.(int64)) * cmd.precision
-	return nil
-}
-
-//------------------------------------------------------------------------------
-
-type BoolCmd struct {
-	*baseCmd
-
-	val bool
-}
-
-func NewBoolCmd(args ...string) *BoolCmd {
-	return &BoolCmd{
-		baseCmd: newBaseCmd(args...),
-	}
-}
-
-func (cmd *BoolCmd) Val() bool {
-	return cmd.val
-}
-
-func (cmd *BoolCmd) Result() (bool, error) {
-	return cmd.val, cmd.err
-}
-
-func (cmd *BoolCmd) String() string {
-	return cmdString(cmd, cmd.val)
-}
-
-func (cmd *BoolCmd) parseReply(rd *bufio.Reader) error {
-	v, err := parseReply(rd, nil)
-	if err != nil {
-		cmd.err = err
-		return err
-	}
-	cmd.val = v.(int64) == 1
-	return nil
-}
-
-//------------------------------------------------------------------------------
-
-type StringCmd struct {
-	*baseCmd
-
-	val string
-}
-
-func NewStringCmd(args ...string) *StringCmd {
-	return &StringCmd{
-		baseCmd: newBaseCmd(args...),
-	}
-}
-
-func (cmd *StringCmd) Val() string {
-	return cmd.val
-}
-
-func (cmd *StringCmd) Result() (string, error) {
-	return cmd.val, cmd.err
-}
-
-func (cmd *StringCmd) Int64() (int64, error) {
-	if cmd.err != nil {
-		return 0, cmd.err
-	}
-	return strconv.ParseInt(cmd.val, 10, 64)
-}
-
-func (cmd *StringCmd) Uint64() (uint64, error) {
-	if cmd.err != nil {
-		return 0, cmd.err
-	}
-	return strconv.ParseUint(cmd.val, 10, 64)
-}
-
-func (cmd *StringCmd) Float64() (float64, error) {
-	if cmd.err != nil {
-		return 0, cmd.err
-	}
-	return strconv.ParseFloat(cmd.val, 64)
-}
-
-func (cmd *StringCmd) String() string {
-	return cmdString(cmd, cmd.val)
-}
-
-func (cmd *StringCmd) parseReply(rd *bufio.Reader) error {
-	v, err := parseReply(rd, nil)
-	if err != nil {
-		cmd.err = err
-		return err
-	}
-	cmd.val = v.(string)
-	return nil
-}
-
-//------------------------------------------------------------------------------
-
-type FloatCmd struct {
-	*baseCmd
-
-	val float64
-}
-
-func NewFloatCmd(args ...string) *FloatCmd {
-	return &FloatCmd{
-		baseCmd: newBaseCmd(args...),
-	}
-}
-
-func (cmd *FloatCmd) Val() float64 {
-	return cmd.val
-}
-
-func (cmd *FloatCmd) String() string {
-	return cmdString(cmd, cmd.val)
-}
-
-func (cmd *FloatCmd) parseReply(rd *bufio.Reader) error {
-	v, err := parseReply(rd, nil)
-	if err != nil {
-		cmd.err = err
-		return err
-	}
-	cmd.val, cmd.err = strconv.ParseFloat(v.(string), 64)
-	return cmd.err
-}
-
-//------------------------------------------------------------------------------
-
-type StringSliceCmd struct {
-	*baseCmd
-
-	val []string
-}
-
-func NewStringSliceCmd(args ...string) *StringSliceCmd {
-	return &StringSliceCmd{
-		baseCmd: newBaseCmd(args...),
-	}
-}
-
-func (cmd *StringSliceCmd) Val() []string {
-	return cmd.val
-}
-
-func (cmd *StringSliceCmd) Result() ([]string, error) {
-	return cmd.Val(), cmd.Err()
-}
-
-func (cmd *StringSliceCmd) String() string {
-	return cmdString(cmd, cmd.val)
-}
-
-func (cmd *StringSliceCmd) parseReply(rd *bufio.Reader) error {
-	v, err := parseReply(rd, parseStringSlice)
-	if err != nil {
-		cmd.err = err
-		return err
-	}
-	cmd.val = v.([]string)
-	return nil
-}
-
-//------------------------------------------------------------------------------
-
-type BoolSliceCmd struct {
-	*baseCmd
-
-	val []bool
-}
-
-func NewBoolSliceCmd(args ...string) *BoolSliceCmd {
-	return &BoolSliceCmd{
-		baseCmd: newBaseCmd(args...),
-	}
-}
-
-func (cmd *BoolSliceCmd) Val() []bool {
-	return cmd.val
-}
-
-func (cmd *BoolSliceCmd) Result() ([]bool, error) {
-	return cmd.val, cmd.err
-}
-
-func (cmd *BoolSliceCmd) String() string {
-	return cmdString(cmd, cmd.val)
-}
-
-func (cmd *BoolSliceCmd) parseReply(rd *bufio.Reader) error {
-	v, err := parseReply(rd, parseBoolSlice)
-	if err != nil {
-		cmd.err = err
-		return err
-	}
-	cmd.val = v.([]bool)
-	return nil
-}
-
-//------------------------------------------------------------------------------
-
-type StringStringMapCmd struct {
-	*baseCmd
-
-	val map[string]string
-}
-
-func NewStringStringMapCmd(args ...string) *StringStringMapCmd {
-	return &StringStringMapCmd{
-		baseCmd: newBaseCmd(args...),
-	}
-}
-
-func (cmd *StringStringMapCmd) Val() map[string]string {
-	return cmd.val
-}
-
-func (cmd *StringStringMapCmd) Result() (map[string]string, error) {
-	return cmd.val, cmd.err
-}
-
-func (cmd *StringStringMapCmd) String() string {
-	return cmdString(cmd, cmd.val)
-}
-
-func (cmd *StringStringMapCmd) parseReply(rd *bufio.Reader) error {
-	v, err := parseReply(rd, parseStringStringMap)
-	if err != nil {
-		cmd.err = err
-		return err
-	}
-	cmd.val = v.(map[string]string)
-	return nil
-}
-
-//------------------------------------------------------------------------------
-
-type ZSliceCmd struct {
-	*baseCmd
-
-	val []Z
-}
-
-func NewZSliceCmd(args ...string) *ZSliceCmd {
-	return &ZSliceCmd{
-		baseCmd: newBaseCmd(args...),
-	}
-}
-
-func (cmd *ZSliceCmd) Val() []Z {
-	return cmd.val
-}
-
-func (cmd *ZSliceCmd) Result() ([]Z, error) {
-	return cmd.val, cmd.err
-}
-
-func (cmd *ZSliceCmd) String() string {
-	return cmdString(cmd, cmd.val)
-}
-
-func (cmd *ZSliceCmd) parseReply(rd *bufio.Reader) error {
-	v, err := parseReply(rd, parseZSlice)
-	if err != nil {
-		cmd.err = err
-		return err
-	}
-	cmd.val = v.([]Z)
-	return nil
-}
-
-//------------------------------------------------------------------------------
-
-type ScanCmd struct {
-	*baseCmd
-
-	cursor int64
-	keys   []string
-}
-
-func NewScanCmd(args ...string) *ScanCmd {
-	return &ScanCmd{
-		baseCmd: newBaseCmd(args...),
-	}
-}
-
-func (cmd *ScanCmd) Val() (int64, []string) {
-	return cmd.cursor, cmd.keys
-}
-
-func (cmd *ScanCmd) Result() (int64, []string, error) {
-	return cmd.cursor, cmd.keys, cmd.err
-}
-
-func (cmd *ScanCmd) String() string {
-	return cmdString(cmd, cmd.keys)
-}
-
-func (cmd *ScanCmd) parseReply(rd *bufio.Reader) error {
-	vi, err := parseReply(rd, parseSlice)
-	if err != nil {
-		cmd.err = err
-		return cmd.err
-	}
-	v := vi.([]interface{})
-
-	cmd.cursor, cmd.err = strconv.ParseInt(v[0].(string), 10, 64)
-	if cmd.err != nil {
-		return cmd.err
-	}
-
-	keys := v[1].([]interface{})
-	for _, keyi := range keys {
-		cmd.keys = append(cmd.keys, keyi.(string))
-	}
-
-	return nil
-}

+ 0 - 1246
vendor/gopkg.in/redis.v2/commands.go

@@ -1,1246 +0,0 @@
-package redis
-
-import (
-	"io"
-	"strconv"
-	"time"
-)
-
-func formatFloat(f float64) string {
-	return strconv.FormatFloat(f, 'f', -1, 64)
-}
-
-func readTimeout(sec int64) time.Duration {
-	if sec == 0 {
-		return 0
-	}
-	return time.Duration(sec+1) * time.Second
-}
-
-//------------------------------------------------------------------------------
-
-func (c *Client) Auth(password string) *StatusCmd {
-	cmd := NewStatusCmd("AUTH", password)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) Echo(message string) *StringCmd {
-	cmd := NewStringCmd("ECHO", message)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) Ping() *StatusCmd {
-	cmd := NewStatusCmd("PING")
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) Quit() *StatusCmd {
-	panic("not implemented")
-}
-
-func (c *Client) Select(index int64) *StatusCmd {
-	cmd := NewStatusCmd("SELECT", strconv.FormatInt(index, 10))
-	c.Process(cmd)
-	return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c *Client) Del(keys ...string) *IntCmd {
-	args := append([]string{"DEL"}, keys...)
-	cmd := NewIntCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) Dump(key string) *StringCmd {
-	cmd := NewStringCmd("DUMP", key)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) Exists(key string) *BoolCmd {
-	cmd := NewBoolCmd("EXISTS", key)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) Expire(key string, dur time.Duration) *BoolCmd {
-	cmd := NewBoolCmd("EXPIRE", key, strconv.FormatInt(int64(dur/time.Second), 10))
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) ExpireAt(key string, tm time.Time) *BoolCmd {
-	cmd := NewBoolCmd("EXPIREAT", key, strconv.FormatInt(tm.Unix(), 10))
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) Keys(pattern string) *StringSliceCmd {
-	cmd := NewStringSliceCmd("KEYS", pattern)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) Migrate(host, port, key string, db, timeout int64) *StatusCmd {
-	cmd := NewStatusCmd(
-		"MIGRATE",
-		host,
-		port,
-		key,
-		strconv.FormatInt(db, 10),
-		strconv.FormatInt(timeout, 10),
-	)
-	cmd.setReadTimeout(readTimeout(timeout))
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) Move(key string, db int64) *BoolCmd {
-	cmd := NewBoolCmd("MOVE", key, strconv.FormatInt(db, 10))
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) ObjectRefCount(keys ...string) *IntCmd {
-	args := append([]string{"OBJECT", "REFCOUNT"}, keys...)
-	cmd := NewIntCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) ObjectEncoding(keys ...string) *StringCmd {
-	args := append([]string{"OBJECT", "ENCODING"}, keys...)
-	cmd := NewStringCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) ObjectIdleTime(keys ...string) *DurationCmd {
-	args := append([]string{"OBJECT", "IDLETIME"}, keys...)
-	cmd := NewDurationCmd(time.Second, args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) Persist(key string) *BoolCmd {
-	cmd := NewBoolCmd("PERSIST", key)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) PExpire(key string, dur time.Duration) *BoolCmd {
-	cmd := NewBoolCmd("PEXPIRE", key, strconv.FormatInt(int64(dur/time.Millisecond), 10))
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) PExpireAt(key string, tm time.Time) *BoolCmd {
-	cmd := NewBoolCmd(
-		"PEXPIREAT",
-		key,
-		strconv.FormatInt(tm.UnixNano()/int64(time.Millisecond), 10),
-	)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) PTTL(key string) *DurationCmd {
-	cmd := NewDurationCmd(time.Millisecond, "PTTL", key)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) RandomKey() *StringCmd {
-	cmd := NewStringCmd("RANDOMKEY")
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) Rename(key, newkey string) *StatusCmd {
-	cmd := NewStatusCmd("RENAME", key, newkey)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) RenameNX(key, newkey string) *BoolCmd {
-	cmd := NewBoolCmd("RENAMENX", key, newkey)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) Restore(key string, ttl int64, value string) *StatusCmd {
-	cmd := NewStatusCmd(
-		"RESTORE",
-		key,
-		strconv.FormatInt(ttl, 10),
-		value,
-	)
-	c.Process(cmd)
-	return cmd
-}
-
-type Sort struct {
-	By            string
-	Offset, Count float64
-	Get           []string
-	Order         string
-	IsAlpha       bool
-	Store         string
-}
-
-func (c *Client) Sort(key string, sort Sort) *StringSliceCmd {
-	args := []string{"SORT", key}
-	if sort.By != "" {
-		args = append(args, "BY", sort.By)
-	}
-	if sort.Offset != 0 || sort.Count != 0 {
-		args = append(args, "LIMIT", formatFloat(sort.Offset), formatFloat(sort.Count))
-	}
-	for _, get := range sort.Get {
-		args = append(args, "GET", get)
-	}
-	if sort.Order != "" {
-		args = append(args, sort.Order)
-	}
-	if sort.IsAlpha {
-		args = append(args, "ALPHA")
-	}
-	if sort.Store != "" {
-		args = append(args, "STORE", sort.Store)
-	}
-	cmd := NewStringSliceCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) TTL(key string) *DurationCmd {
-	cmd := NewDurationCmd(time.Second, "TTL", key)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) Type(key string) *StatusCmd {
-	cmd := NewStatusCmd("TYPE", key)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) Scan(cursor int64, match string, count int64) *ScanCmd {
-	args := []string{"SCAN", strconv.FormatInt(cursor, 10)}
-	if match != "" {
-		args = append(args, "MATCH", match)
-	}
-	if count > 0 {
-		args = append(args, "COUNT", strconv.FormatInt(count, 10))
-	}
-	cmd := NewScanCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) SScan(key string, cursor int64, match string, count int64) *ScanCmd {
-	args := []string{"SSCAN", key, strconv.FormatInt(cursor, 10)}
-	if match != "" {
-		args = append(args, "MATCH", match)
-	}
-	if count > 0 {
-		args = append(args, "COUNT", strconv.FormatInt(count, 10))
-	}
-	cmd := NewScanCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) HScan(key string, cursor int64, match string, count int64) *ScanCmd {
-	args := []string{"HSCAN", key, strconv.FormatInt(cursor, 10)}
-	if match != "" {
-		args = append(args, "MATCH", match)
-	}
-	if count > 0 {
-		args = append(args, "COUNT", strconv.FormatInt(count, 10))
-	}
-	cmd := NewScanCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) ZScan(key string, cursor int64, match string, count int64) *ScanCmd {
-	args := []string{"ZSCAN", key, strconv.FormatInt(cursor, 10)}
-	if match != "" {
-		args = append(args, "MATCH", match)
-	}
-	if count > 0 {
-		args = append(args, "COUNT", strconv.FormatInt(count, 10))
-	}
-	cmd := NewScanCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c *Client) Append(key, value string) *IntCmd {
-	cmd := NewIntCmd("APPEND", key, value)
-	c.Process(cmd)
-	return cmd
-}
-
-type BitCount struct {
-	Start, End int64
-}
-
-func (c *Client) BitCount(key string, bitCount *BitCount) *IntCmd {
-	args := []string{"BITCOUNT", key}
-	if bitCount != nil {
-		args = append(
-			args,
-			strconv.FormatInt(bitCount.Start, 10),
-			strconv.FormatInt(bitCount.End, 10),
-		)
-	}
-	cmd := NewIntCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) bitOp(op, destKey string, keys ...string) *IntCmd {
-	args := []string{"BITOP", op, destKey}
-	args = append(args, keys...)
-	cmd := NewIntCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) BitOpAnd(destKey string, keys ...string) *IntCmd {
-	return c.bitOp("AND", destKey, keys...)
-}
-
-func (c *Client) BitOpOr(destKey string, keys ...string) *IntCmd {
-	return c.bitOp("OR", destKey, keys...)
-}
-
-func (c *Client) BitOpXor(destKey string, keys ...string) *IntCmd {
-	return c.bitOp("XOR", destKey, keys...)
-}
-
-func (c *Client) BitOpNot(destKey string, key string) *IntCmd {
-	return c.bitOp("NOT", destKey, key)
-}
-
-func (c *Client) Decr(key string) *IntCmd {
-	cmd := NewIntCmd("DECR", key)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) DecrBy(key string, decrement int64) *IntCmd {
-	cmd := NewIntCmd("DECRBY", key, strconv.FormatInt(decrement, 10))
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) Get(key string) *StringCmd {
-	cmd := NewStringCmd("GET", key)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) GetBit(key string, offset int64) *IntCmd {
-	cmd := NewIntCmd("GETBIT", key, strconv.FormatInt(offset, 10))
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) GetRange(key string, start, end int64) *StringCmd {
-	cmd := NewStringCmd(
-		"GETRANGE",
-		key,
-		strconv.FormatInt(start, 10),
-		strconv.FormatInt(end, 10),
-	)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) GetSet(key, value string) *StringCmd {
-	cmd := NewStringCmd("GETSET", key, value)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) Incr(key string) *IntCmd {
-	cmd := NewIntCmd("INCR", key)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) IncrBy(key string, value int64) *IntCmd {
-	cmd := NewIntCmd("INCRBY", key, strconv.FormatInt(value, 10))
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) IncrByFloat(key string, value float64) *FloatCmd {
-	cmd := NewFloatCmd("INCRBYFLOAT", key, formatFloat(value))
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) MGet(keys ...string) *SliceCmd {
-	args := append([]string{"MGET"}, keys...)
-	cmd := NewSliceCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) MSet(pairs ...string) *StatusCmd {
-	args := append([]string{"MSET"}, pairs...)
-	cmd := NewStatusCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) MSetNX(pairs ...string) *BoolCmd {
-	args := append([]string{"MSETNX"}, pairs...)
-	cmd := NewBoolCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) PSetEx(key string, dur time.Duration, value string) *StatusCmd {
-	cmd := NewStatusCmd(
-		"PSETEX",
-		key,
-		strconv.FormatInt(int64(dur/time.Millisecond), 10),
-		value,
-	)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) Set(key, value string) *StatusCmd {
-	cmd := NewStatusCmd("SET", key, value)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) SetBit(key string, offset int64, value int) *IntCmd {
-	cmd := NewIntCmd(
-		"SETBIT",
-		key,
-		strconv.FormatInt(offset, 10),
-		strconv.FormatInt(int64(value), 10),
-	)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) SetEx(key string, dur time.Duration, value string) *StatusCmd {
-	cmd := NewStatusCmd("SETEX", key, strconv.FormatInt(int64(dur/time.Second), 10), value)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) SetNX(key, value string) *BoolCmd {
-	cmd := NewBoolCmd("SETNX", key, value)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) SetRange(key string, offset int64, value string) *IntCmd {
-	cmd := NewIntCmd("SETRANGE", key, strconv.FormatInt(offset, 10), value)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) StrLen(key string) *IntCmd {
-	cmd := NewIntCmd("STRLEN", key)
-	c.Process(cmd)
-	return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c *Client) HDel(key string, fields ...string) *IntCmd {
-	args := append([]string{"HDEL", key}, fields...)
-	cmd := NewIntCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) HExists(key, field string) *BoolCmd {
-	cmd := NewBoolCmd("HEXISTS", key, field)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) HGet(key, field string) *StringCmd {
-	cmd := NewStringCmd("HGET", key, field)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) HGetAll(key string) *StringSliceCmd {
-	cmd := NewStringSliceCmd("HGETALL", key)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) HGetAllMap(key string) *StringStringMapCmd {
-	cmd := NewStringStringMapCmd("HGETALL", key)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) HIncrBy(key, field string, incr int64) *IntCmd {
-	cmd := NewIntCmd("HINCRBY", key, field, strconv.FormatInt(incr, 10))
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) HIncrByFloat(key, field string, incr float64) *FloatCmd {
-	cmd := NewFloatCmd("HINCRBYFLOAT", key, field, formatFloat(incr))
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) HKeys(key string) *StringSliceCmd {
-	cmd := NewStringSliceCmd("HKEYS", key)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) HLen(key string) *IntCmd {
-	cmd := NewIntCmd("HLEN", key)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) HMGet(key string, fields ...string) *SliceCmd {
-	args := append([]string{"HMGET", key}, fields...)
-	cmd := NewSliceCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) HMSet(key, field, value string, pairs ...string) *StatusCmd {
-	args := append([]string{"HMSET", key, field, value}, pairs...)
-	cmd := NewStatusCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) HSet(key, field, value string) *BoolCmd {
-	cmd := NewBoolCmd("HSET", key, field, value)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) HSetNX(key, field, value string) *BoolCmd {
-	cmd := NewBoolCmd("HSETNX", key, field, value)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) HVals(key string) *StringSliceCmd {
-	cmd := NewStringSliceCmd("HVALS", key)
-	c.Process(cmd)
-	return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c *Client) BLPop(timeout int64, keys ...string) *StringSliceCmd {
-	args := append([]string{"BLPOP"}, keys...)
-	args = append(args, strconv.FormatInt(timeout, 10))
-	cmd := NewStringSliceCmd(args...)
-	cmd.setReadTimeout(readTimeout(timeout))
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) BRPop(timeout int64, keys ...string) *StringSliceCmd {
-	args := append([]string{"BRPOP"}, keys...)
-	args = append(args, strconv.FormatInt(timeout, 10))
-	cmd := NewStringSliceCmd(args...)
-	cmd.setReadTimeout(readTimeout(timeout))
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) BRPopLPush(source, destination string, timeout int64) *StringCmd {
-	cmd := NewStringCmd(
-		"BRPOPLPUSH",
-		source,
-		destination,
-		strconv.FormatInt(timeout, 10),
-	)
-	cmd.setReadTimeout(readTimeout(timeout))
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) LIndex(key string, index int64) *StringCmd {
-	cmd := NewStringCmd("LINDEX", key, strconv.FormatInt(index, 10))
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) LInsert(key, op, pivot, value string) *IntCmd {
-	cmd := NewIntCmd("LINSERT", key, op, pivot, value)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) LLen(key string) *IntCmd {
-	cmd := NewIntCmd("LLEN", key)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) LPop(key string) *StringCmd {
-	cmd := NewStringCmd("LPOP", key)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) LPush(key string, values ...string) *IntCmd {
-	args := append([]string{"LPUSH", key}, values...)
-	cmd := NewIntCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) LPushX(key, value string) *IntCmd {
-	cmd := NewIntCmd("LPUSHX", key, value)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) LRange(key string, start, stop int64) *StringSliceCmd {
-	cmd := NewStringSliceCmd(
-		"LRANGE",
-		key,
-		strconv.FormatInt(start, 10),
-		strconv.FormatInt(stop, 10),
-	)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) LRem(key string, count int64, value string) *IntCmd {
-	cmd := NewIntCmd("LREM", key, strconv.FormatInt(count, 10), value)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) LSet(key string, index int64, value string) *StatusCmd {
-	cmd := NewStatusCmd("LSET", key, strconv.FormatInt(index, 10), value)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) LTrim(key string, start, stop int64) *StatusCmd {
-	cmd := NewStatusCmd(
-		"LTRIM",
-		key,
-		strconv.FormatInt(start, 10),
-		strconv.FormatInt(stop, 10),
-	)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) RPop(key string) *StringCmd {
-	cmd := NewStringCmd("RPOP", key)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) RPopLPush(source, destination string) *StringCmd {
-	cmd := NewStringCmd("RPOPLPUSH", source, destination)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) RPush(key string, values ...string) *IntCmd {
-	args := append([]string{"RPUSH", key}, values...)
-	cmd := NewIntCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) RPushX(key string, value string) *IntCmd {
-	cmd := NewIntCmd("RPUSHX", key, value)
-	c.Process(cmd)
-	return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c *Client) SAdd(key string, members ...string) *IntCmd {
-	args := append([]string{"SADD", key}, members...)
-	cmd := NewIntCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) SCard(key string) *IntCmd {
-	cmd := NewIntCmd("SCARD", key)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) SDiff(keys ...string) *StringSliceCmd {
-	args := append([]string{"SDIFF"}, keys...)
-	cmd := NewStringSliceCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) SDiffStore(destination string, keys ...string) *IntCmd {
-	args := append([]string{"SDIFFSTORE", destination}, keys...)
-	cmd := NewIntCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) SInter(keys ...string) *StringSliceCmd {
-	args := append([]string{"SINTER"}, keys...)
-	cmd := NewStringSliceCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) SInterStore(destination string, keys ...string) *IntCmd {
-	args := append([]string{"SINTERSTORE", destination}, keys...)
-	cmd := NewIntCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) SIsMember(key, member string) *BoolCmd {
-	cmd := NewBoolCmd("SISMEMBER", key, member)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) SMembers(key string) *StringSliceCmd {
-	cmd := NewStringSliceCmd("SMEMBERS", key)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) SMove(source, destination, member string) *BoolCmd {
-	cmd := NewBoolCmd("SMOVE", source, destination, member)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) SPop(key string) *StringCmd {
-	cmd := NewStringCmd("SPOP", key)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) SRandMember(key string) *StringCmd {
-	cmd := NewStringCmd("SRANDMEMBER", key)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) SRem(key string, members ...string) *IntCmd {
-	args := append([]string{"SREM", key}, members...)
-	cmd := NewIntCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) SUnion(keys ...string) *StringSliceCmd {
-	args := append([]string{"SUNION"}, keys...)
-	cmd := NewStringSliceCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) SUnionStore(destination string, keys ...string) *IntCmd {
-	args := append([]string{"SUNIONSTORE", destination}, keys...)
-	cmd := NewIntCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-//------------------------------------------------------------------------------
-
-type Z struct {
-	Score  float64
-	Member string
-}
-
-type ZStore struct {
-	Weights   []int64
-	Aggregate string
-}
-
-func (c *Client) ZAdd(key string, members ...Z) *IntCmd {
-	args := []string{"ZADD", key}
-	for _, m := range members {
-		args = append(args, formatFloat(m.Score), m.Member)
-	}
-	cmd := NewIntCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) ZCard(key string) *IntCmd {
-	cmd := NewIntCmd("ZCARD", key)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) ZCount(key, min, max string) *IntCmd {
-	cmd := NewIntCmd("ZCOUNT", key, min, max)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) ZIncrBy(key string, increment float64, member string) *FloatCmd {
-	cmd := NewFloatCmd("ZINCRBY", key, formatFloat(increment), member)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) ZInterStore(
-	destination string,
-	store ZStore,
-	keys ...string,
-) *IntCmd {
-	args := []string{"ZINTERSTORE", destination, strconv.FormatInt(int64(len(keys)), 10)}
-	args = append(args, keys...)
-	if len(store.Weights) > 0 {
-		args = append(args, "WEIGHTS")
-		for _, weight := range store.Weights {
-			args = append(args, strconv.FormatInt(weight, 10))
-		}
-	}
-	if store.Aggregate != "" {
-		args = append(args, "AGGREGATE", store.Aggregate)
-	}
-	cmd := NewIntCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) zRange(key string, start, stop int64, withScores bool) *StringSliceCmd {
-	args := []string{
-		"ZRANGE",
-		key,
-		strconv.FormatInt(start, 10),
-		strconv.FormatInt(stop, 10),
-	}
-	if withScores {
-		args = append(args, "WITHSCORES")
-	}
-	cmd := NewStringSliceCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) ZRange(key string, start, stop int64) *StringSliceCmd {
-	return c.zRange(key, start, stop, false)
-}
-
-func (c *Client) ZRangeWithScores(key string, start, stop int64) *ZSliceCmd {
-	args := []string{
-		"ZRANGE",
-		key,
-		strconv.FormatInt(start, 10),
-		strconv.FormatInt(stop, 10),
-		"WITHSCORES",
-	}
-	cmd := NewZSliceCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-type ZRangeByScore struct {
-	Min, Max string
-
-	Offset, Count int64
-}
-
-func (c *Client) zRangeByScore(key string, opt ZRangeByScore, withScores bool) *StringSliceCmd {
-	args := []string{"ZRANGEBYSCORE", key, opt.Min, opt.Max}
-	if withScores {
-		args = append(args, "WITHSCORES")
-	}
-	if opt.Offset != 0 || opt.Count != 0 {
-		args = append(
-			args,
-			"LIMIT",
-			strconv.FormatInt(opt.Offset, 10),
-			strconv.FormatInt(opt.Count, 10),
-		)
-	}
-	cmd := NewStringSliceCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) ZRangeByScore(key string, opt ZRangeByScore) *StringSliceCmd {
-	return c.zRangeByScore(key, opt, false)
-}
-
-func (c *Client) ZRangeByScoreWithScores(key string, opt ZRangeByScore) *ZSliceCmd {
-	args := []string{"ZRANGEBYSCORE", key, opt.Min, opt.Max, "WITHSCORES"}
-	if opt.Offset != 0 || opt.Count != 0 {
-		args = append(
-			args,
-			"LIMIT",
-			strconv.FormatInt(opt.Offset, 10),
-			strconv.FormatInt(opt.Count, 10),
-		)
-	}
-	cmd := NewZSliceCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) ZRank(key, member string) *IntCmd {
-	cmd := NewIntCmd("ZRANK", key, member)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) ZRem(key string, members ...string) *IntCmd {
-	args := append([]string{"ZREM", key}, members...)
-	cmd := NewIntCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) ZRemRangeByRank(key string, start, stop int64) *IntCmd {
-	cmd := NewIntCmd(
-		"ZREMRANGEBYRANK",
-		key,
-		strconv.FormatInt(start, 10),
-		strconv.FormatInt(stop, 10),
-	)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) ZRemRangeByScore(key, min, max string) *IntCmd {
-	cmd := NewIntCmd("ZREMRANGEBYSCORE", key, min, max)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) zRevRange(key, start, stop string, withScores bool) *StringSliceCmd {
-	args := []string{"ZREVRANGE", key, start, stop}
-	if withScores {
-		args = append(args, "WITHSCORES")
-	}
-	cmd := NewStringSliceCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) ZRevRange(key, start, stop string) *StringSliceCmd {
-	return c.zRevRange(key, start, stop, false)
-}
-
-func (c *Client) ZRevRangeWithScores(key, start, stop string) *ZSliceCmd {
-	args := []string{"ZREVRANGE", key, start, stop, "WITHSCORES"}
-	cmd := NewZSliceCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) zRevRangeByScore(key string, opt ZRangeByScore, withScores bool) *StringSliceCmd {
-	args := []string{"ZREVRANGEBYSCORE", key, opt.Max, opt.Min}
-	if withScores {
-		args = append(args, "WITHSCORES")
-	}
-	if opt.Offset != 0 || opt.Count != 0 {
-		args = append(
-			args,
-			"LIMIT",
-			strconv.FormatInt(opt.Offset, 10),
-			strconv.FormatInt(opt.Count, 10),
-		)
-	}
-	cmd := NewStringSliceCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) ZRevRangeByScore(key string, opt ZRangeByScore) *StringSliceCmd {
-	return c.zRevRangeByScore(key, opt, false)
-}
-
-func (c *Client) ZRevRangeByScoreWithScores(key string, opt ZRangeByScore) *ZSliceCmd {
-	args := []string{"ZREVRANGEBYSCORE", key, opt.Max, opt.Min, "WITHSCORES"}
-	if opt.Offset != 0 || opt.Count != 0 {
-		args = append(
-			args,
-			"LIMIT",
-			strconv.FormatInt(opt.Offset, 10),
-			strconv.FormatInt(opt.Count, 10),
-		)
-	}
-	cmd := NewZSliceCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) ZRevRank(key, member string) *IntCmd {
-	cmd := NewIntCmd("ZREVRANK", key, member)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) ZScore(key, member string) *FloatCmd {
-	cmd := NewFloatCmd("ZSCORE", key, member)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) ZUnionStore(
-	destination string,
-	store ZStore,
-	keys ...string,
-) *IntCmd {
-	args := []string{"ZUNIONSTORE", destination, strconv.FormatInt(int64(len(keys)), 10)}
-	args = append(args, keys...)
-	if len(store.Weights) > 0 {
-		args = append(args, "WEIGHTS")
-		for _, weight := range store.Weights {
-			args = append(args, strconv.FormatInt(weight, 10))
-		}
-	}
-	if store.Aggregate != "" {
-		args = append(args, "AGGREGATE", store.Aggregate)
-	}
-	cmd := NewIntCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c *Client) BgRewriteAOF() *StatusCmd {
-	cmd := NewStatusCmd("BGREWRITEAOF")
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) BgSave() *StatusCmd {
-	cmd := NewStatusCmd("BGSAVE")
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) ClientKill(ipPort string) *StatusCmd {
-	cmd := NewStatusCmd("CLIENT", "KILL", ipPort)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) ClientList() *StringCmd {
-	cmd := NewStringCmd("CLIENT", "LIST")
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) ConfigGet(parameter string) *SliceCmd {
-	cmd := NewSliceCmd("CONFIG", "GET", parameter)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) ConfigResetStat() *StatusCmd {
-	cmd := NewStatusCmd("CONFIG", "RESETSTAT")
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) ConfigSet(parameter, value string) *StatusCmd {
-	cmd := NewStatusCmd("CONFIG", "SET", parameter, value)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) DbSize() *IntCmd {
-	cmd := NewIntCmd("DBSIZE")
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) FlushAll() *StatusCmd {
-	cmd := NewStatusCmd("FLUSHALL")
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) FlushDb() *StatusCmd {
-	cmd := NewStatusCmd("FLUSHDB")
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) Info() *StringCmd {
-	cmd := NewStringCmd("INFO")
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) LastSave() *IntCmd {
-	cmd := NewIntCmd("LASTSAVE")
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) Save() *StatusCmd {
-	cmd := NewStatusCmd("SAVE")
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) shutdown(modifier string) *StatusCmd {
-	var args []string
-	if modifier == "" {
-		args = []string{"SHUTDOWN"}
-	} else {
-		args = []string{"SHUTDOWN", modifier}
-	}
-	cmd := NewStatusCmd(args...)
-	c.Process(cmd)
-	if err := cmd.Err(); err != nil {
-		if err == io.EOF {
-			// Server quit as expected.
-			cmd.err = nil
-		}
-	} else {
-		// Server did not quit. String reply contains the reason.
-		cmd.err = errorf(cmd.val)
-		cmd.val = ""
-	}
-	return cmd
-}
-
-func (c *Client) Shutdown() *StatusCmd {
-	return c.shutdown("")
-}
-
-func (c *Client) ShutdownSave() *StatusCmd {
-	return c.shutdown("SAVE")
-}
-
-func (c *Client) ShutdownNoSave() *StatusCmd {
-	return c.shutdown("NOSAVE")
-}
-
-func (c *Client) SlaveOf(host, port string) *StatusCmd {
-	cmd := NewStatusCmd("SLAVEOF", host, port)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) SlowLog() {
-	panic("not implemented")
-}
-
-func (c *Client) Sync() {
-	panic("not implemented")
-}
-
-func (c *Client) Time() *StringSliceCmd {
-	cmd := NewStringSliceCmd("TIME")
-	c.Process(cmd)
-	return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c *Client) Eval(script string, keys []string, args []string) *Cmd {
-	cmdArgs := []string{"EVAL", script, strconv.FormatInt(int64(len(keys)), 10)}
-	cmdArgs = append(cmdArgs, keys...)
-	cmdArgs = append(cmdArgs, args...)
-	cmd := NewCmd(cmdArgs...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) EvalSha(sha1 string, keys []string, args []string) *Cmd {
-	cmdArgs := []string{"EVALSHA", sha1, strconv.FormatInt(int64(len(keys)), 10)}
-	cmdArgs = append(cmdArgs, keys...)
-	cmdArgs = append(cmdArgs, args...)
-	cmd := NewCmd(cmdArgs...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) ScriptExists(scripts ...string) *BoolSliceCmd {
-	args := append([]string{"SCRIPT", "EXISTS"}, scripts...)
-	cmd := NewBoolSliceCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) ScriptFlush() *StatusCmd {
-	cmd := NewStatusCmd("SCRIPT", "FLUSH")
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) ScriptKill() *StatusCmd {
-	cmd := NewStatusCmd("SCRIPT", "KILL")
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) ScriptLoad(script string) *StringCmd {
-	cmd := NewStringCmd("SCRIPT", "LOAD", script)
-	c.Process(cmd)
-	return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c *Client) DebugObject(key string) *StringCmd {
-	cmd := NewStringCmd("DEBUG", "OBJECT", key)
-	c.Process(cmd)
-	return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c *Client) PubSubChannels(pattern string) *StringSliceCmd {
-	args := []string{"PUBSUB", "CHANNELS"}
-	if pattern != "*" {
-		args = append(args, pattern)
-	}
-	cmd := NewStringSliceCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) PubSubNumSub(channels ...string) *SliceCmd {
-	args := []string{"PUBSUB", "NUMSUB"}
-	args = append(args, channels...)
-	cmd := NewSliceCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Client) PubSubNumPat() *IntCmd {
-	cmd := NewIntCmd("PUBSUB", "NUMPAT")
-	c.Process(cmd)
-	return cmd
-}

+ 0 - 23
vendor/gopkg.in/redis.v2/error.go

@@ -1,23 +0,0 @@
-package redis
-
-import (
-	"fmt"
-)
-
-// Redis nil reply.
-var Nil = errorf("redis: nil")
-
-// Redis transaction failed.
-var TxFailedErr = errorf("redis: transaction failed")
-
-type redisError struct {
-	s string
-}
-
-func errorf(s string, args ...interface{}) redisError {
-	return redisError{s: fmt.Sprintf(s, args...)}
-}
-
-func (err redisError) Error() string {
-	return err.s
-}

+ 0 - 138
vendor/gopkg.in/redis.v2/multi.go

@@ -1,138 +0,0 @@
-package redis
-
-import (
-	"errors"
-	"fmt"
-)
-
-var errDiscard = errors.New("redis: Discard can be used only inside Exec")
-
-// Not thread-safe.
-type Multi struct {
-	*Client
-}
-
-func (c *Client) Multi() *Multi {
-	return &Multi{
-		Client: &Client{
-			baseClient: &baseClient{
-				opt:      c.opt,
-				connPool: newSingleConnPool(c.connPool, true),
-			},
-		},
-	}
-}
-
-func (c *Multi) Close() error {
-	if err := c.Unwatch().Err(); err != nil {
-		return err
-	}
-	return c.Client.Close()
-}
-
-func (c *Multi) Watch(keys ...string) *StatusCmd {
-	args := append([]string{"WATCH"}, keys...)
-	cmd := NewStatusCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Multi) Unwatch(keys ...string) *StatusCmd {
-	args := append([]string{"UNWATCH"}, keys...)
-	cmd := NewStatusCmd(args...)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *Multi) Discard() error {
-	if c.cmds == nil {
-		return errDiscard
-	}
-	c.cmds = c.cmds[:1]
-	return nil
-}
-
-// Exec always returns a list of commands. If the transaction fails,
-// TxFailedErr is returned. Otherwise Exec returns the error of the
-// first failed command, or nil.
-func (c *Multi) Exec(f func() error) ([]Cmder, error) {
-	c.cmds = []Cmder{NewStatusCmd("MULTI")}
-	if err := f(); err != nil {
-		return nil, err
-	}
-	c.cmds = append(c.cmds, NewSliceCmd("EXEC"))
-
-	cmds := c.cmds
-	c.cmds = nil
-
-	if len(cmds) == 2 {
-		return []Cmder{}, nil
-	}
-
-	cn, err := c.conn()
-	if err != nil {
-		setCmdsErr(cmds[1:len(cmds)-1], err)
-		return cmds[1 : len(cmds)-1], err
-	}
-
-	err = c.execCmds(cn, cmds)
-	if err != nil {
-		c.freeConn(cn, err)
-		return cmds[1 : len(cmds)-1], err
-	}
-
-	c.putConn(cn)
-	return cmds[1 : len(cmds)-1], nil
-}
-
-func (c *Multi) execCmds(cn *conn, cmds []Cmder) error {
-	err := c.writeCmd(cn, cmds...)
-	if err != nil {
-		setCmdsErr(cmds[1:len(cmds)-1], err)
-		return err
-	}
-
-	statusCmd := NewStatusCmd()
-
-	// Omit last command (EXEC).
-	cmdsLen := len(cmds) - 1
-
-	// Parse queued replies.
-	for i := 0; i < cmdsLen; i++ {
-		if err := statusCmd.parseReply(cn.rd); err != nil {
-			setCmdsErr(cmds[1:len(cmds)-1], err)
-			return err
-		}
-	}
-
-	// Parse number of replies.
-	line, err := readLine(cn.rd)
-	if err != nil {
-		setCmdsErr(cmds[1:len(cmds)-1], err)
-		return err
-	}
-	if line[0] != '*' {
-		err := fmt.Errorf("redis: expected '*', but got line %q", line)
-		setCmdsErr(cmds[1:len(cmds)-1], err)
-		return err
-	}
-	if len(line) == 3 && line[1] == '-' && line[2] == '1' {
-		setCmdsErr(cmds[1:len(cmds)-1], TxFailedErr)
-		return TxFailedErr
-	}
-
-	var firstCmdErr error
-
-	// Parse replies.
-	// Loop starts from 1 to omit MULTI cmd.
-	for i := 1; i < cmdsLen; i++ {
-		cmd := cmds[i]
-		if err := cmd.parseReply(cn.rd); err != nil {
-			if firstCmdErr == nil {
-				firstCmdErr = err
-			}
-		}
-	}
-
-	return firstCmdErr
-}

+ 0 - 262
vendor/gopkg.in/redis.v2/parser.go

@@ -1,262 +0,0 @@
-package redis
-
-import (
-	"errors"
-	"fmt"
-	"strconv"
-
-	"gopkg.in/bufio.v1"
-)
-
-type multiBulkParser func(rd *bufio.Reader, n int64) (interface{}, error)
-
-var (
-	errReaderTooSmall = errors.New("redis: reader is too small")
-)
-
-//------------------------------------------------------------------------------
-
-func appendArgs(buf []byte, args []string) []byte {
-	buf = append(buf, '*')
-	buf = strconv.AppendUint(buf, uint64(len(args)), 10)
-	buf = append(buf, '\r', '\n')
-	for _, arg := range args {
-		buf = append(buf, '$')
-		buf = strconv.AppendUint(buf, uint64(len(arg)), 10)
-		buf = append(buf, '\r', '\n')
-		buf = append(buf, arg...)
-		buf = append(buf, '\r', '\n')
-	}
-	return buf
-}
-
-//------------------------------------------------------------------------------
-
-func readLine(rd *bufio.Reader) ([]byte, error) {
-	line, isPrefix, err := rd.ReadLine()
-	if err != nil {
-		return line, err
-	}
-	if isPrefix {
-		return line, errReaderTooSmall
-	}
-	return line, nil
-}
-
-func readN(rd *bufio.Reader, n int) ([]byte, error) {
-	b, err := rd.ReadN(n)
-	if err == bufio.ErrBufferFull {
-		tmp := make([]byte, n)
-		r := copy(tmp, b)
-		b = tmp
-
-		for {
-			nn, err := rd.Read(b[r:])
-			r += nn
-			if r >= n {
-				// Ignore error if we read enough.
-				break
-			}
-			if err != nil {
-				return nil, err
-			}
-		}
-	} else if err != nil {
-		return nil, err
-	}
-	return b, nil
-}
-
-//------------------------------------------------------------------------------
-
-func parseReq(rd *bufio.Reader) ([]string, error) {
-	line, err := readLine(rd)
-	if err != nil {
-		return nil, err
-	}
-
-	if line[0] != '*' {
-		return []string{string(line)}, nil
-	}
-	numReplies, err := strconv.ParseInt(string(line[1:]), 10, 64)
-	if err != nil {
-		return nil, err
-	}
-
-	args := make([]string, 0, numReplies)
-	for i := int64(0); i < numReplies; i++ {
-		line, err = readLine(rd)
-		if err != nil {
-			return nil, err
-		}
-		if line[0] != '$' {
-			return nil, fmt.Errorf("redis: expected '$', but got %q", line)
-		}
-
-		argLen, err := strconv.ParseInt(string(line[1:]), 10, 32)
-		if err != nil {
-			return nil, err
-		}
-
-		arg, err := readN(rd, int(argLen)+2)
-		if err != nil {
-			return nil, err
-		}
-		args = append(args, string(arg[:argLen]))
-	}
-	return args, nil
-}
-
-//------------------------------------------------------------------------------
-
-func parseReply(rd *bufio.Reader, p multiBulkParser) (interface{}, error) {
-	line, err := readLine(rd)
-	if err != nil {
-		return nil, err
-	}
-
-	switch line[0] {
-	case '-':
-		return nil, errorf(string(line[1:]))
-	case '+':
-		return string(line[1:]), nil
-	case ':':
-		v, err := strconv.ParseInt(string(line[1:]), 10, 64)
-		if err != nil {
-			return nil, err
-		}
-		return v, nil
-	case '$':
-		if len(line) == 3 && line[1] == '-' && line[2] == '1' {
-			return nil, Nil
-		}
-
-		replyLen, err := strconv.Atoi(string(line[1:]))
-		if err != nil {
-			return nil, err
-		}
-
-		b, err := readN(rd, replyLen+2)
-		if err != nil {
-			return nil, err
-		}
-		return string(b[:replyLen]), nil
-	case '*':
-		if len(line) == 3 && line[1] == '-' && line[2] == '1' {
-			return nil, Nil
-		}
-
-		repliesNum, err := strconv.ParseInt(string(line[1:]), 10, 64)
-		if err != nil {
-			return nil, err
-		}
-
-		return p(rd, repliesNum)
-	}
-	return nil, fmt.Errorf("redis: can't parse %q", line)
-}
-
-func parseSlice(rd *bufio.Reader, n int64) (interface{}, error) {
-	vals := make([]interface{}, 0, n)
-	for i := int64(0); i < n; i++ {
-		v, err := parseReply(rd, parseSlice)
-		if err == Nil {
-			vals = append(vals, nil)
-		} else if err != nil {
-			return nil, err
-		} else {
-			vals = append(vals, v)
-		}
-	}
-	return vals, nil
-}
-
-func parseStringSlice(rd *bufio.Reader, n int64) (interface{}, error) {
-	vals := make([]string, 0, n)
-	for i := int64(0); i < n; i++ {
-		viface, err := parseReply(rd, nil)
-		if err != nil {
-			return nil, err
-		}
-		v, ok := viface.(string)
-		if !ok {
-			return nil, fmt.Errorf("got %T, expected string", viface)
-		}
-		vals = append(vals, v)
-	}
-	return vals, nil
-}
-
-func parseBoolSlice(rd *bufio.Reader, n int64) (interface{}, error) {
-	vals := make([]bool, 0, n)
-	for i := int64(0); i < n; i++ {
-		viface, err := parseReply(rd, nil)
-		if err != nil {
-			return nil, err
-		}
-		v, ok := viface.(int64)
-		if !ok {
-			return nil, fmt.Errorf("got %T, expected int64", viface)
-		}
-		vals = append(vals, v == 1)
-	}
-	return vals, nil
-}
-
-func parseStringStringMap(rd *bufio.Reader, n int64) (interface{}, error) {
-	m := make(map[string]string, n/2)
-	for i := int64(0); i < n; i += 2 {
-		keyiface, err := parseReply(rd, nil)
-		if err != nil {
-			return nil, err
-		}
-		key, ok := keyiface.(string)
-		if !ok {
-			return nil, fmt.Errorf("got %T, expected string", keyiface)
-		}
-
-		valueiface, err := parseReply(rd, nil)
-		if err != nil {
-			return nil, err
-		}
-		value, ok := valueiface.(string)
-		if !ok {
-			return nil, fmt.Errorf("got %T, expected string", valueiface)
-		}
-
-		m[key] = value
-	}
-	return m, nil
-}
-
-func parseZSlice(rd *bufio.Reader, n int64) (interface{}, error) {
-	zz := make([]Z, n/2)
-	for i := int64(0); i < n; i += 2 {
-		z := &zz[i/2]
-
-		memberiface, err := parseReply(rd, nil)
-		if err != nil {
-			return nil, err
-		}
-		member, ok := memberiface.(string)
-		if !ok {
-			return nil, fmt.Errorf("got %T, expected string", memberiface)
-		}
-		z.Member = member
-
-		scoreiface, err := parseReply(rd, nil)
-		if err != nil {
-			return nil, err
-		}
-		scorestr, ok := scoreiface.(string)
-		if !ok {
-			return nil, fmt.Errorf("got %T, expected string", scoreiface)
-		}
-		score, err := strconv.ParseFloat(scorestr, 64)
-		if err != nil {
-			return nil, err
-		}
-		z.Score = score
-	}
-	return zz, nil
-}

+ 0 - 91
vendor/gopkg.in/redis.v2/pipeline.go

@@ -1,91 +0,0 @@
-package redis
-
-// Not thread-safe.
-type Pipeline struct {
-	*Client
-
-	closed bool
-}
-
-func (c *Client) Pipeline() *Pipeline {
-	return &Pipeline{
-		Client: &Client{
-			baseClient: &baseClient{
-				opt:      c.opt,
-				connPool: c.connPool,
-
-				cmds: make([]Cmder, 0),
-			},
-		},
-	}
-}
-
-func (c *Client) Pipelined(f func(*Pipeline) error) ([]Cmder, error) {
-	pc := c.Pipeline()
-	if err := f(pc); err != nil {
-		return nil, err
-	}
-	cmds, err := pc.Exec()
-	pc.Close()
-	return cmds, err
-}
-
-func (c *Pipeline) Close() error {
-	c.closed = true
-	return nil
-}
-
-func (c *Pipeline) Discard() error {
-	if c.closed {
-		return errClosed
-	}
-	c.cmds = c.cmds[:0]
-	return nil
-}
-
-// Exec always returns the list of commands and the error of the first
-// failed command, if any.
-func (c *Pipeline) Exec() ([]Cmder, error) {
-	if c.closed {
-		return nil, errClosed
-	}
-
-	cmds := c.cmds
-	c.cmds = make([]Cmder, 0)
-
-	if len(cmds) == 0 {
-		return []Cmder{}, nil
-	}
-
-	cn, err := c.conn()
-	if err != nil {
-		setCmdsErr(cmds, err)
-		return cmds, err
-	}
-
-	if err := c.execCmds(cn, cmds); err != nil {
-		c.freeConn(cn, err)
-		return cmds, err
-	}
-
-	c.putConn(cn)
-	return cmds, nil
-}
-
-func (c *Pipeline) execCmds(cn *conn, cmds []Cmder) error {
-	if err := c.writeCmd(cn, cmds...); err != nil {
-		setCmdsErr(cmds, err)
-		return err
-	}
-
-	var firstCmdErr error
-	for _, cmd := range cmds {
-		if err := cmd.parseReply(cn.rd); err != nil {
-			if firstCmdErr == nil {
-				firstCmdErr = err
-			}
-		}
-	}
-
-	return firstCmdErr
-}

+ 0 - 405
vendor/gopkg.in/redis.v2/pool.go

@@ -1,405 +0,0 @@
-package redis
-
-import (
-	"container/list"
-	"errors"
-	"log"
-	"net"
-	"sync"
-	"time"
-
-	"gopkg.in/bufio.v1"
-)
-
-var (
-	errClosed      = errors.New("redis: client is closed")
-	errRateLimited = errors.New("redis: you open connections too fast")
-)
-
-var (
-	zeroTime = time.Time{}
-)
-
-type pool interface {
-	Get() (*conn, bool, error)
-	Put(*conn) error
-	Remove(*conn) error
-	Len() int
-	Size() int
-	Close() error
-	Filter(func(*conn) bool)
-}
-
-//------------------------------------------------------------------------------
-
-type conn struct {
-	netcn net.Conn
-	rd    *bufio.Reader
-	buf   []byte
-
-	inUse  bool
-	usedAt time.Time
-
-	readTimeout  time.Duration
-	writeTimeout time.Duration
-
-	elem *list.Element
-}
-
-func newConnFunc(dial func() (net.Conn, error)) func() (*conn, error) {
-	return func() (*conn, error) {
-		netcn, err := dial()
-		if err != nil {
-			return nil, err
-		}
-		cn := &conn{
-			netcn: netcn,
-			buf:   make([]byte, 0, 64),
-		}
-		cn.rd = bufio.NewReader(cn)
-		return cn, nil
-	}
-}
-
-func (cn *conn) Read(b []byte) (int, error) {
-	if cn.readTimeout != 0 {
-		cn.netcn.SetReadDeadline(time.Now().Add(cn.readTimeout))
-	} else {
-		cn.netcn.SetReadDeadline(zeroTime)
-	}
-	return cn.netcn.Read(b)
-}
-
-func (cn *conn) Write(b []byte) (int, error) {
-	if cn.writeTimeout != 0 {
-		cn.netcn.SetWriteDeadline(time.Now().Add(cn.writeTimeout))
-	} else {
-		cn.netcn.SetWriteDeadline(zeroTime)
-	}
-	return cn.netcn.Write(b)
-}
-
-func (cn *conn) RemoteAddr() net.Addr {
-	return cn.netcn.RemoteAddr()
-}
-
-func (cn *conn) Close() error {
-	return cn.netcn.Close()
-}
-
-//------------------------------------------------------------------------------
-
-type connPool struct {
-	dial func() (*conn, error)
-	rl   *rateLimiter
-
-	opt *options
-
-	cond  *sync.Cond
-	conns *list.List
-
-	idleNum int
-	closed  bool
-}
-
-func newConnPool(dial func() (*conn, error), opt *options) *connPool {
-	return &connPool{
-		dial: dial,
-		rl:   newRateLimiter(time.Second, 2*opt.PoolSize),
-
-		opt: opt,
-
-		cond:  sync.NewCond(&sync.Mutex{}),
-		conns: list.New(),
-	}
-}
-
-func (p *connPool) new() (*conn, error) {
-	if !p.rl.Check() {
-		return nil, errRateLimited
-	}
-	return p.dial()
-}
-
-func (p *connPool) Get() (*conn, bool, error) {
-	p.cond.L.Lock()
-
-	if p.closed {
-		p.cond.L.Unlock()
-		return nil, false, errClosed
-	}
-
-	if p.opt.IdleTimeout > 0 {
-		for el := p.conns.Front(); el != nil; el = el.Next() {
-			cn := el.Value.(*conn)
-			if cn.inUse {
-				break
-			}
-			if time.Since(cn.usedAt) > p.opt.IdleTimeout {
-				if err := p.remove(cn); err != nil {
-					log.Printf("remove failed: %s", err)
-				}
-			}
-		}
-	}
-
-	for p.conns.Len() >= p.opt.PoolSize && p.idleNum == 0 {
-		p.cond.Wait()
-	}
-
-	if p.idleNum > 0 {
-		elem := p.conns.Front()
-		cn := elem.Value.(*conn)
-		if cn.inUse {
-			panic("pool: precondition failed")
-		}
-		cn.inUse = true
-		p.conns.MoveToBack(elem)
-		p.idleNum--
-
-		p.cond.L.Unlock()
-		return cn, false, nil
-	}
-
-	if p.conns.Len() < p.opt.PoolSize {
-		cn, err := p.new()
-		if err != nil {
-			p.cond.L.Unlock()
-			return nil, false, err
-		}
-
-		cn.inUse = true
-		cn.elem = p.conns.PushBack(cn)
-
-		p.cond.L.Unlock()
-		return cn, true, nil
-	}
-
-	panic("not reached")
-}
-
-func (p *connPool) Put(cn *conn) error {
-	if cn.rd.Buffered() != 0 {
-		b, _ := cn.rd.ReadN(cn.rd.Buffered())
-		log.Printf("redis: connection has unread data: %q", b)
-		return p.Remove(cn)
-	}
-
-	if p.opt.IdleTimeout > 0 {
-		cn.usedAt = time.Now()
-	}
-
-	p.cond.L.Lock()
-	if p.closed {
-		p.cond.L.Unlock()
-		return errClosed
-	}
-	cn.inUse = false
-	p.conns.MoveToFront(cn.elem)
-	p.idleNum++
-	p.cond.Signal()
-	p.cond.L.Unlock()
-
-	return nil
-}
-
-func (p *connPool) Remove(cn *conn) error {
-	p.cond.L.Lock()
-	if p.closed {
-		// Noop, connection is already closed.
-		p.cond.L.Unlock()
-		return nil
-	}
-	err := p.remove(cn)
-	p.cond.Signal()
-	p.cond.L.Unlock()
-	return err
-}
-
-func (p *connPool) remove(cn *conn) error {
-	p.conns.Remove(cn.elem)
-	cn.elem = nil
-	if !cn.inUse {
-		p.idleNum--
-	}
-	return cn.Close()
-}
-
-// Len returns the number of idle connections.
-func (p *connPool) Len() int {
-	defer p.cond.L.Unlock()
-	p.cond.L.Lock()
-	return p.idleNum
-}
-
-// Size returns the number of connections in the pool.
-func (p *connPool) Size() int {
-	defer p.cond.L.Unlock()
-	p.cond.L.Lock()
-	return p.conns.Len()
-}
-
-func (p *connPool) Filter(f func(*conn) bool) {
-	p.cond.L.Lock()
-	for el, next := p.conns.Front(), p.conns.Front(); el != nil; el = next {
-		next = el.Next()
-		cn := el.Value.(*conn)
-		if !f(cn) {
-			p.remove(cn)
-		}
-	}
-	p.cond.L.Unlock()
-}
-
-func (p *connPool) Close() error {
-	defer p.cond.L.Unlock()
-	p.cond.L.Lock()
-	if p.closed {
-		return nil
-	}
-	p.closed = true
-	p.rl.Close()
-	var retErr error
-	for {
-		e := p.conns.Front()
-		if e == nil {
-			break
-		}
-		if err := p.remove(e.Value.(*conn)); err != nil {
-			log.Printf("cn.Close failed: %s", err)
-			retErr = err
-		}
-	}
-	return retErr
-}
-
-//------------------------------------------------------------------------------
-
-type singleConnPool struct {
-	pool pool
-
-	cnMtx sync.Mutex
-	cn    *conn
-
-	reusable bool
-
-	closed bool
-}
-
-func newSingleConnPool(pool pool, reusable bool) *singleConnPool {
-	return &singleConnPool{
-		pool:     pool,
-		reusable: reusable,
-	}
-}
-
-func (p *singleConnPool) SetConn(cn *conn) {
-	p.cnMtx.Lock()
-	p.cn = cn
-	p.cnMtx.Unlock()
-}
-
-func (p *singleConnPool) Get() (*conn, bool, error) {
-	defer p.cnMtx.Unlock()
-	p.cnMtx.Lock()
-
-	if p.closed {
-		return nil, false, errClosed
-	}
-	if p.cn != nil {
-		return p.cn, false, nil
-	}
-
-	cn, isNew, err := p.pool.Get()
-	if err != nil {
-		return nil, false, err
-	}
-	p.cn = cn
-
-	return p.cn, isNew, nil
-}
-
-func (p *singleConnPool) Put(cn *conn) error {
-	defer p.cnMtx.Unlock()
-	p.cnMtx.Lock()
-	if p.cn != cn {
-		panic("p.cn != cn")
-	}
-	if p.closed {
-		return errClosed
-	}
-	return nil
-}
-
-func (p *singleConnPool) put() error {
-	err := p.pool.Put(p.cn)
-	p.cn = nil
-	return err
-}
-
-func (p *singleConnPool) Remove(cn *conn) error {
-	defer p.cnMtx.Unlock()
-	p.cnMtx.Lock()
-	if p.cn == nil {
-		panic("p.cn == nil")
-	}
-	if p.cn != cn {
-		panic("p.cn != cn")
-	}
-	if p.closed {
-		return errClosed
-	}
-	return p.remove()
-}
-
-func (p *singleConnPool) remove() error {
-	err := p.pool.Remove(p.cn)
-	p.cn = nil
-	return err
-}
-
-func (p *singleConnPool) Len() int {
-	defer p.cnMtx.Unlock()
-	p.cnMtx.Lock()
-	if p.cn == nil {
-		return 0
-	}
-	return 1
-}
-
-func (p *singleConnPool) Size() int {
-	defer p.cnMtx.Unlock()
-	p.cnMtx.Lock()
-	if p.cn == nil {
-		return 0
-	}
-	return 1
-}
-
-func (p *singleConnPool) Filter(f func(*conn) bool) {
-	p.cnMtx.Lock()
-	if p.cn != nil {
-		if !f(p.cn) {
-			p.remove()
-		}
-	}
-	p.cnMtx.Unlock()
-}
-
-func (p *singleConnPool) Close() error {
-	defer p.cnMtx.Unlock()
-	p.cnMtx.Lock()
-	if p.closed {
-		return nil
-	}
-	p.closed = true
-	var err error
-	if p.cn != nil {
-		if p.reusable {
-			err = p.put()
-		} else {
-			err = p.remove()
-		}
-	}
-	return err
-}

+ 0 - 134
vendor/gopkg.in/redis.v2/pubsub.go

@@ -1,134 +0,0 @@
-package redis
-
-import (
-	"fmt"
-	"time"
-)
-
-// Not thread-safe.
-type PubSub struct {
-	*baseClient
-}
-
-func (c *Client) PubSub() *PubSub {
-	return &PubSub{
-		baseClient: &baseClient{
-			opt:      c.opt,
-			connPool: newSingleConnPool(c.connPool, false),
-		},
-	}
-}
-
-func (c *Client) Publish(channel, message string) *IntCmd {
-	req := NewIntCmd("PUBLISH", channel, message)
-	c.Process(req)
-	return req
-}
-
-type Message struct {
-	Channel string
-	Payload string
-}
-
-func (m *Message) String() string {
-	return fmt.Sprintf("Message<%s: %s>", m.Channel, m.Payload)
-}
-
-type PMessage struct {
-	Channel string
-	Pattern string
-	Payload string
-}
-
-func (m *PMessage) String() string {
-	return fmt.Sprintf("PMessage<%s: %s>", m.Channel, m.Payload)
-}
-
-type Subscription struct {
-	Kind    string
-	Channel string
-	Count   int
-}
-
-func (m *Subscription) String() string {
-	return fmt.Sprintf("%s: %s", m.Kind, m.Channel)
-}
-
-func (c *PubSub) Receive() (interface{}, error) {
-	return c.ReceiveTimeout(0)
-}
-
-func (c *PubSub) ReceiveTimeout(timeout time.Duration) (interface{}, error) {
-	cn, err := c.conn()
-	if err != nil {
-		return nil, err
-	}
-	cn.readTimeout = timeout
-
-	cmd := NewSliceCmd()
-	if err := cmd.parseReply(cn.rd); err != nil {
-		return nil, err
-	}
-
-	reply := cmd.Val()
-
-	msgName := reply[0].(string)
-	switch msgName {
-	case "subscribe", "unsubscribe", "psubscribe", "punsubscribe":
-		return &Subscription{
-			Kind:    msgName,
-			Channel: reply[1].(string),
-			Count:   int(reply[2].(int64)),
-		}, nil
-	case "message":
-		return &Message{
-			Channel: reply[1].(string),
-			Payload: reply[2].(string),
-		}, nil
-	case "pmessage":
-		return &PMessage{
-			Pattern: reply[1].(string),
-			Channel: reply[2].(string),
-			Payload: reply[3].(string),
-		}, nil
-	}
-	return nil, fmt.Errorf("redis: unsupported message name: %q", msgName)
-}
-
-func (c *PubSub) subscribe(cmd string, channels ...string) error {
-	cn, err := c.conn()
-	if err != nil {
-		return err
-	}
-
-	args := append([]string{cmd}, channels...)
-	req := NewSliceCmd(args...)
-	return c.writeCmd(cn, req)
-}
-
-func (c *PubSub) Subscribe(channels ...string) error {
-	return c.subscribe("SUBSCRIBE", channels...)
-}
-
-func (c *PubSub) PSubscribe(patterns ...string) error {
-	return c.subscribe("PSUBSCRIBE", patterns...)
-}
-
-func (c *PubSub) unsubscribe(cmd string, channels ...string) error {
-	cn, err := c.conn()
-	if err != nil {
-		return err
-	}
-
-	args := append([]string{cmd}, channels...)
-	req := NewSliceCmd(args...)
-	return c.writeCmd(cn, req)
-}
-
-func (c *PubSub) Unsubscribe(channels ...string) error {
-	return c.unsubscribe("UNSUBSCRIBE", channels...)
-}
-
-func (c *PubSub) PUnsubscribe(patterns ...string) error {
-	return c.unsubscribe("PUNSUBSCRIBE", patterns...)
-}

+ 0 - 53
vendor/gopkg.in/redis.v2/rate_limit.go

@@ -1,53 +0,0 @@
-package redis
-
-import (
-	"sync/atomic"
-	"time"
-)
-
-type rateLimiter struct {
-	v int64
-
-	_closed int64
-}
-
-func newRateLimiter(limit time.Duration, bucketSize int) *rateLimiter {
-	rl := &rateLimiter{
-		v: int64(bucketSize),
-	}
-	go rl.loop(limit, int64(bucketSize))
-	return rl
-}
-
-func (rl *rateLimiter) loop(limit time.Duration, bucketSize int64) {
-	for {
-		if rl.closed() {
-			break
-		}
-		if v := atomic.LoadInt64(&rl.v); v < bucketSize {
-			atomic.AddInt64(&rl.v, 1)
-		}
-		time.Sleep(limit)
-	}
-}
-
-func (rl *rateLimiter) Check() bool {
-	for {
-		if v := atomic.LoadInt64(&rl.v); v > 0 {
-			if atomic.CompareAndSwapInt64(&rl.v, v, v-1) {
-				return true
-			}
-		} else {
-			return false
-		}
-	}
-}
-
-func (rl *rateLimiter) Close() error {
-	atomic.StoreInt64(&rl._closed, 1)
-	return nil
-}
-
-func (rl *rateLimiter) closed() bool {
-	return atomic.LoadInt64(&rl._closed) == 1
-}

+ 0 - 231
vendor/gopkg.in/redis.v2/redis.go

@@ -1,231 +0,0 @@
-package redis
-
-import (
-	"log"
-	"net"
-	"time"
-)
-
-type baseClient struct {
-	connPool pool
-	opt      *options
-	cmds     []Cmder
-}
-
-func (c *baseClient) writeCmd(cn *conn, cmds ...Cmder) error {
-	buf := cn.buf[:0]
-	for _, cmd := range cmds {
-		buf = appendArgs(buf, cmd.args())
-	}
-
-	_, err := cn.Write(buf)
-	return err
-}
-
-func (c *baseClient) conn() (*conn, error) {
-	cn, isNew, err := c.connPool.Get()
-	if err != nil {
-		return nil, err
-	}
-
-	if isNew {
-		if err := c.initConn(cn); err != nil {
-			c.removeConn(cn)
-			return nil, err
-		}
-	}
-
-	return cn, nil
-}
-
-func (c *baseClient) initConn(cn *conn) error {
-	if c.opt.Password == "" && c.opt.DB == 0 {
-		return nil
-	}
-
-	pool := newSingleConnPool(c.connPool, false)
-	pool.SetConn(cn)
-
-	// Client is not closed because we want to reuse underlying connection.
-	client := &Client{
-		baseClient: &baseClient{
-			opt:      c.opt,
-			connPool: pool,
-		},
-	}
-
-	if c.opt.Password != "" {
-		if err := client.Auth(c.opt.Password).Err(); err != nil {
-			return err
-		}
-	}
-
-	if c.opt.DB > 0 {
-		if err := client.Select(c.opt.DB).Err(); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (c *baseClient) freeConn(cn *conn, ei error) error {
-	if cn.rd.Buffered() > 0 {
-		return c.connPool.Remove(cn)
-	}
-	if _, ok := ei.(redisError); ok {
-		return c.connPool.Put(cn)
-	}
-	return c.connPool.Remove(cn)
-}
-
-func (c *baseClient) removeConn(cn *conn) {
-	if err := c.connPool.Remove(cn); err != nil {
-		log.Printf("pool.Remove failed: %s", err)
-	}
-}
-
-func (c *baseClient) putConn(cn *conn) {
-	if err := c.connPool.Put(cn); err != nil {
-		log.Printf("pool.Put failed: %s", err)
-	}
-}
-
-func (c *baseClient) Process(cmd Cmder) {
-	if c.cmds == nil {
-		c.run(cmd)
-	} else {
-		c.cmds = append(c.cmds, cmd)
-	}
-}
-
-func (c *baseClient) run(cmd Cmder) {
-	cn, err := c.conn()
-	if err != nil {
-		cmd.setErr(err)
-		return
-	}
-
-	if timeout := cmd.writeTimeout(); timeout != nil {
-		cn.writeTimeout = *timeout
-	} else {
-		cn.writeTimeout = c.opt.WriteTimeout
-	}
-
-	if timeout := cmd.readTimeout(); timeout != nil {
-		cn.readTimeout = *timeout
-	} else {
-		cn.readTimeout = c.opt.ReadTimeout
-	}
-
-	if err := c.writeCmd(cn, cmd); err != nil {
-		c.freeConn(cn, err)
-		cmd.setErr(err)
-		return
-	}
-
-	if err := cmd.parseReply(cn.rd); err != nil {
-		c.freeConn(cn, err)
-		return
-	}
-
-	c.putConn(cn)
-}
-
-// Close closes the client, releasing any open resources.
-func (c *baseClient) Close() error {
-	return c.connPool.Close()
-}
-
-//------------------------------------------------------------------------------
-
-type options struct {
-	Password string
-	DB       int64
-
-	DialTimeout  time.Duration
-	ReadTimeout  time.Duration
-	WriteTimeout time.Duration
-
-	PoolSize    int
-	IdleTimeout time.Duration
-}
-
-type Options struct {
-	Network string
-	Addr    string
-
-	// Dialer creates new network connection and has priority over
-	// Network and Addr options.
-	Dialer func() (net.Conn, error)
-
-	Password string
-	DB       int64
-
-	DialTimeout  time.Duration
-	ReadTimeout  time.Duration
-	WriteTimeout time.Duration
-
-	PoolSize    int
-	IdleTimeout time.Duration
-}
-
-func (opt *Options) getPoolSize() int {
-	if opt.PoolSize == 0 {
-		return 10
-	}
-	return opt.PoolSize
-}
-
-func (opt *Options) getDialTimeout() time.Duration {
-	if opt.DialTimeout == 0 {
-		return 5 * time.Second
-	}
-	return opt.DialTimeout
-}
-
-func (opt *Options) options() *options {
-	return &options{
-		DB:       opt.DB,
-		Password: opt.Password,
-
-		DialTimeout:  opt.getDialTimeout(),
-		ReadTimeout:  opt.ReadTimeout,
-		WriteTimeout: opt.WriteTimeout,
-
-		PoolSize:    opt.getPoolSize(),
-		IdleTimeout: opt.IdleTimeout,
-	}
-}
-
-type Client struct {
-	*baseClient
-}
-
-func NewClient(clOpt *Options) *Client {
-	opt := clOpt.options()
-	dialer := clOpt.Dialer
-	if dialer == nil {
-		dialer = func() (net.Conn, error) {
-			return net.DialTimeout(clOpt.Network, clOpt.Addr, opt.DialTimeout)
-		}
-	}
-	return &Client{
-		baseClient: &baseClient{
-			opt:      opt,
-			connPool: newConnPool(newConnFunc(dialer), opt),
-		},
-	}
-}
-
-// Deprecated. Use NewClient instead.
-func NewTCPClient(opt *Options) *Client {
-	opt.Network = "tcp"
-	return NewClient(opt)
-}
-
-// Deprecated. Use NewClient instead.
-func NewUnixClient(opt *Options) *Client {
-	opt.Network = "unix"
-	return NewClient(opt)
-}
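
For reference, the constructor removed here was used roughly as below; every field and function comes from the Options struct and methods in the deleted file (note DB is an int64 in v2, and AUTH/SELECT are only sent on new connections when Password/DB are set):

```go
// Sketch only: the v2 construction pattern the rest of this commit migrates away from.
package main

import (
	"time"

	"gopkg.in/redis.v2"
)

func main() {
	client := redis.NewClient(&redis.Options{
		Network:     "tcp",
		Addr:        "localhost:6379",
		Password:    "",              // AUTH is issued on new connections only when non-empty
		DB:          0,               // SELECT is issued only when > 0
		DialTimeout: 5 * time.Second, // a zero value falls back to 5s via getDialTimeout
	})
	defer client.Close()
}
```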

+ 0 - 291
vendor/gopkg.in/redis.v2/sentinel.go

@@ -1,291 +0,0 @@
-package redis
-
-import (
-	"errors"
-	"log"
-	"net"
-	"strings"
-	"sync"
-	"time"
-)
-
-//------------------------------------------------------------------------------
-
-type FailoverOptions struct {
-	MasterName    string
-	SentinelAddrs []string
-
-	Password string
-	DB       int64
-
-	PoolSize int
-
-	DialTimeout  time.Duration
-	ReadTimeout  time.Duration
-	WriteTimeout time.Duration
-	IdleTimeout  time.Duration
-}
-
-func (opt *FailoverOptions) getPoolSize() int {
-	if opt.PoolSize == 0 {
-		return 10
-	}
-	return opt.PoolSize
-}
-
-func (opt *FailoverOptions) getDialTimeout() time.Duration {
-	if opt.DialTimeout == 0 {
-		return 5 * time.Second
-	}
-	return opt.DialTimeout
-}
-
-func (opt *FailoverOptions) options() *options {
-	return &options{
-		DB:       opt.DB,
-		Password: opt.Password,
-
-		DialTimeout:  opt.getDialTimeout(),
-		ReadTimeout:  opt.ReadTimeout,
-		WriteTimeout: opt.WriteTimeout,
-
-		PoolSize:    opt.getPoolSize(),
-		IdleTimeout: opt.IdleTimeout,
-	}
-}
-
-func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
-	opt := failoverOpt.options()
-	failover := &sentinelFailover{
-		masterName:    failoverOpt.MasterName,
-		sentinelAddrs: failoverOpt.SentinelAddrs,
-
-		opt: opt,
-	}
-	return &Client{
-		baseClient: &baseClient{
-			opt:      opt,
-			connPool: failover.Pool(),
-		},
-	}
-}
-
-//------------------------------------------------------------------------------
-
-type sentinelClient struct {
-	*baseClient
-}
-
-func newSentinel(clOpt *Options) *sentinelClient {
-	opt := clOpt.options()
-	opt.Password = ""
-	opt.DB = 0
-	dialer := func() (net.Conn, error) {
-		return net.DialTimeout("tcp", clOpt.Addr, opt.DialTimeout)
-	}
-	return &sentinelClient{
-		baseClient: &baseClient{
-			opt:      opt,
-			connPool: newConnPool(newConnFunc(dialer), opt),
-		},
-	}
-}
-
-func (c *sentinelClient) PubSub() *PubSub {
-	return &PubSub{
-		baseClient: &baseClient{
-			opt:      c.opt,
-			connPool: newSingleConnPool(c.connPool, false),
-		},
-	}
-}
-
-func (c *sentinelClient) GetMasterAddrByName(name string) *StringSliceCmd {
-	cmd := NewStringSliceCmd("SENTINEL", "get-master-addr-by-name", name)
-	c.Process(cmd)
-	return cmd
-}
-
-func (c *sentinelClient) Sentinels(name string) *SliceCmd {
-	cmd := NewSliceCmd("SENTINEL", "sentinels", name)
-	c.Process(cmd)
-	return cmd
-}
-
-type sentinelFailover struct {
-	masterName    string
-	sentinelAddrs []string
-
-	opt *options
-
-	pool     pool
-	poolOnce sync.Once
-
-	lock      sync.RWMutex
-	_sentinel *sentinelClient
-}
-
-func (d *sentinelFailover) dial() (net.Conn, error) {
-	addr, err := d.MasterAddr()
-	if err != nil {
-		return nil, err
-	}
-	return net.DialTimeout("tcp", addr, d.opt.DialTimeout)
-}
-
-func (d *sentinelFailover) Pool() pool {
-	d.poolOnce.Do(func() {
-		d.pool = newConnPool(newConnFunc(d.dial), d.opt)
-	})
-	return d.pool
-}
-
-func (d *sentinelFailover) MasterAddr() (string, error) {
-	defer d.lock.Unlock()
-	d.lock.Lock()
-
-	// Try last working sentinel.
-	if d._sentinel != nil {
-		addr, err := d._sentinel.GetMasterAddrByName(d.masterName).Result()
-		if err != nil {
-			log.Printf("redis-sentinel: GetMasterAddrByName %q failed: %s", d.masterName, err)
-			d.resetSentinel()
-		} else {
-			addr := net.JoinHostPort(addr[0], addr[1])
-			log.Printf("redis-sentinel: %q addr is %s", d.masterName, addr)
-			return addr, nil
-		}
-	}
-
-	for i, sentinelAddr := range d.sentinelAddrs {
-		sentinel := newSentinel(&Options{
-			Addr: sentinelAddr,
-
-			DB:       d.opt.DB,
-			Password: d.opt.Password,
-
-			DialTimeout:  d.opt.DialTimeout,
-			ReadTimeout:  d.opt.ReadTimeout,
-			WriteTimeout: d.opt.WriteTimeout,
-
-			PoolSize:    d.opt.PoolSize,
-			IdleTimeout: d.opt.IdleTimeout,
-		})
-		masterAddr, err := sentinel.GetMasterAddrByName(d.masterName).Result()
-		if err != nil {
-			log.Printf("redis-sentinel: GetMasterAddrByName %q failed: %s", d.masterName, err)
-			sentinel.Close()
-			continue
-		}
-
-		// Push working sentinel to the top.
-		d.sentinelAddrs[0], d.sentinelAddrs[i] = d.sentinelAddrs[i], d.sentinelAddrs[0]
-
-		d.setSentinel(sentinel)
-		addr := net.JoinHostPort(masterAddr[0], masterAddr[1])
-		log.Printf("redis-sentinel: %q addr is %s", d.masterName, addr)
-		return addr, nil
-	}
-
-	return "", errors.New("redis: all sentinels are unreachable")
-}
-
-func (d *sentinelFailover) setSentinel(sentinel *sentinelClient) {
-	d.discoverSentinels(sentinel)
-	d._sentinel = sentinel
-	go d.listen()
-}
-
-func (d *sentinelFailover) discoverSentinels(sentinel *sentinelClient) {
-	sentinels, err := sentinel.Sentinels(d.masterName).Result()
-	if err != nil {
-		log.Printf("redis-sentinel: Sentinels %q failed: %s", d.masterName, err)
-		return
-	}
-	for _, sentinel := range sentinels {
-		vals := sentinel.([]interface{})
-		for i := 0; i < len(vals); i += 2 {
-			key := vals[i].(string)
-			if key == "name" {
-				sentinelAddr := vals[i+1].(string)
-				if !contains(d.sentinelAddrs, sentinelAddr) {
-					log.Printf(
-						"redis-sentinel: discovered new %q sentinel: %s",
-						d.masterName, sentinelAddr,
-					)
-					d.sentinelAddrs = append(d.sentinelAddrs, sentinelAddr)
-				}
-			}
-		}
-	}
-}
-
-func (d *sentinelFailover) listen() {
-	var pubsub *PubSub
-	for {
-		if pubsub == nil {
-			pubsub = d._sentinel.PubSub()
-			if err := pubsub.Subscribe("+switch-master"); err != nil {
-				log.Printf("redis-sentinel: Subscribe failed: %s", err)
-				d.lock.Lock()
-				d.resetSentinel()
-				d.lock.Unlock()
-				return
-			}
-		}
-
-		msgIface, err := pubsub.Receive()
-		if err != nil {
-			log.Printf("redis-sentinel: Receive failed: %s", err)
-			pubsub.Close()
-			return
-		}
-
-		switch msg := msgIface.(type) {
-		case *Message:
-			switch msg.Channel {
-			case "+switch-master":
-				parts := strings.Split(msg.Payload, " ")
-				if parts[0] != d.masterName {
-					log.Printf("redis-sentinel: ignore new %s addr", parts[0])
-					continue
-				}
-				addr := net.JoinHostPort(parts[3], parts[4])
-				log.Printf(
-					"redis-sentinel: new %q addr is %s",
-					d.masterName, addr,
-				)
-				d.pool.Filter(func(cn *conn) bool {
-					if cn.RemoteAddr().String() != addr {
-						log.Printf(
-							"redis-sentinel: closing connection to old master %s",
-							cn.RemoteAddr(),
-						)
-						return false
-					}
-					return true
-				})
-			default:
-				log.Printf("redis-sentinel: unsupported message: %s", msg)
-			}
-		case *Subscription:
-			// Ignore.
-		default:
-			log.Printf("redis-sentinel: unsupported message: %s", msgIface)
-		}
-	}
-}
-
-func (d *sentinelFailover) resetSentinel() {
-	d._sentinel.Close()
-	d._sentinel = nil
-}
-
-func contains(slice []string, str string) bool {
-	for _, s := range slice {
-		if s == str {
-			return true
-		}
-	}
-	return false
-}
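
The removed Sentinel support was consumed through NewFailoverClient, which hides the sentinelFailover pool behind an ordinary Client. A short sketch using only the fields of the deleted FailoverOptions (same gopkg.in/redis.v2 and time imports as the sketch above):

```go
// Sketch only: the v2 Sentinel-backed constructor removed above.
failover := redis.NewFailoverClient(&redis.FailoverOptions{
	MasterName:    "mymaster",
	SentinelAddrs: []string{"sentinel-1:26379", "sentinel-2:26379"},
	DialTimeout:   5 * time.Second,
})
defer failover.Close()
```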

+ 2 - 0
vendor/gopkg.in/redis.v5/.gitignore

@@ -0,0 +1,2 @@
+*.rdb
+testdata/*/

+ 2 - 4
vendor/gopkg.in/bufio.v1/LICENSE → vendor/gopkg.in/redis.v5/LICENSE

@@ -1,4 +1,5 @@
-Copyright (c) 2013 The bufio Authors. All rights reserved.
+Copyright (c) 2013 The github.com/go-redis/redis Authors.
+All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are
@@ -10,9 +11,6 @@ notice, this list of conditions and the following disclaimer.
 copyright notice, this list of conditions and the following disclaimer
 in the documentation and/or other materials provided with the
 distribution.
-   * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
 
 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT

+ 19 - 0
vendor/gopkg.in/redis.v5/Makefile

@@ -0,0 +1,19 @@
+all: testdeps
+	go test ./...
+	go test ./... -short -race
+	go vet
+
+testdeps: testdata/redis/src/redis-server
+
+bench: testdeps
+	go test ./... -test.run=NONE -test.bench=. -test.benchmem
+
+.PHONY: all test testdeps bench
+
+testdata/redis:
+	mkdir -p $@
+	wget -qO- https://github.com/antirez/redis/archive/unstable.tar.gz | tar xvz --strip-components=1 -C $@
+
+testdata/redis/src/redis-server: testdata/redis
+	sed -i 's/libjemalloc.a/libjemalloc.a -lrt/g' $</src/Makefile
+	cd $< && make all

+ 136 - 0
vendor/gopkg.in/redis.v5/README.md

@@ -0,0 +1,136 @@
+# Redis client for Golang [![Build Status](https://travis-ci.org/go-redis/redis.png?branch=v5)](https://travis-ci.org/go-redis/redis)
+
+Supports:
+
+- Redis 3 commands except QUIT, MONITOR, SLOWLOG and SYNC.
+- [Pub/Sub](https://godoc.org/gopkg.in/redis.v5#PubSub).
+- [Transactions](https://godoc.org/gopkg.in/redis.v5#Multi).
+- [Pipeline](https://godoc.org/gopkg.in/redis.v5#example-Client-Pipeline) and [TxPipeline](https://godoc.org/gopkg.in/redis.v5#example-Client-TxPipeline).
+- [Scripting](https://godoc.org/gopkg.in/redis.v5#Script).
+- [Timeouts](https://godoc.org/gopkg.in/redis.v5#Options).
+- [Redis Sentinel](https://godoc.org/gopkg.in/redis.v5#NewFailoverClient).
+- [Redis Cluster](https://godoc.org/gopkg.in/redis.v5#NewClusterClient).
+- [Ring](https://godoc.org/gopkg.in/redis.v5#NewRing).
+- [Instrumentation](https://godoc.org/gopkg.in/redis.v5#ex-package--Instrumentation).
+- [Cache friendly](https://github.com/go-redis/cache).
+- [Rate limiting](https://github.com/go-redis/rate).
+- [Distributed Locks](https://github.com/bsm/redis-lock).
+
+API docs: https://godoc.org/gopkg.in/redis.v5.
+Examples: https://godoc.org/gopkg.in/redis.v5#pkg-examples.
+
+## Installation
+
+Install:
+
+```shell
+go get gopkg.in/redis.v5
+```
+
+Import:
+
+```go
+import "gopkg.in/redis.v5"
+```
+
+## Quickstart
+
+```go
+func ExampleNewClient() {
+	client := redis.NewClient(&redis.Options{
+		Addr:     "localhost:6379",
+		Password: "", // no password set
+		DB:       0,  // use default DB
+	})
+
+	pong, err := client.Ping().Result()
+	fmt.Println(pong, err)
+	// Output: PONG <nil>
+}
+
+func ExampleClient() {
+	err := client.Set("key", "value", 0).Err()
+	if err != nil {
+		panic(err)
+	}
+
+	val, err := client.Get("key").Result()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println("key", val)
+
+	val2, err := client.Get("key2").Result()
+	if err == redis.Nil {
+		fmt.Println("key2 does not exists")
+	} else if err != nil {
+		panic(err)
+	} else {
+		fmt.Println("key2", val2)
+	}
+	// Output: key value
+	// key2 does not exist
+}
+```
+
+## Howto
+
+Please go through [examples](https://godoc.org/gopkg.in/redis.v5#pkg-examples) to get an idea how to use this package.
+
+## Look and feel
+
+Some corner cases:
+
+    SET key value EX 10 NX
+    set, err := client.SetNX("key", "value", 10*time.Second).Result()
+
+    SORT list LIMIT 0 2 ASC
+    vals, err := client.Sort("list", redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()
+
+    ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
+    vals, err := client.ZRangeByScoreWithScores("zset", redis.ZRangeBy{
+        Min: "-inf",
+        Max: "+inf",
+        Offset: 0,
+        Count: 2,
+    }).Result()
+
+    ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
+    vals, err := client.ZInterStore("out", redis.ZStore{Weights: []int64{2, 3}}, "zset1", "zset2").Result()
+
+    EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
+    vals, err := client.Eval("return {KEYS[1],ARGV[1]}", []string{"key"}, []string{"hello"}).Result()
+
+## Benchmark
+
+go-redis vs redigo:
+
+```
+BenchmarkSetGoRedis10Conns64Bytes-4 	  200000	      7621 ns/op	     210 B/op	       6 allocs/op
+BenchmarkSetGoRedis100Conns64Bytes-4	  200000	      7554 ns/op	     210 B/op	       6 allocs/op
+BenchmarkSetGoRedis10Conns1KB-4     	  200000	      7697 ns/op	     210 B/op	       6 allocs/op
+BenchmarkSetGoRedis100Conns1KB-4    	  200000	      7688 ns/op	     210 B/op	       6 allocs/op
+BenchmarkSetGoRedis10Conns10KB-4    	  200000	      9214 ns/op	     210 B/op	       6 allocs/op
+BenchmarkSetGoRedis100Conns10KB-4   	  200000	      9181 ns/op	     210 B/op	       6 allocs/op
+BenchmarkSetGoRedis10Conns1MB-4     	    2000	    583242 ns/op	    2337 B/op	       6 allocs/op
+BenchmarkSetGoRedis100Conns1MB-4    	    2000	    583089 ns/op	    2338 B/op	       6 allocs/op
+BenchmarkSetRedigo10Conns64Bytes-4  	  200000	      7576 ns/op	     208 B/op	       7 allocs/op
+BenchmarkSetRedigo100Conns64Bytes-4 	  200000	      7782 ns/op	     208 B/op	       7 allocs/op
+BenchmarkSetRedigo10Conns1KB-4      	  200000	      7958 ns/op	     208 B/op	       7 allocs/op
+BenchmarkSetRedigo100Conns1KB-4     	  200000	      7725 ns/op	     208 B/op	       7 allocs/op
+BenchmarkSetRedigo10Conns10KB-4     	  100000	     18442 ns/op	     208 B/op	       7 allocs/op
+BenchmarkSetRedigo100Conns10KB-4    	  100000	     18818 ns/op	     208 B/op	       7 allocs/op
+BenchmarkSetRedigo10Conns1MB-4      	    2000	    668829 ns/op	     226 B/op	       7 allocs/op
+BenchmarkSetRedigo100Conns1MB-4     	    2000	    679542 ns/op	     226 B/op	       7 allocs/op
+```
+
+Redis Cluster:
+
+```
+BenchmarkRedisPing-4                	  200000	      6983 ns/op	     116 B/op	       4 allocs/op
+BenchmarkRedisClusterPing-4         	  100000	     11535 ns/op	     117 B/op	       4 allocs/op
+```
+
+## Shameless plug
+
+Check my [PostgreSQL client for Go](https://github.com/go-pg/pg).
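
The README above only covers plain TCP connections, but the reason this commit vendors v5 is TLS support for the remote cache. A hedged sketch of a TLS-enabled client, assuming the vendored v5 Options exposes a `TLSConfig *tls.Config` field — that assumption is what the new `ssl` connection-string option in this PR rests on:

```go
// Sketch only, assuming gopkg.in/redis.v5 Options has a TLSConfig field.
package main

import (
	"crypto/tls"
	"fmt"

	"gopkg.in/redis.v5"
)

func main() {
	client := redis.NewClient(&redis.Options{
		Addr: "redis.example.com:6380",
		// A non-nil TLSConfig is assumed to make the client dial with TLS.
		TLSConfig: &tls.Config{ServerName: "redis.example.com"},
	})
	defer client.Close()

	pong, err := client.Ping().Result()
	fmt.Println(pong, err)
}
```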

+ 940 - 0
vendor/gopkg.in/redis.v5/cluster.go

@@ -0,0 +1,940 @@
+package redis
+
+import (
+	"fmt"
+	"math/rand"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"gopkg.in/redis.v5/internal"
+	"gopkg.in/redis.v5/internal/hashtag"
+	"gopkg.in/redis.v5/internal/pool"
+	"gopkg.in/redis.v5/internal/proto"
+)
+
+var errClusterNoNodes = internal.RedisError("redis: cluster has no nodes")
+var errNilClusterState = internal.RedisError("redis: cannot load cluster slots")
+
+// ClusterOptions are used to configure a cluster client and should be
+// passed to NewClusterClient.
+type ClusterOptions struct {
+	// A seed list of host:port addresses of cluster nodes.
+	Addrs []string
+
+	// The maximum number of retries before giving up. Command is retried
+	// on network errors and MOVED/ASK redirects.
+	// Default is 16.
+	MaxRedirects int
+
+	// Enables read queries for a connection to a Redis Cluster slave node.
+	ReadOnly bool
+
+	// Enables routing read-only queries to the closest master or slave node.
+	RouteByLatency bool
+
+	// Following options are copied from Options struct.
+
+	Password string
+
+	DialTimeout  time.Duration
+	ReadTimeout  time.Duration
+	WriteTimeout time.Duration
+
+	// PoolSize applies per cluster node and not for the whole cluster.
+	PoolSize           int
+	PoolTimeout        time.Duration
+	IdleTimeout        time.Duration
+	IdleCheckFrequency time.Duration
+}
+
+func (opt *ClusterOptions) init() {
+	if opt.MaxRedirects == -1 {
+		opt.MaxRedirects = 0
+	} else if opt.MaxRedirects == 0 {
+		opt.MaxRedirects = 16
+	}
+
+	if opt.RouteByLatency {
+		opt.ReadOnly = true
+	}
+}
+
+func (opt *ClusterOptions) clientOptions() *Options {
+	const disableIdleCheck = -1
+
+	return &Options{
+		Password: opt.Password,
+		ReadOnly: opt.ReadOnly,
+
+		DialTimeout:  opt.DialTimeout,
+		ReadTimeout:  opt.ReadTimeout,
+		WriteTimeout: opt.WriteTimeout,
+
+		PoolSize:    opt.PoolSize,
+		PoolTimeout: opt.PoolTimeout,
+		IdleTimeout: opt.IdleTimeout,
+
+		// IdleCheckFrequency is not copied to disable reaper
+		IdleCheckFrequency: disableIdleCheck,
+	}
+}
+
+//------------------------------------------------------------------------------
+
+type clusterNode struct {
+	Client  *Client
+	Latency time.Duration
+	loading time.Time
+}
+
+func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode {
+	opt := clOpt.clientOptions()
+	opt.Addr = addr
+	node := clusterNode{
+		Client: NewClient(opt),
+	}
+
+	if clOpt.RouteByLatency {
+		node.updateLatency()
+	}
+
+	return &node
+}
+
+func (n *clusterNode) updateLatency() {
+	const probes = 10
+	for i := 0; i < probes; i++ {
+		start := time.Now()
+		n.Client.Ping()
+		n.Latency += time.Since(start)
+	}
+	n.Latency = n.Latency / probes
+}
+
+func (n *clusterNode) Loading() bool {
+	return !n.loading.IsZero() && time.Since(n.loading) < time.Minute
+}
+
+//------------------------------------------------------------------------------
+
+type clusterNodes struct {
+	opt *ClusterOptions
+
+	mu     sync.RWMutex
+	addrs  []string
+	nodes  map[string]*clusterNode
+	closed bool
+}
+
+func newClusterNodes(opt *ClusterOptions) *clusterNodes {
+	return &clusterNodes{
+		opt:   opt,
+		nodes: make(map[string]*clusterNode),
+	}
+}
+
+func (c *clusterNodes) Close() error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.closed {
+		return nil
+	}
+	c.closed = true
+
+	var firstErr error
+	for _, node := range c.nodes {
+		if err := node.Client.Close(); err != nil && firstErr == nil {
+			firstErr = err
+		}
+	}
+	c.addrs = nil
+	c.nodes = nil
+
+	return firstErr
+}
+
+func (c *clusterNodes) All() ([]*clusterNode, error) {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+
+	if c.closed {
+		return nil, pool.ErrClosed
+	}
+
+	nodes := make([]*clusterNode, 0, len(c.nodes))
+	for _, node := range c.nodes {
+		nodes = append(nodes, node)
+	}
+	return nodes, nil
+}
+
+func (c *clusterNodes) Get(addr string) (*clusterNode, error) {
+	var node *clusterNode
+	var ok bool
+
+	c.mu.RLock()
+	if !c.closed {
+		node, ok = c.nodes[addr]
+	}
+	c.mu.RUnlock()
+	if ok {
+		return node, nil
+	}
+
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.closed {
+		return nil, pool.ErrClosed
+	}
+
+	node, ok = c.nodes[addr]
+	if ok {
+		return node, nil
+	}
+
+	c.addrs = append(c.addrs, addr)
+	node = newClusterNode(c.opt, addr)
+	c.nodes[addr] = node
+	return node, nil
+}
+
+func (c *clusterNodes) Random() (*clusterNode, error) {
+	c.mu.RLock()
+	closed := c.closed
+	addrs := c.addrs
+	c.mu.RUnlock()
+
+	if closed {
+		return nil, pool.ErrClosed
+	}
+	if len(addrs) == 0 {
+		return nil, errClusterNoNodes
+	}
+
+	var nodeErr error
+	for i := 0; i <= c.opt.MaxRedirects; i++ {
+		n := rand.Intn(len(addrs))
+		node, err := c.Get(addrs[n])
+		if err != nil {
+			return nil, err
+		}
+
+		nodeErr = node.Client.ClusterInfo().Err()
+		if nodeErr == nil {
+			return node, nil
+		}
+	}
+	return nil, nodeErr
+}
+
+//------------------------------------------------------------------------------
+
+type clusterState struct {
+	nodes *clusterNodes
+	slots [][]*clusterNode
+}
+
+func newClusterState(nodes *clusterNodes, slots []ClusterSlot) (*clusterState, error) {
+	c := clusterState{
+		nodes: nodes,
+		slots: make([][]*clusterNode, hashtag.SlotNumber),
+	}
+
+	for _, slot := range slots {
+		var nodes []*clusterNode
+		for _, slotNode := range slot.Nodes {
+			node, err := c.nodes.Get(slotNode.Addr)
+			if err != nil {
+				return nil, err
+			}
+			nodes = append(nodes, node)
+		}
+
+		for i := slot.Start; i <= slot.End; i++ {
+			c.slots[i] = nodes
+		}
+	}
+
+	return &c, nil
+}
+
+func (c *clusterState) slotMasterNode(slot int) (*clusterNode, error) {
+	nodes := c.slotNodes(slot)
+	if len(nodes) > 0 {
+		return nodes[0], nil
+	}
+	return c.nodes.Random()
+}
+
+func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) {
+	nodes := c.slotNodes(slot)
+	switch len(nodes) {
+	case 0:
+		return c.nodes.Random()
+	case 1:
+		return nodes[0], nil
+	case 2:
+		if slave := nodes[1]; !slave.Loading() {
+			return slave, nil
+		}
+		return nodes[0], nil
+	default:
+		var slave *clusterNode
+		for i := 0; i < 10; i++ {
+			n := rand.Intn(len(nodes)-1) + 1
+			slave = nodes[n]
+			if !slave.Loading() {
+				break
+			}
+		}
+		return slave, nil
+	}
+}
+
+func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) {
+	const threshold = time.Millisecond
+
+	nodes := c.slotNodes(slot)
+	if len(nodes) == 0 {
+		return c.nodes.Random()
+	}
+
+	var node *clusterNode
+	for _, n := range nodes {
+		if n.Loading() {
+			continue
+		}
+		if node == nil || node.Latency-n.Latency > threshold {
+			node = n
+		}
+	}
+	return node, nil
+}
+
+func (c *clusterState) slotNodes(slot int) []*clusterNode {
+	if slot < len(c.slots) {
+		return c.slots[slot]
+	}
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+// ClusterClient is a Redis Cluster client representing a pool of zero
+// or more underlying connections. It's safe for concurrent use by
+// multiple goroutines.
+type ClusterClient struct {
+	cmdable
+
+	opt    *ClusterOptions
+	cmds   map[string]*CommandInfo
+	nodes  *clusterNodes
+	_state atomic.Value
+
+	// Reports whether a slots reload is in progress.
+	reloading uint32
+
+	closed bool
+}
+
+// NewClusterClient returns a Redis Cluster client as described in
+// http://redis.io/topics/cluster-spec.
+func NewClusterClient(opt *ClusterOptions) *ClusterClient {
+	opt.init()
+
+	c := &ClusterClient{
+		opt:   opt,
+		nodes: newClusterNodes(opt),
+	}
+	c.cmdable.process = c.Process
+
+	// Add initial nodes.
+	for _, addr := range opt.Addrs {
+		_, _ = c.nodes.Get(addr)
+	}
+
+	// Preload cluster slots.
+	for i := 0; i < 10; i++ {
+		state, err := c.reloadSlots()
+		if err == nil {
+			c._state.Store(state)
+			break
+		}
+	}
+
+	if opt.IdleCheckFrequency > 0 {
+		go c.reaper(opt.IdleCheckFrequency)
+	}
+
+	return c
+}
+
+func (c *ClusterClient) state() *clusterState {
+	v := c._state.Load()
+	if v != nil {
+		return v.(*clusterState)
+	}
+	c.lazyReloadSlots()
+	return nil
+}
+
+func (c *ClusterClient) cmdSlotAndNode(state *clusterState, cmd Cmder) (int, *clusterNode, error) {
+	if state == nil {
+		node, err := c.nodes.Random()
+		return 0, node, err
+	}
+
+	cmdInfo := c.cmds[cmd.name()]
+	firstKey := cmd.arg(cmdFirstKeyPos(cmd, cmdInfo))
+	slot := hashtag.Slot(firstKey)
+
+	if cmdInfo != nil && cmdInfo.ReadOnly && c.opt.ReadOnly {
+		if c.opt.RouteByLatency {
+			node, err := state.slotClosestNode(slot)
+			return slot, node, err
+		}
+
+		node, err := state.slotSlaveNode(slot)
+		return slot, node, err
+	}
+
+	node, err := state.slotMasterNode(slot)
+	return slot, node, err
+}
+
+func (c *ClusterClient) Watch(fn func(*Tx) error, keys ...string) error {
+	state := c.state()
+
+	var node *clusterNode
+	var err error
+	if state != nil && len(keys) > 0 {
+		node, err = state.slotMasterNode(hashtag.Slot(keys[0]))
+	} else {
+		node, err = c.nodes.Random()
+	}
+	if err != nil {
+		return err
+	}
+	return node.Client.Watch(fn, keys...)
+}
+
+// Close closes the cluster client, releasing any open resources.
+//
+// It is rare to Close a ClusterClient, as the ClusterClient is meant
+// to be long-lived and shared between many goroutines.
+func (c *ClusterClient) Close() error {
+	return c.nodes.Close()
+}
+
+func (c *ClusterClient) Process(cmd Cmder) error {
+	slot, node, err := c.cmdSlotAndNode(c.state(), cmd)
+	if err != nil {
+		cmd.setErr(err)
+		return err
+	}
+
+	var ask bool
+	for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+		if ask {
+			pipe := node.Client.Pipeline()
+			pipe.Process(NewCmd("ASKING"))
+			pipe.Process(cmd)
+			_, err = pipe.Exec()
+			pipe.Close()
+			ask = false
+		} else {
+			err = node.Client.Process(cmd)
+		}
+
+		// If there is no (real) error - we are done.
+		if err == nil {
+			return nil
+		}
+
+		// If slave is loading - read from master.
+		if c.opt.ReadOnly && internal.IsLoadingError(err) {
+			node.loading = time.Now()
+			continue
+		}
+
+		// On network errors try random node.
+		if internal.IsRetryableError(err) {
+			node, err = c.nodes.Random()
+			if err != nil {
+				cmd.setErr(err)
+				return err
+			}
+			continue
+		}
+
+		var moved bool
+		var addr string
+		moved, ask, addr = internal.IsMovedError(err)
+		if moved || ask {
+			state := c.state()
+			if state != nil && slot >= 0 {
+				master, _ := state.slotMasterNode(slot)
+				if moved && (master == nil || master.Client.getAddr() != addr) {
+					c.lazyReloadSlots()
+				}
+			}
+
+			node, err = c.nodes.Get(addr)
+			if err != nil {
+				cmd.setErr(err)
+				return err
+			}
+
+			continue
+		}
+
+		break
+	}
+
+	return cmd.Err()
+}
+
+// ForEachNode concurrently calls the fn on every known node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachNode(fn func(client *Client) error) error {
+	nodes, err := c.nodes.All()
+	if err != nil {
+		return err
+	}
+
+	var wg sync.WaitGroup
+	errCh := make(chan error, 1)
+	for _, node := range nodes {
+		wg.Add(1)
+		go func(node *clusterNode) {
+			defer wg.Done()
+			err := fn(node.Client)
+			if err != nil {
+				select {
+				case errCh <- err:
+				default:
+				}
+			}
+		}(node)
+	}
+	wg.Wait()
+
+	select {
+	case err := <-errCh:
+		return err
+	default:
+		return nil
+	}
+}
+
+// ForEachMaster concurrently calls the fn on each master node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachMaster(fn func(client *Client) error) error {
+	state := c.state()
+	if state == nil {
+		return errNilClusterState
+	}
+
+	var wg sync.WaitGroup
+	visited := make(map[*clusterNode]struct{})
+	errCh := make(chan error, 1)
+	for _, nodes := range state.slots {
+		if len(nodes) == 0 {
+			continue
+		}
+
+		master := nodes[0]
+		if _, ok := visited[master]; ok {
+			continue
+		}
+		visited[master] = struct{}{}
+
+		wg.Add(1)
+		go func(node *clusterNode) {
+			defer wg.Done()
+			err := fn(node.Client)
+			if err != nil {
+				select {
+				case errCh <- err:
+				default:
+				}
+			}
+		}(master)
+	}
+	wg.Wait()
+
+	select {
+	case err := <-errCh:
+		return err
+	default:
+		return nil
+	}
+}
+
+// PoolStats returns accumulated connection pool stats.
+func (c *ClusterClient) PoolStats() *PoolStats {
+	var acc PoolStats
+
+	nodes, err := c.nodes.All()
+	if err != nil {
+		return &acc
+	}
+
+	for _, node := range nodes {
+		s := node.Client.connPool.Stats()
+		acc.Requests += s.Requests
+		acc.Hits += s.Hits
+		acc.Timeouts += s.Timeouts
+		acc.TotalConns += s.TotalConns
+		acc.FreeConns += s.FreeConns
+	}
+	return &acc
+}
+
+func (c *ClusterClient) lazyReloadSlots() {
+	if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) {
+		return
+	}
+
+	go func() {
+		for i := 0; i < 1000; i++ {
+			state, err := c.reloadSlots()
+			if err == pool.ErrClosed {
+				break
+			}
+			if err == nil {
+				c._state.Store(state)
+				break
+			}
+			time.Sleep(time.Millisecond)
+		}
+
+		time.Sleep(3 * time.Second)
+		atomic.StoreUint32(&c.reloading, 0)
+	}()
+}
+
+func (c *ClusterClient) reloadSlots() (*clusterState, error) {
+	node, err := c.nodes.Random()
+	if err != nil {
+		return nil, err
+	}
+
+	// TODO: fix race
+	if c.cmds == nil {
+		cmds, err := node.Client.Command().Result()
+		if err != nil {
+			return nil, err
+		}
+		c.cmds = cmds
+	}
+
+	slots, err := node.Client.ClusterSlots().Result()
+	if err != nil {
+		return nil, err
+	}
+
+	return newClusterState(c.nodes, slots)
+}
+
+// reaper closes idle connections to the cluster.
+func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) {
+	ticker := time.NewTicker(idleCheckFrequency)
+	defer ticker.Stop()
+
+	for range ticker.C {
+		nodes, err := c.nodes.All()
+		if err != nil {
+			break
+		}
+
+		var n int
+		for _, node := range nodes {
+			nn, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns()
+			if err != nil {
+				internal.Logf("ReapStaleConns failed: %s", err)
+			} else {
+				n += nn
+			}
+		}
+
+		s := c.PoolStats()
+		internal.Logf(
+			"reaper: removed %d stale conns (TotalConns=%d FreeConns=%d Requests=%d Hits=%d Timeouts=%d)",
+			n, s.TotalConns, s.FreeConns, s.Requests, s.Hits, s.Timeouts,
+		)
+	}
+}
+
+func (c *ClusterClient) Pipeline() *Pipeline {
+	pipe := Pipeline{
+		exec: c.pipelineExec,
+	}
+	pipe.cmdable.process = pipe.Process
+	pipe.statefulCmdable.process = pipe.Process
+	return &pipe
+}
+
+func (c *ClusterClient) Pipelined(fn func(*Pipeline) error) ([]Cmder, error) {
+	return c.Pipeline().pipelined(fn)
+}
+
+func (c *ClusterClient) pipelineExec(cmds []Cmder) error {
+	cmdsMap, err := c.mapCmdsByNode(cmds)
+	if err != nil {
+		return err
+	}
+
+	for i := 0; i <= c.opt.MaxRedirects; i++ {
+		failedCmds := make(map[*clusterNode][]Cmder)
+
+		for node, cmds := range cmdsMap {
+			cn, _, err := node.Client.conn()
+			if err != nil {
+				setCmdsErr(cmds, err)
+				continue
+			}
+
+			err = c.pipelineProcessCmds(cn, cmds, failedCmds)
+			node.Client.putConn(cn, err, false)
+		}
+
+		if len(failedCmds) == 0 {
+			break
+		}
+		cmdsMap = failedCmds
+	}
+
+	var firstErr error
+	for _, cmd := range cmds {
+		if err := cmd.Err(); err != nil {
+			firstErr = err
+			break
+		}
+	}
+	return firstErr
+}
+
+func (c *ClusterClient) mapCmdsByNode(cmds []Cmder) (map[*clusterNode][]Cmder, error) {
+	state := c.state()
+	cmdsMap := make(map[*clusterNode][]Cmder)
+	for _, cmd := range cmds {
+		_, node, err := c.cmdSlotAndNode(state, cmd)
+		if err != nil {
+			return nil, err
+		}
+		cmdsMap[node] = append(cmdsMap[node], cmd)
+	}
+	return cmdsMap, nil
+}
+
+func (c *ClusterClient) pipelineProcessCmds(
+	cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder,
+) error {
+	cn.SetWriteTimeout(c.opt.WriteTimeout)
+	if err := writeCmd(cn, cmds...); err != nil {
+		setCmdsErr(cmds, err)
+		return err
+	}
+
+	// Set read timeout for all commands.
+	cn.SetReadTimeout(c.opt.ReadTimeout)
+
+	return c.pipelineReadCmds(cn, cmds, failedCmds)
+}
+
+func (c *ClusterClient) pipelineReadCmds(
+	cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder,
+) error {
+	var firstErr error
+	for _, cmd := range cmds {
+		err := cmd.readReply(cn)
+		if err == nil {
+			continue
+		}
+
+		if firstErr == nil {
+			firstErr = err
+		}
+
+		err = c.checkMovedErr(cmd, failedCmds)
+		if err != nil && firstErr == nil {
+			firstErr = err
+		}
+	}
+	return firstErr
+}
+
+func (c *ClusterClient) checkMovedErr(cmd Cmder, failedCmds map[*clusterNode][]Cmder) error {
+	moved, ask, addr := internal.IsMovedError(cmd.Err())
+	if moved {
+		c.lazyReloadSlots()
+
+		node, err := c.nodes.Get(addr)
+		if err != nil {
+			return err
+		}
+
+		failedCmds[node] = append(failedCmds[node], cmd)
+	}
+	if ask {
+		node, err := c.nodes.Get(addr)
+		if err != nil {
+			return err
+		}
+
+		failedCmds[node] = append(failedCmds[node], NewCmd("ASKING"), cmd)
+	}
+	return nil
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *ClusterClient) TxPipeline() *Pipeline {
+	pipe := Pipeline{
+		exec: c.txPipelineExec,
+	}
+	pipe.cmdable.process = pipe.Process
+	pipe.statefulCmdable.process = pipe.Process
+	return &pipe
+}
+
+func (c *ClusterClient) TxPipelined(fn func(*Pipeline) error) ([]Cmder, error) {
+	return c.TxPipeline().pipelined(fn)
+}
+
+func (c *ClusterClient) txPipelineExec(cmds []Cmder) error {
+	cmdsMap, err := c.mapCmdsBySlot(cmds)
+	if err != nil {
+		return err
+	}
+
+	state := c.state()
+	if state == nil {
+		return errNilClusterState
+	}
+
+	for slot, cmds := range cmdsMap {
+		node, err := state.slotMasterNode(slot)
+		if err != nil {
+			setCmdsErr(cmds, err)
+			continue
+		}
+
+		cmdsMap := map[*clusterNode][]Cmder{node: cmds}
+		for i := 0; i <= c.opt.MaxRedirects; i++ {
+			failedCmds := make(map[*clusterNode][]Cmder)
+
+			for node, cmds := range cmdsMap {
+				cn, _, err := node.Client.conn()
+				if err != nil {
+					setCmdsErr(cmds, err)
+					continue
+				}
+
+				err = c.txPipelineProcessCmds(node, cn, cmds, failedCmds)
+				node.Client.putConn(cn, err, false)
+			}
+
+			if len(failedCmds) == 0 {
+				break
+			}
+			cmdsMap = failedCmds
+		}
+	}
+
+	var firstErr error
+	for _, cmd := range cmds {
+		if err := cmd.Err(); err != nil {
+			firstErr = err
+			break
+		}
+	}
+	return firstErr
+}
+
+func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) (map[int][]Cmder, error) {
+	state := c.state()
+	cmdsMap := make(map[int][]Cmder)
+	for _, cmd := range cmds {
+		slot, _, err := c.cmdSlotAndNode(state, cmd)
+		if err != nil {
+			return nil, err
+		}
+		cmdsMap[slot] = append(cmdsMap[slot], cmd)
+	}
+	return cmdsMap, nil
+}
+
+func (c *ClusterClient) txPipelineProcessCmds(
+	node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder,
+) error {
+	cn.SetWriteTimeout(c.opt.WriteTimeout)
+	if err := txPipelineWriteMulti(cn, cmds); err != nil {
+		setCmdsErr(cmds, err)
+		failedCmds[node] = cmds
+		return err
+	}
+
+	// Set read timeout for all commands.
+	cn.SetReadTimeout(c.opt.ReadTimeout)
+
+	if err := c.txPipelineReadQueued(cn, cmds, failedCmds); err != nil {
+		return err
+	}
+
+	_, err := pipelineReadCmds(cn, cmds)
+	return err
+}
+
+func (c *ClusterClient) txPipelineReadQueued(
+	cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder,
+) error {
+	var firstErr error
+
+	// Parse queued replies.
+	var statusCmd StatusCmd
+	if err := statusCmd.readReply(cn); err != nil && firstErr == nil {
+		firstErr = err
+	}
+
+	for _, cmd := range cmds {
+		err := statusCmd.readReply(cn)
+		if err == nil {
+			continue
+		}
+
+		cmd.setErr(err)
+		if firstErr == nil {
+			firstErr = err
+		}
+
+		err = c.checkMovedErr(cmd, failedCmds)
+		if err != nil && firstErr == nil {
+			firstErr = err
+		}
+	}
+
+	// Parse number of replies.
+	line, err := cn.Rd.ReadLine()
+	if err != nil {
+		if err == Nil {
+			err = TxFailedErr
+		}
+		return err
+	}
+
+	switch line[0] {
+	case proto.ErrorReply:
+		return proto.ParseErrorReply(line)
+	case proto.ArrayReply:
+		// ok
+	default:
+		err := fmt.Errorf("redis: expected '*', but got line %q", line)
+		return err
+	}
+
+	return firstErr
+}
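
cluster.go gives ClusterClient the same command surface as a single-node client while handling MOVED/ASK redirects and slot reloads internally. A short usage sketch built only from the constructors and helpers defined in the file above:

```go
// Sketch only: exercises NewClusterClient, ForEachMaster and Close from cluster.go above.
package main

import (
	"fmt"

	"gopkg.in/redis.v5"
)

func main() {
	cluster := redis.NewClusterClient(&redis.ClusterOptions{
		Addrs:    []string{"node-1:7000", "node-2:7001", "node-3:7002"},
		ReadOnly: true, // route read-only commands to slaves; RouteByLatency would imply this
	})
	defer cluster.Close()

	// Run a command against every master, e.g. to check that all shards answer.
	err := cluster.ForEachMaster(func(master *redis.Client) error {
		fmt.Println(master.Ping().Val())
		return nil
	})
	if err != nil {
		fmt.Println("ForEachMaster:", err)
	}
}
```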

+ 956 - 0
vendor/gopkg.in/redis.v5/command.go

@@ -0,0 +1,956 @@
+package redis
+
+import (
+	"bytes"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	"gopkg.in/redis.v5/internal"
+	"gopkg.in/redis.v5/internal/pool"
+	"gopkg.in/redis.v5/internal/proto"
+)
+
+var (
+	_ Cmder = (*Cmd)(nil)
+	_ Cmder = (*SliceCmd)(nil)
+	_ Cmder = (*StatusCmd)(nil)
+	_ Cmder = (*IntCmd)(nil)
+	_ Cmder = (*DurationCmd)(nil)
+	_ Cmder = (*BoolCmd)(nil)
+	_ Cmder = (*StringCmd)(nil)
+	_ Cmder = (*FloatCmd)(nil)
+	_ Cmder = (*StringSliceCmd)(nil)
+	_ Cmder = (*BoolSliceCmd)(nil)
+	_ Cmder = (*StringStringMapCmd)(nil)
+	_ Cmder = (*StringIntMapCmd)(nil)
+	_ Cmder = (*ZSliceCmd)(nil)
+	_ Cmder = (*ScanCmd)(nil)
+	_ Cmder = (*ClusterSlotsCmd)(nil)
+)
+
+type Cmder interface {
+	args() []interface{}
+	arg(int) string
+	name() string
+
+	readReply(*pool.Conn) error
+	setErr(error)
+
+	readTimeout() *time.Duration
+
+	Err() error
+	fmt.Stringer
+}
+
+func setCmdsErr(cmds []Cmder, e error) {
+	for _, cmd := range cmds {
+		cmd.setErr(e)
+	}
+}
+
+func writeCmd(cn *pool.Conn, cmds ...Cmder) error {
+	cn.Wb.Reset()
+	for _, cmd := range cmds {
+		if err := cn.Wb.Append(cmd.args()); err != nil {
+			return err
+		}
+	}
+
+	_, err := cn.Write(cn.Wb.Bytes())
+	return err
+}
+
+func cmdString(cmd Cmder, val interface{}) string {
+	var ss []string
+	for _, arg := range cmd.args() {
+		ss = append(ss, fmt.Sprint(arg))
+	}
+	s := strings.Join(ss, " ")
+	if err := cmd.Err(); err != nil {
+		return s + ": " + err.Error()
+	}
+	if val != nil {
+		switch vv := val.(type) {
+		case []byte:
+			return s + ": " + string(vv)
+		default:
+			return s + ": " + fmt.Sprint(val)
+		}
+	}
+	return s
+}
+
+func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int {
+	switch cmd.name() {
+	case "eval", "evalsha":
+		if cmd.arg(2) != "0" {
+			return 3
+		} else {
+			return -1
+		}
+	}
+	if info == nil {
+		internal.Logf("info for cmd=%s not found", cmd.name())
+		return -1
+	}
+	return int(info.FirstKeyPos)
+}
+
+//------------------------------------------------------------------------------
+
+type baseCmd struct {
+	_args []interface{}
+	err   error
+
+	_readTimeout *time.Duration
+}
+
+func (cmd *baseCmd) Err() error {
+	if cmd.err != nil {
+		return cmd.err
+	}
+	return nil
+}
+
+func (cmd *baseCmd) args() []interface{} {
+	return cmd._args
+}
+
+func (cmd *baseCmd) arg(pos int) string {
+	if pos < 0 || pos >= len(cmd._args) {
+		return ""
+	}
+	s, _ := cmd._args[pos].(string)
+	return s
+}
+
+func (cmd *baseCmd) name() string {
+	if len(cmd._args) > 0 {
+		// Cmd name must be lower cased.
+		s := internal.ToLower(cmd.arg(0))
+		cmd._args[0] = s
+		return s
+	}
+	return ""
+}
+
+func (cmd *baseCmd) readTimeout() *time.Duration {
+	return cmd._readTimeout
+}
+
+func (cmd *baseCmd) setReadTimeout(d time.Duration) {
+	cmd._readTimeout = &d
+}
+
+func (cmd *baseCmd) setErr(e error) {
+	cmd.err = e
+}
+
+func newBaseCmd(args []interface{}) baseCmd {
+	if len(args) > 0 {
+		// Cmd name is expected to be in lower case.
+		args[0] = internal.ToLower(args[0].(string))
+	}
+	return baseCmd{_args: args}
+}
+
+//------------------------------------------------------------------------------
+
+type Cmd struct {
+	baseCmd
+
+	val interface{}
+}
+
+func NewCmd(args ...interface{}) *Cmd {
+	return &Cmd{
+		baseCmd: baseCmd{_args: args},
+	}
+}
+
+func (cmd *Cmd) Val() interface{} {
+	return cmd.val
+}
+
+func (cmd *Cmd) Result() (interface{}, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *Cmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *Cmd) readReply(cn *pool.Conn) error {
+	cmd.val, cmd.err = cn.Rd.ReadReply(sliceParser)
+	if cmd.err != nil {
+		return cmd.err
+	}
+	if b, ok := cmd.val.([]byte); ok {
+		// Bytes must be copied, because underlying memory is reused.
+		cmd.val = string(b)
+	}
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+type SliceCmd struct {
+	baseCmd
+
+	val []interface{}
+}
+
+func NewSliceCmd(args ...interface{}) *SliceCmd {
+	return &SliceCmd{
+		baseCmd: baseCmd{_args: args},
+	}
+}
+
+func (cmd *SliceCmd) Val() []interface{} {
+	return cmd.val
+}
+
+func (cmd *SliceCmd) Result() ([]interface{}, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *SliceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *SliceCmd) readReply(cn *pool.Conn) error {
+	var v interface{}
+	v, cmd.err = cn.Rd.ReadArrayReply(sliceParser)
+	if cmd.err != nil {
+		return cmd.err
+	}
+	cmd.val = v.([]interface{})
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+type StatusCmd struct {
+	baseCmd
+
+	val string
+}
+
+func NewStatusCmd(args ...interface{}) *StatusCmd {
+	return &StatusCmd{
+		baseCmd: baseCmd{_args: args},
+	}
+}
+
+func (cmd *StatusCmd) Val() string {
+	return cmd.val
+}
+
+func (cmd *StatusCmd) Result() (string, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *StatusCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StatusCmd) readReply(cn *pool.Conn) error {
+	cmd.val, cmd.err = cn.Rd.ReadStringReply()
+	return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type IntCmd struct {
+	baseCmd
+
+	val int64
+}
+
+func NewIntCmd(args ...interface{}) *IntCmd {
+	return &IntCmd{
+		baseCmd: baseCmd{_args: args},
+	}
+}
+
+func (cmd *IntCmd) Val() int64 {
+	return cmd.val
+}
+
+func (cmd *IntCmd) Result() (int64, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *IntCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *IntCmd) readReply(cn *pool.Conn) error {
+	cmd.val, cmd.err = cn.Rd.ReadIntReply()
+	return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type DurationCmd struct {
+	baseCmd
+
+	val       time.Duration
+	precision time.Duration
+}
+
+func NewDurationCmd(precision time.Duration, args ...interface{}) *DurationCmd {
+	return &DurationCmd{
+		baseCmd:   baseCmd{_args: args},
+		precision: precision,
+	}
+}
+
+func (cmd *DurationCmd) Val() time.Duration {
+	return cmd.val
+}
+
+func (cmd *DurationCmd) Result() (time.Duration, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *DurationCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *DurationCmd) readReply(cn *pool.Conn) error {
+	var n int64
+	n, cmd.err = cn.Rd.ReadIntReply()
+	if cmd.err != nil {
+		return cmd.err
+	}
+	cmd.val = time.Duration(n) * cmd.precision
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+type TimeCmd struct {
+	baseCmd
+
+	val time.Time
+}
+
+func NewTimeCmd(args ...interface{}) *TimeCmd {
+	return &TimeCmd{
+		baseCmd: baseCmd{_args: args},
+	}
+}
+
+func (cmd *TimeCmd) Val() time.Time {
+	return cmd.val
+}
+
+func (cmd *TimeCmd) Result() (time.Time, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *TimeCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TimeCmd) readReply(cn *pool.Conn) error {
+	var v interface{}
+	v, cmd.err = cn.Rd.ReadArrayReply(timeParser)
+	if cmd.err != nil {
+		return cmd.err
+	}
+	cmd.val = v.(time.Time)
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+type BoolCmd struct {
+	baseCmd
+
+	val bool
+}
+
+func NewBoolCmd(args ...interface{}) *BoolCmd {
+	return &BoolCmd{
+		baseCmd: baseCmd{_args: args},
+	}
+}
+
+func (cmd *BoolCmd) Val() bool {
+	return cmd.val
+}
+
+func (cmd *BoolCmd) Result() (bool, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *BoolCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+var ok = []byte("OK")
+
+func (cmd *BoolCmd) readReply(cn *pool.Conn) error {
+	var v interface{}
+	v, cmd.err = cn.Rd.ReadReply(nil)
+	// `SET key value NX` returns nil when key already exists. But
+	// `SETNX key value` returns bool (0/1). So convert nil to bool.
+	// TODO: is this okay?
+	if cmd.err == Nil {
+		cmd.val = false
+		cmd.err = nil
+		return nil
+	}
+	if cmd.err != nil {
+		return cmd.err
+	}
+	switch v := v.(type) {
+	case int64:
+		cmd.val = v == 1
+		return nil
+	case []byte:
+		cmd.val = bytes.Equal(v, ok)
+		return nil
+	default:
+		cmd.err = fmt.Errorf("got %T, wanted int64 or string", v)
+		return cmd.err
+	}
+}
+
+//------------------------------------------------------------------------------
+
+type StringCmd struct {
+	baseCmd
+
+	val []byte
+}
+
+func NewStringCmd(args ...interface{}) *StringCmd {
+	return &StringCmd{
+		baseCmd: baseCmd{_args: args},
+	}
+}
+
+func (cmd *StringCmd) Val() string {
+	return internal.BytesToString(cmd.val)
+}
+
+func (cmd *StringCmd) Result() (string, error) {
+	return cmd.Val(), cmd.err
+}
+
+func (cmd *StringCmd) Bytes() ([]byte, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *StringCmd) Int64() (int64, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	return strconv.ParseInt(cmd.Val(), 10, 64)
+}
+
+func (cmd *StringCmd) Uint64() (uint64, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	return strconv.ParseUint(cmd.Val(), 10, 64)
+}
+
+func (cmd *StringCmd) Float64() (float64, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	return strconv.ParseFloat(cmd.Val(), 64)
+}
+
+func (cmd *StringCmd) Scan(val interface{}) error {
+	if cmd.err != nil {
+		return cmd.err
+	}
+	return proto.Scan(cmd.val, val)
+}
+
+func (cmd *StringCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringCmd) readReply(cn *pool.Conn) error {
+	cmd.val, cmd.err = cn.Rd.ReadBytesReply()
+	return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type FloatCmd struct {
+	baseCmd
+
+	val float64
+}
+
+func NewFloatCmd(args ...interface{}) *FloatCmd {
+	return &FloatCmd{
+		baseCmd: baseCmd{_args: args},
+	}
+}
+
+func (cmd *FloatCmd) Val() float64 {
+	return cmd.val
+}
+
+func (cmd *FloatCmd) Result() (float64, error) {
+	return cmd.Val(), cmd.Err()
+}
+
+func (cmd *FloatCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FloatCmd) readReply(cn *pool.Conn) error {
+	cmd.val, cmd.err = cn.Rd.ReadFloatReply()
+	return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type StringSliceCmd struct {
+	baseCmd
+
+	val []string
+}
+
+func NewStringSliceCmd(args ...interface{}) *StringSliceCmd {
+	return &StringSliceCmd{
+		baseCmd: baseCmd{_args: args},
+	}
+}
+
+func (cmd *StringSliceCmd) Val() []string {
+	return cmd.val
+}
+
+func (cmd *StringSliceCmd) Result() ([]string, error) {
+	return cmd.Val(), cmd.Err()
+}
+
+func (cmd *StringSliceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringSliceCmd) ScanSlice(container interface{}) error {
+	return proto.ScanSlice(cmd.Val(), container)
+}
+
+func (cmd *StringSliceCmd) readReply(cn *pool.Conn) error {
+	var v interface{}
+	v, cmd.err = cn.Rd.ReadArrayReply(stringSliceParser)
+	if cmd.err != nil {
+		return cmd.err
+	}
+	cmd.val = v.([]string)
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+type BoolSliceCmd struct {
+	baseCmd
+
+	val []bool
+}
+
+func NewBoolSliceCmd(args ...interface{}) *BoolSliceCmd {
+	return &BoolSliceCmd{
+		baseCmd: baseCmd{_args: args},
+	}
+}
+
+func (cmd *BoolSliceCmd) Val() []bool {
+	return cmd.val
+}
+
+func (cmd *BoolSliceCmd) Result() ([]bool, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *BoolSliceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BoolSliceCmd) readReply(cn *pool.Conn) error {
+	var v interface{}
+	v, cmd.err = cn.Rd.ReadArrayReply(boolSliceParser)
+	if cmd.err != nil {
+		return cmd.err
+	}
+	cmd.val = v.([]bool)
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+type StringStringMapCmd struct {
+	baseCmd
+
+	val map[string]string
+}
+
+func NewStringStringMapCmd(args ...interface{}) *StringStringMapCmd {
+	return &StringStringMapCmd{
+		baseCmd: baseCmd{_args: args},
+	}
+}
+
+func (cmd *StringStringMapCmd) Val() map[string]string {
+	return cmd.val
+}
+
+func (cmd *StringStringMapCmd) Result() (map[string]string, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *StringStringMapCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringStringMapCmd) readReply(cn *pool.Conn) error {
+	var v interface{}
+	v, cmd.err = cn.Rd.ReadArrayReply(stringStringMapParser)
+	if cmd.err != nil {
+		return cmd.err
+	}
+	cmd.val = v.(map[string]string)
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+type StringIntMapCmd struct {
+	baseCmd
+
+	val map[string]int64
+}
+
+func NewStringIntMapCmd(args ...interface{}) *StringIntMapCmd {
+	return &StringIntMapCmd{
+		baseCmd: baseCmd{_args: args},
+	}
+}
+
+func (cmd *StringIntMapCmd) Val() map[string]int64 {
+	return cmd.val
+}
+
+func (cmd *StringIntMapCmd) Result() (map[string]int64, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *StringIntMapCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringIntMapCmd) readReply(cn *pool.Conn) error {
+	var v interface{}
+	v, cmd.err = cn.Rd.ReadArrayReply(stringIntMapParser)
+	if cmd.err != nil {
+		return cmd.err
+	}
+	cmd.val = v.(map[string]int64)
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+type ZSliceCmd struct {
+	baseCmd
+
+	val []Z
+}
+
+func NewZSliceCmd(args ...interface{}) *ZSliceCmd {
+	return &ZSliceCmd{
+		baseCmd: baseCmd{_args: args},
+	}
+}
+
+func (cmd *ZSliceCmd) Val() []Z {
+	return cmd.val
+}
+
+func (cmd *ZSliceCmd) Result() ([]Z, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *ZSliceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZSliceCmd) readReply(cn *pool.Conn) error {
+	var v interface{}
+	v, cmd.err = cn.Rd.ReadArrayReply(zSliceParser)
+	if cmd.err != nil {
+		return cmd.err
+	}
+	cmd.val = v.([]Z)
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+type ScanCmd struct {
+	baseCmd
+
+	page   []string
+	cursor uint64
+
+	process func(cmd Cmder) error
+}
+
+func NewScanCmd(process func(cmd Cmder) error, args ...interface{}) *ScanCmd {
+	return &ScanCmd{
+		baseCmd: baseCmd{_args: args},
+		process: process,
+	}
+}
+
+func (cmd *ScanCmd) Val() (keys []string, cursor uint64) {
+	return cmd.page, cmd.cursor
+}
+
+func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) {
+	return cmd.page, cmd.cursor, cmd.err
+}
+
+func (cmd *ScanCmd) String() string {
+	return cmdString(cmd, cmd.page)
+}
+
+func (cmd *ScanCmd) readReply(cn *pool.Conn) error {
+	cmd.page, cmd.cursor, cmd.err = cn.Rd.ReadScanReply()
+	return cmd.err
+}
+
+// Iterator creates a new ScanIterator.
+func (cmd *ScanCmd) Iterator() *ScanIterator {
+	return &ScanIterator{
+		cmd: cmd,
+	}
+}
+
+//------------------------------------------------------------------------------
+
+type ClusterNode struct {
+	Id   string
+	Addr string
+}
+
+type ClusterSlot struct {
+	Start int
+	End   int
+	Nodes []ClusterNode
+}
+
+type ClusterSlotsCmd struct {
+	baseCmd
+
+	val []ClusterSlot
+}
+
+func NewClusterSlotsCmd(args ...interface{}) *ClusterSlotsCmd {
+	return &ClusterSlotsCmd{
+		baseCmd: baseCmd{_args: args},
+	}
+}
+
+func (cmd *ClusterSlotsCmd) Val() []ClusterSlot {
+	return cmd.val
+}
+
+func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) {
+	return cmd.Val(), cmd.Err()
+}
+
+func (cmd *ClusterSlotsCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ClusterSlotsCmd) readReply(cn *pool.Conn) error {
+	var v interface{}
+	v, cmd.err = cn.Rd.ReadArrayReply(clusterSlotsParser)
+	if cmd.err != nil {
+		return cmd.err
+	}
+	cmd.val = v.([]ClusterSlot)
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+// GeoLocation is used with GeoAdd to add geospatial location.
+type GeoLocation struct {
+	Name                      string
+	Longitude, Latitude, Dist float64
+	GeoHash                   int64
+}
+
+// GeoRadiusQuery is used with GeoRadius to query geospatial index.
+type GeoRadiusQuery struct {
+	Radius float64
+	// Can be m, km, ft, or mi. Default is km.
+	Unit        string
+	WithCoord   bool
+	WithDist    bool
+	WithGeoHash bool
+	Count       int
+	// Can be ASC or DESC. Default is no sort order.
+	Sort string
+}
+
+type GeoLocationCmd struct {
+	baseCmd
+
+	q         *GeoRadiusQuery
+	locations []GeoLocation
+}
+
+func NewGeoLocationCmd(q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd {
+	args = append(args, q.Radius)
+	if q.Unit != "" {
+		args = append(args, q.Unit)
+	} else {
+		args = append(args, "km")
+	}
+	if q.WithCoord {
+		args = append(args, "WITHCOORD")
+	}
+	if q.WithDist {
+		args = append(args, "WITHDIST")
+	}
+	if q.WithGeoHash {
+		args = append(args, "WITHHASH")
+	}
+	if q.Count > 0 {
+		args = append(args, "COUNT", q.Count)
+	}
+	if q.Sort != "" {
+		args = append(args, q.Sort)
+	}
+	cmd := newBaseCmd(args)
+	return &GeoLocationCmd{
+		baseCmd: cmd,
+		q:       q,
+	}
+}
+
+func (cmd *GeoLocationCmd) Val() []GeoLocation {
+	return cmd.locations
+}
+
+func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) {
+	return cmd.locations, cmd.err
+}
+
+func (cmd *GeoLocationCmd) String() string {
+	return cmdString(cmd, cmd.locations)
+}
+
+func (cmd *GeoLocationCmd) readReply(cn *pool.Conn) error {
+	var v interface{}
+	v, cmd.err = cn.Rd.ReadArrayReply(newGeoLocationSliceParser(cmd.q))
+	if cmd.err != nil {
+		return cmd.err
+	}
+	cmd.locations = v.([]GeoLocation)
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+type GeoPos struct {
+	Longitude, Latitude float64
+}
+
+type GeoPosCmd struct {
+	baseCmd
+
+	positions []*GeoPos
+}
+
+func NewGeoPosCmd(args ...interface{}) *GeoPosCmd {
+	return &GeoPosCmd{
+		baseCmd: baseCmd{_args: args},
+	}
+}
+
+func (cmd *GeoPosCmd) Val() []*GeoPos {
+	return cmd.positions
+}
+
+func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) {
+	return cmd.Val(), cmd.Err()
+}
+
+func (cmd *GeoPosCmd) String() string {
+	return cmdString(cmd, cmd.positions)
+}
+
+func (cmd *GeoPosCmd) readReply(cn *pool.Conn) error {
+	var v interface{}
+	v, cmd.err = cn.Rd.ReadArrayReply(geoPosSliceParser)
+	if cmd.err != nil {
+		return cmd.err
+	}
+	cmd.positions = v.([]*GeoPos)
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+type CommandInfo struct {
+	Name        string
+	Arity       int8
+	Flags       []string
+	FirstKeyPos int8
+	LastKeyPos  int8
+	StepCount   int8
+	ReadOnly    bool
+}
+
+type CommandsInfoCmd struct {
+	baseCmd
+
+	val map[string]*CommandInfo
+}
+
+func NewCommandsInfoCmd(args ...interface{}) *CommandsInfoCmd {
+	return &CommandsInfoCmd{
+		baseCmd: baseCmd{_args: args},
+	}
+}
+
+func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo {
+	return cmd.val
+}
+
+func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) {
+	return cmd.Val(), cmd.Err()
+}
+
+func (cmd *CommandsInfoCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *CommandsInfoCmd) readReply(cn *pool.Conn) error {
+	var v interface{}
+	v, cmd.err = cn.Rd.ReadArrayReply(commandInfoSliceParser)
+	if cmd.err != nil {
+		return cmd.err
+	}
+	cmd.val = v.(map[string]*CommandInfo)
+	return nil
+}
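
Every command in v5 is a typed Cmder whose readReply fills in a concrete value, so application code only touches the Val/Result/conversion helpers defined above. A small sketch; `client` is a *redis.Client built as in the README quickstart, and Get comes from the Cmdable interface in commands.go below:

```go
// Sketch only: typed command results as defined in command.go above.
val, err := client.Get("counter").Int64() // StringCmd.Int64 parses the bulk reply
if err == redis.Nil {
	fmt.Println("counter does not exist")
} else if err != nil {
	fmt.Println("GET failed:", err)
} else {
	fmt.Println("counter =", val)
}

// Arbitrary commands go through NewCmd + Process and come back as interface{}.
cmd := redis.NewCmd("object", "encoding", "counter")
if err := client.Process(cmd); err == nil {
	fmt.Println(cmd.Val())
}
```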

+ 2078 - 0
vendor/gopkg.in/redis.v5/commands.go

@@ -0,0 +1,2078 @@
+package redis
+
+import (
+	"io"
+	"strconv"
+	"time"
+
+	"gopkg.in/redis.v5/internal"
+)
+
+func readTimeout(timeout time.Duration) time.Duration {
+	if timeout == 0 {
+		return 0
+	}
+	return timeout + time.Second
+}
+
+func usePrecise(dur time.Duration) bool {
+	return dur < time.Second || dur%time.Second != 0
+}
+
+func formatMs(dur time.Duration) string {
+	if dur > 0 && dur < time.Millisecond {
+		internal.Logf(
+			"specified duration is %s, but minimal supported value is %s",
+			dur, time.Millisecond,
+		)
+	}
+	return strconv.FormatInt(int64(dur/time.Millisecond), 10)
+}
+
+func formatSec(dur time.Duration) string {
+	if dur > 0 && dur < time.Second {
+		internal.Logf(
+			"specified duration is %s, but minimal supported value is %s",
+			dur, time.Second,
+		)
+	}
+	return strconv.FormatInt(int64(dur/time.Second), 10)
+}
+
+type Cmdable interface {
+	Pipeline() *Pipeline
+	Pipelined(fn func(*Pipeline) error) ([]Cmder, error)
+
+	Echo(message interface{}) *StringCmd
+	Ping() *StatusCmd
+	Quit() *StatusCmd
+	Del(keys ...string) *IntCmd
+	Unlink(keys ...string) *IntCmd
+	Dump(key string) *StringCmd
+	Exists(key string) *BoolCmd
+	// TODO: merge with Exists in v6
+	ExistsMulti(keys ...string) *IntCmd
+	Expire(key string, expiration time.Duration) *BoolCmd
+	ExpireAt(key string, tm time.Time) *BoolCmd
+	Keys(pattern string) *StringSliceCmd
+	Migrate(host, port, key string, db int64, timeout time.Duration) *StatusCmd
+	Move(key string, db int64) *BoolCmd
+	ObjectRefCount(key string) *IntCmd
+	ObjectEncoding(key string) *StringCmd
+	ObjectIdleTime(key string) *DurationCmd
+	Persist(key string) *BoolCmd
+	PExpire(key string, expiration time.Duration) *BoolCmd
+	PExpireAt(key string, tm time.Time) *BoolCmd
+	PTTL(key string) *DurationCmd
+	RandomKey() *StringCmd
+	Rename(key, newkey string) *StatusCmd
+	RenameNX(key, newkey string) *BoolCmd
+	Restore(key string, ttl time.Duration, value string) *StatusCmd
+	RestoreReplace(key string, ttl time.Duration, value string) *StatusCmd
+	Sort(key string, sort Sort) *StringSliceCmd
+	SortInterfaces(key string, sort Sort) *SliceCmd
+	TTL(key string) *DurationCmd
+	Type(key string) *StatusCmd
+	Scan(cursor uint64, match string, count int64) *ScanCmd
+	SScan(key string, cursor uint64, match string, count int64) *ScanCmd
+	HScan(key string, cursor uint64, match string, count int64) *ScanCmd
+	ZScan(key string, cursor uint64, match string, count int64) *ScanCmd
+	Append(key, value string) *IntCmd
+	BitCount(key string, bitCount *BitCount) *IntCmd
+	BitOpAnd(destKey string, keys ...string) *IntCmd
+	BitOpOr(destKey string, keys ...string) *IntCmd
+	BitOpXor(destKey string, keys ...string) *IntCmd
+	BitOpNot(destKey string, key string) *IntCmd
+	BitPos(key string, bit int64, pos ...int64) *IntCmd
+	Decr(key string) *IntCmd
+	DecrBy(key string, decrement int64) *IntCmd
+	Get(key string) *StringCmd
+	GetBit(key string, offset int64) *IntCmd
+	GetRange(key string, start, end int64) *StringCmd
+	GetSet(key string, value interface{}) *StringCmd
+	Incr(key string) *IntCmd
+	IncrBy(key string, value int64) *IntCmd
+	IncrByFloat(key string, value float64) *FloatCmd
+	MGet(keys ...string) *SliceCmd
+	MSet(pairs ...interface{}) *StatusCmd
+	MSetNX(pairs ...interface{}) *BoolCmd
+	Set(key string, value interface{}, expiration time.Duration) *StatusCmd
+	SetBit(key string, offset int64, value int) *IntCmd
+	SetNX(key string, value interface{}, expiration time.Duration) *BoolCmd
+	SetXX(key string, value interface{}, expiration time.Duration) *BoolCmd
+	SetRange(key string, offset int64, value string) *IntCmd
+	StrLen(key string) *IntCmd
+	HDel(key string, fields ...string) *IntCmd
+	HExists(key, field string) *BoolCmd
+	HGet(key, field string) *StringCmd
+	HGetAll(key string) *StringStringMapCmd
+	HIncrBy(key, field string, incr int64) *IntCmd
+	HIncrByFloat(key, field string, incr float64) *FloatCmd
+	HKeys(key string) *StringSliceCmd
+	HLen(key string) *IntCmd
+	HMGet(key string, fields ...string) *SliceCmd
+	HMSet(key string, fields map[string]string) *StatusCmd
+	HSet(key, field string, value interface{}) *BoolCmd
+	HSetNX(key, field string, value interface{}) *BoolCmd
+	HVals(key string) *StringSliceCmd
+	BLPop(timeout time.Duration, keys ...string) *StringSliceCmd
+	BRPop(timeout time.Duration, keys ...string) *StringSliceCmd
+	BRPopLPush(source, destination string, timeout time.Duration) *StringCmd
+	LIndex(key string, index int64) *StringCmd
+	LInsert(key, op string, pivot, value interface{}) *IntCmd
+	LInsertBefore(key string, pivot, value interface{}) *IntCmd
+	LInsertAfter(key string, pivot, value interface{}) *IntCmd
+	LLen(key string) *IntCmd
+	LPop(key string) *StringCmd
+	LPush(key string, values ...interface{}) *IntCmd
+	LPushX(key string, value interface{}) *IntCmd
+	LRange(key string, start, stop int64) *StringSliceCmd
+	LRem(key string, count int64, value interface{}) *IntCmd
+	LSet(key string, index int64, value interface{}) *StatusCmd
+	LTrim(key string, start, stop int64) *StatusCmd
+	RPop(key string) *StringCmd
+	RPopLPush(source, destination string) *StringCmd
+	RPush(key string, values ...interface{}) *IntCmd
+	RPushX(key string, value interface{}) *IntCmd
+	SAdd(key string, members ...interface{}) *IntCmd
+	SCard(key string) *IntCmd
+	SDiff(keys ...string) *StringSliceCmd
+	SDiffStore(destination string, keys ...string) *IntCmd
+	SInter(keys ...string) *StringSliceCmd
+	SInterStore(destination string, keys ...string) *IntCmd
+	SIsMember(key string, member interface{}) *BoolCmd
+	SMembers(key string) *StringSliceCmd
+	SMove(source, destination string, member interface{}) *BoolCmd
+	SPop(key string) *StringCmd
+	SPopN(key string, count int64) *StringSliceCmd
+	SRandMember(key string) *StringCmd
+	SRandMemberN(key string, count int64) *StringSliceCmd
+	SRem(key string, members ...interface{}) *IntCmd
+	SUnion(keys ...string) *StringSliceCmd
+	SUnionStore(destination string, keys ...string) *IntCmd
+	ZAdd(key string, members ...Z) *IntCmd
+	ZAddNX(key string, members ...Z) *IntCmd
+	ZAddXX(key string, members ...Z) *IntCmd
+	ZAddCh(key string, members ...Z) *IntCmd
+	ZAddNXCh(key string, members ...Z) *IntCmd
+	ZAddXXCh(key string, members ...Z) *IntCmd
+	ZIncr(key string, member Z) *FloatCmd
+	ZIncrNX(key string, member Z) *FloatCmd
+	ZIncrXX(key string, member Z) *FloatCmd
+	ZCard(key string) *IntCmd
+	ZCount(key, min, max string) *IntCmd
+	ZIncrBy(key string, increment float64, member string) *FloatCmd
+	ZInterStore(destination string, store ZStore, keys ...string) *IntCmd
+	ZRange(key string, start, stop int64) *StringSliceCmd
+	ZRangeWithScores(key string, start, stop int64) *ZSliceCmd
+	ZRangeByScore(key string, opt ZRangeBy) *StringSliceCmd
+	ZRangeByLex(key string, opt ZRangeBy) *StringSliceCmd
+	ZRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd
+	ZRank(key, member string) *IntCmd
+	ZRem(key string, members ...interface{}) *IntCmd
+	ZRemRangeByRank(key string, start, stop int64) *IntCmd
+	ZRemRangeByScore(key, min, max string) *IntCmd
+	ZRemRangeByLex(key, min, max string) *IntCmd
+	ZRevRange(key string, start, stop int64) *StringSliceCmd
+	ZRevRangeWithScores(key string, start, stop int64) *ZSliceCmd
+	ZRevRangeByScore(key string, opt ZRangeBy) *StringSliceCmd
+	ZRevRangeByLex(key string, opt ZRangeBy) *StringSliceCmd
+	ZRevRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd
+	ZRevRank(key, member string) *IntCmd
+	ZScore(key, member string) *FloatCmd
+	ZUnionStore(dest string, store ZStore, keys ...string) *IntCmd
+	PFAdd(key string, els ...interface{}) *IntCmd
+	PFCount(keys ...string) *IntCmd
+	PFMerge(dest string, keys ...string) *StatusCmd
+	BgRewriteAOF() *StatusCmd
+	BgSave() *StatusCmd
+	ClientKill(ipPort string) *StatusCmd
+	ClientList() *StringCmd
+	ClientPause(dur time.Duration) *BoolCmd
+	ConfigGet(parameter string) *SliceCmd
+	ConfigResetStat() *StatusCmd
+	ConfigSet(parameter, value string) *StatusCmd
+	DbSize() *IntCmd
+	FlushAll() *StatusCmd
+	FlushDb() *StatusCmd
+	Info(section ...string) *StringCmd
+	LastSave() *IntCmd
+	Save() *StatusCmd
+	Shutdown() *StatusCmd
+	ShutdownSave() *StatusCmd
+	ShutdownNoSave() *StatusCmd
+	SlaveOf(host, port string) *StatusCmd
+	Time() *TimeCmd
+	Eval(script string, keys []string, args ...interface{}) *Cmd
+	EvalSha(sha1 string, keys []string, args ...interface{}) *Cmd
+	ScriptExists(scripts ...string) *BoolSliceCmd
+	ScriptFlush() *StatusCmd
+	ScriptKill() *StatusCmd
+	ScriptLoad(script string) *StringCmd
+	DebugObject(key string) *StringCmd
+	PubSubChannels(pattern string) *StringSliceCmd
+	PubSubNumSub(channels ...string) *StringIntMapCmd
+	PubSubNumPat() *IntCmd
+	ClusterSlots() *ClusterSlotsCmd
+	ClusterNodes() *StringCmd
+	ClusterMeet(host, port string) *StatusCmd
+	ClusterForget(nodeID string) *StatusCmd
+	ClusterReplicate(nodeID string) *StatusCmd
+	ClusterResetSoft() *StatusCmd
+	ClusterResetHard() *StatusCmd
+	ClusterInfo() *StringCmd
+	ClusterKeySlot(key string) *IntCmd
+	ClusterCountFailureReports(nodeID string) *IntCmd
+	ClusterCountKeysInSlot(slot int) *IntCmd
+	ClusterDelSlots(slots ...int) *StatusCmd
+	ClusterDelSlotsRange(min, max int) *StatusCmd
+	ClusterSaveConfig() *StatusCmd
+	ClusterSlaves(nodeID string) *StringSliceCmd
+	ClusterFailover() *StatusCmd
+	ClusterAddSlots(slots ...int) *StatusCmd
+	ClusterAddSlotsRange(min, max int) *StatusCmd
+	GeoAdd(key string, geoLocation ...*GeoLocation) *IntCmd
+	GeoPos(key string, members ...string) *GeoPosCmd
+	GeoRadius(key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd
+	GeoRadiusByMember(key, member string, query *GeoRadiusQuery) *GeoLocationCmd
+	GeoDist(key string, member1, member2, unit string) *FloatCmd
+	GeoHash(key string, members ...string) *StringSliceCmd
+	Command() *CommandsInfoCmd
+}
+
+var _ Cmdable = (*Client)(nil)
+var _ Cmdable = (*Tx)(nil)
+var _ Cmdable = (*Ring)(nil)
+var _ Cmdable = (*ClusterClient)(nil)
+
+type cmdable struct {
+	process func(cmd Cmder) error
+}
+
+type statefulCmdable struct {
+	process func(cmd Cmder) error
+}
+
+//------------------------------------------------------------------------------
+
+func (c *statefulCmdable) Auth(password string) *StatusCmd {
+	cmd := NewStatusCmd("auth", password)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Echo(message interface{}) *StringCmd {
+	cmd := NewStringCmd("echo", message)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Ping() *StatusCmd {
+	cmd := NewStatusCmd("ping")
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Wait(numSlaves int, timeout time.Duration) *IntCmd {
+	cmd := NewIntCmd("wait", numSlaves, int(timeout/time.Millisecond))
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Quit() *StatusCmd {
+	panic("not implemented")
+}
+
+func (c *statefulCmdable) Select(index int) *StatusCmd {
+	cmd := NewStatusCmd("select", index)
+	c.process(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) Del(keys ...string) *IntCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "del"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Unlink(keys ...string) *IntCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "unlink"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Dump(key string) *StringCmd {
+	cmd := NewStringCmd("dump", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Exists(key string) *BoolCmd {
+	cmd := NewBoolCmd("exists", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ExistsMulti(keys ...string) *IntCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "exists"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Expire(key string, expiration time.Duration) *BoolCmd {
+	cmd := NewBoolCmd("expire", key, formatSec(expiration))
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ExpireAt(key string, tm time.Time) *BoolCmd {
+	cmd := NewBoolCmd("expireat", key, tm.Unix())
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Keys(pattern string) *StringSliceCmd {
+	cmd := NewStringSliceCmd("keys", pattern)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Migrate(host, port, key string, db int64, timeout time.Duration) *StatusCmd {
+	cmd := NewStatusCmd(
+		"migrate",
+		host,
+		port,
+		key,
+		db,
+		formatMs(timeout),
+	)
+	cmd.setReadTimeout(readTimeout(timeout))
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Move(key string, db int64) *BoolCmd {
+	cmd := NewBoolCmd("move", key, db)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ObjectRefCount(key string) *IntCmd {
+	cmd := NewIntCmd("object", "refcount", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ObjectEncoding(key string) *StringCmd {
+	cmd := NewStringCmd("object", "encoding", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ObjectIdleTime(key string) *DurationCmd {
+	cmd := NewDurationCmd(time.Second, "object", "idletime", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Persist(key string) *BoolCmd {
+	cmd := NewBoolCmd("persist", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) PExpire(key string, expiration time.Duration) *BoolCmd {
+	cmd := NewBoolCmd("pexpire", key, formatMs(expiration))
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) PExpireAt(key string, tm time.Time) *BoolCmd {
+	cmd := NewBoolCmd(
+		"pexpireat",
+		key,
+		tm.UnixNano()/int64(time.Millisecond),
+	)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) PTTL(key string) *DurationCmd {
+	cmd := NewDurationCmd(time.Millisecond, "pttl", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) RandomKey() *StringCmd {
+	cmd := NewStringCmd("randomkey")
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Rename(key, newkey string) *StatusCmd {
+	cmd := NewStatusCmd("rename", key, newkey)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) RenameNX(key, newkey string) *BoolCmd {
+	cmd := NewBoolCmd("renamenx", key, newkey)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Restore(key string, ttl time.Duration, value string) *StatusCmd {
+	cmd := NewStatusCmd(
+		"restore",
+		key,
+		formatMs(ttl),
+		value,
+	)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) RestoreReplace(key string, ttl time.Duration, value string) *StatusCmd {
+	cmd := NewStatusCmd(
+		"restore",
+		key,
+		formatMs(ttl),
+		value,
+		"replace",
+	)
+	c.process(cmd)
+	return cmd
+}
+
+type Sort struct {
+	By            string
+	Offset, Count float64
+	Get           []string
+	Order         string
+	IsAlpha       bool
+	Store         string
+}
+
+func (sort *Sort) args(key string) []interface{} {
+	args := []interface{}{"sort", key}
+	if sort.By != "" {
+		args = append(args, "by", sort.By)
+	}
+	if sort.Offset != 0 || sort.Count != 0 {
+		args = append(args, "limit", sort.Offset, sort.Count)
+	}
+	for _, get := range sort.Get {
+		args = append(args, "get", get)
+	}
+	if sort.Order != "" {
+		args = append(args, sort.Order)
+	}
+	if sort.IsAlpha {
+		args = append(args, "alpha")
+	}
+	if sort.Store != "" {
+		args = append(args, "store", sort.Store)
+	}
+	return args
+}
+
+func (c *cmdable) Sort(key string, sort Sort) *StringSliceCmd {
+	cmd := NewStringSliceCmd(sort.args(key)...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) SortInterfaces(key string, sort Sort) *SliceCmd {
+	cmd := NewSliceCmd(sort.args(key)...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) TTL(key string) *DurationCmd {
+	cmd := NewDurationCmd(time.Second, "ttl", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Type(key string) *StatusCmd {
+	cmd := NewStatusCmd("type", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Scan(cursor uint64, match string, count int64) *ScanCmd {
+	args := []interface{}{"scan", cursor}
+	if match != "" {
+		args = append(args, "match", match)
+	}
+	if count > 0 {
+		args = append(args, "count", count)
+	}
+	cmd := NewScanCmd(c.process, args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) SScan(key string, cursor uint64, match string, count int64) *ScanCmd {
+	args := []interface{}{"sscan", key, cursor}
+	if match != "" {
+		args = append(args, "match", match)
+	}
+	if count > 0 {
+		args = append(args, "count", count)
+	}
+	cmd := NewScanCmd(c.process, args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) HScan(key string, cursor uint64, match string, count int64) *ScanCmd {
+	args := []interface{}{"hscan", key, cursor}
+	if match != "" {
+		args = append(args, "match", match)
+	}
+	if count > 0 {
+		args = append(args, "count", count)
+	}
+	cmd := NewScanCmd(c.process, args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZScan(key string, cursor uint64, match string, count int64) *ScanCmd {
+	args := []interface{}{"zscan", key, cursor}
+	if match != "" {
+		args = append(args, "match", match)
+	}
+	if count > 0 {
+		args = append(args, "count", count)
+	}
+	cmd := NewScanCmd(c.process, args...)
+	c.process(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) Append(key, value string) *IntCmd {
+	cmd := NewIntCmd("append", key, value)
+	c.process(cmd)
+	return cmd
+}
+
+type BitCount struct {
+	Start, End int64
+}
+
+func (c *cmdable) BitCount(key string, bitCount *BitCount) *IntCmd {
+	args := []interface{}{"bitcount", key}
+	if bitCount != nil {
+		args = append(
+			args,
+			bitCount.Start,
+			bitCount.End,
+		)
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) bitOp(op, destKey string, keys ...string) *IntCmd {
+	args := make([]interface{}, 3+len(keys))
+	args[0] = "bitop"
+	args[1] = op
+	args[2] = destKey
+	for i, key := range keys {
+		args[3+i] = key
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) BitOpAnd(destKey string, keys ...string) *IntCmd {
+	return c.bitOp("and", destKey, keys...)
+}
+
+func (c *cmdable) BitOpOr(destKey string, keys ...string) *IntCmd {
+	return c.bitOp("or", destKey, keys...)
+}
+
+func (c *cmdable) BitOpXor(destKey string, keys ...string) *IntCmd {
+	return c.bitOp("xor", destKey, keys...)
+}
+
+func (c *cmdable) BitOpNot(destKey string, key string) *IntCmd {
+	return c.bitOp("not", destKey, key)
+}
+
+func (c *cmdable) BitPos(key string, bit int64, pos ...int64) *IntCmd {
+	args := make([]interface{}, 3+len(pos))
+	args[0] = "bitpos"
+	args[1] = key
+	args[2] = bit
+	switch len(pos) {
+	case 0:
+	case 1:
+		args[3] = pos[0]
+	case 2:
+		args[3] = pos[0]
+		args[4] = pos[1]
+	default:
+		panic("too many arguments")
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Decr(key string) *IntCmd {
+	cmd := NewIntCmd("decr", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) DecrBy(key string, decrement int64) *IntCmd {
+	cmd := NewIntCmd("decrby", key, decrement)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Get(key string) *StringCmd {
+	cmd := NewStringCmd("get", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) GetBit(key string, offset int64) *IntCmd {
+	cmd := NewIntCmd("getbit", key, offset)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) GetRange(key string, start, end int64) *StringCmd {
+	cmd := NewStringCmd("getrange", key, start, end)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) GetSet(key string, value interface{}) *StringCmd {
+	cmd := NewStringCmd("getset", key, value)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Incr(key string) *IntCmd {
+	cmd := NewIntCmd("incr", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) IncrBy(key string, value int64) *IntCmd {
+	cmd := NewIntCmd("incrby", key, value)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) IncrByFloat(key string, value float64) *FloatCmd {
+	cmd := NewFloatCmd("incrbyfloat", key, value)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) MGet(keys ...string) *SliceCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "mget"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewSliceCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) MSet(pairs ...interface{}) *StatusCmd {
+	args := make([]interface{}, 1+len(pairs))
+	args[0] = "mset"
+	for i, pair := range pairs {
+		args[1+i] = pair
+	}
+	cmd := NewStatusCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) MSetNX(pairs ...interface{}) *BoolCmd {
+	args := make([]interface{}, 1+len(pairs))
+	args[0] = "msetnx"
+	for i, pair := range pairs {
+		args[1+i] = pair
+	}
+	cmd := NewBoolCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+// Redis `SET key value [expiration]` command.
+//
+// Use expiration for `SETEX`-like behavior.
+// Zero expiration means the key has no expiration time.
+func (c *cmdable) Set(key string, value interface{}, expiration time.Duration) *StatusCmd {
+	args := make([]interface{}, 3, 4)
+	args[0] = "set"
+	args[1] = key
+	args[2] = value
+	if expiration > 0 {
+		if usePrecise(expiration) {
+			args = append(args, "px", formatMs(expiration))
+		} else {
+			args = append(args, "ex", formatSec(expiration))
+		}
+	}
+	cmd := NewStatusCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) SetBit(key string, offset int64, value int) *IntCmd {
+	cmd := NewIntCmd(
+		"setbit",
+		key,
+		offset,
+		value,
+	)
+	c.process(cmd)
+	return cmd
+}
+
+// Redis `SET key value [expiration] NX` command.
+//
+// Zero expiration means the key has no expiration time.
+func (c *cmdable) SetNX(key string, value interface{}, expiration time.Duration) *BoolCmd {
+	var cmd *BoolCmd
+	if expiration == 0 {
+		// Use old `SETNX` to support old Redis versions.
+		cmd = NewBoolCmd("setnx", key, value)
+	} else {
+		if usePrecise(expiration) {
+			cmd = NewBoolCmd("set", key, value, "px", formatMs(expiration), "nx")
+		} else {
+			cmd = NewBoolCmd("set", key, value, "ex", formatSec(expiration), "nx")
+		}
+	}
+	c.process(cmd)
+	return cmd
+}
+
+// Redis `SET key value [expiration] XX` command.
+//
+// Zero expiration means the key has no expiration time.
+func (c *cmdable) SetXX(key string, value interface{}, expiration time.Duration) *BoolCmd {
+	var cmd *BoolCmd
+	if expiration == 0 {
+		cmd = NewBoolCmd("set", key, value, "xx")
+	} else {
+		if usePrecise(expiration) {
+			cmd = NewBoolCmd("set", key, value, "px", formatMs(expiration), "xx")
+		} else {
+			cmd = NewBoolCmd("set", key, value, "ex", formatSec(expiration), "xx")
+		}
+	}
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) SetRange(key string, offset int64, value string) *IntCmd {
+	cmd := NewIntCmd("setrange", key, offset, value)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) StrLen(key string) *IntCmd {
+	cmd := NewIntCmd("strlen", key)
+	c.process(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) HDel(key string, fields ...string) *IntCmd {
+	args := make([]interface{}, 2+len(fields))
+	args[0] = "hdel"
+	args[1] = key
+	for i, field := range fields {
+		args[2+i] = field
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) HExists(key, field string) *BoolCmd {
+	cmd := NewBoolCmd("hexists", key, field)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) HGet(key, field string) *StringCmd {
+	cmd := NewStringCmd("hget", key, field)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) HGetAll(key string) *StringStringMapCmd {
+	cmd := NewStringStringMapCmd("hgetall", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) HIncrBy(key, field string, incr int64) *IntCmd {
+	cmd := NewIntCmd("hincrby", key, field, incr)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) HIncrByFloat(key, field string, incr float64) *FloatCmd {
+	cmd := NewFloatCmd("hincrbyfloat", key, field, incr)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) HKeys(key string) *StringSliceCmd {
+	cmd := NewStringSliceCmd("hkeys", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) HLen(key string) *IntCmd {
+	cmd := NewIntCmd("hlen", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) HMGet(key string, fields ...string) *SliceCmd {
+	args := make([]interface{}, 2+len(fields))
+	args[0] = "hmget"
+	args[1] = key
+	for i, field := range fields {
+		args[2+i] = field
+	}
+	cmd := NewSliceCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) HMSet(key string, fields map[string]string) *StatusCmd {
+	args := make([]interface{}, 2+len(fields)*2)
+	args[0] = "hmset"
+	args[1] = key
+	i := 2
+	for k, v := range fields {
+		args[i] = k
+		args[i+1] = v
+		i += 2
+	}
+	cmd := NewStatusCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) HSet(key, field string, value interface{}) *BoolCmd {
+	cmd := NewBoolCmd("hset", key, field, value)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) HSetNX(key, field string, value interface{}) *BoolCmd {
+	cmd := NewBoolCmd("hsetnx", key, field, value)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) HVals(key string) *StringSliceCmd {
+	cmd := NewStringSliceCmd("hvals", key)
+	c.process(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) BLPop(timeout time.Duration, keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 1+len(keys)+1)
+	args[0] = "blpop"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	args[len(args)-1] = formatSec(timeout)
+	cmd := NewStringSliceCmd(args...)
+	cmd.setReadTimeout(readTimeout(timeout))
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) BRPop(timeout time.Duration, keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 1+len(keys)+1)
+	args[0] = "brpop"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	args[len(keys)+1] = formatSec(timeout)
+	cmd := NewStringSliceCmd(args...)
+	cmd.setReadTimeout(readTimeout(timeout))
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) BRPopLPush(source, destination string, timeout time.Duration) *StringCmd {
+	cmd := NewStringCmd(
+		"brpoplpush",
+		source,
+		destination,
+		formatSec(timeout),
+	)
+	cmd.setReadTimeout(readTimeout(timeout))
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) LIndex(key string, index int64) *StringCmd {
+	cmd := NewStringCmd("lindex", key, index)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) LInsert(key, op string, pivot, value interface{}) *IntCmd {
+	cmd := NewIntCmd("linsert", key, op, pivot, value)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) LInsertBefore(key string, pivot, value interface{}) *IntCmd {
+	cmd := NewIntCmd("linsert", key, "before", pivot, value)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) LInsertAfter(key string, pivot, value interface{}) *IntCmd {
+	cmd := NewIntCmd("linsert", key, "after", pivot, value)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) LLen(key string) *IntCmd {
+	cmd := NewIntCmd("llen", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) LPop(key string) *StringCmd {
+	cmd := NewStringCmd("lpop", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) LPush(key string, values ...interface{}) *IntCmd {
+	args := make([]interface{}, 2+len(values))
+	args[0] = "lpush"
+	args[1] = key
+	for i, value := range values {
+		args[2+i] = value
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) LPushX(key string, value interface{}) *IntCmd {
+	cmd := NewIntCmd("lpushx", key, value)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) LRange(key string, start, stop int64) *StringSliceCmd {
+	cmd := NewStringSliceCmd(
+		"lrange",
+		key,
+		start,
+		stop,
+	)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) LRem(key string, count int64, value interface{}) *IntCmd {
+	cmd := NewIntCmd("lrem", key, count, value)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) LSet(key string, index int64, value interface{}) *StatusCmd {
+	cmd := NewStatusCmd("lset", key, index, value)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) LTrim(key string, start, stop int64) *StatusCmd {
+	cmd := NewStatusCmd(
+		"ltrim",
+		key,
+		start,
+		stop,
+	)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) RPop(key string) *StringCmd {
+	cmd := NewStringCmd("rpop", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) RPopLPush(source, destination string) *StringCmd {
+	cmd := NewStringCmd("rpoplpush", source, destination)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) RPush(key string, values ...interface{}) *IntCmd {
+	args := make([]interface{}, 2+len(values))
+	args[0] = "rpush"
+	args[1] = key
+	for i, value := range values {
+		args[2+i] = value
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) RPushX(key string, value interface{}) *IntCmd {
+	cmd := NewIntCmd("rpushx", key, value)
+	c.process(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) SAdd(key string, members ...interface{}) *IntCmd {
+	args := make([]interface{}, 2+len(members))
+	args[0] = "sadd"
+	args[1] = key
+	for i, member := range members {
+		args[2+i] = member
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) SCard(key string) *IntCmd {
+	cmd := NewIntCmd("scard", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) SDiff(keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "sdiff"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewStringSliceCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) SDiffStore(destination string, keys ...string) *IntCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "sdiffstore"
+	args[1] = destination
+	for i, key := range keys {
+		args[2+i] = key
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) SInter(keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "sinter"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewStringSliceCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) SInterStore(destination string, keys ...string) *IntCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "sinterstore"
+	args[1] = destination
+	for i, key := range keys {
+		args[2+i] = key
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) SIsMember(key string, member interface{}) *BoolCmd {
+	cmd := NewBoolCmd("sismember", key, member)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) SMembers(key string) *StringSliceCmd {
+	cmd := NewStringSliceCmd("smembers", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) SMove(source, destination string, member interface{}) *BoolCmd {
+	cmd := NewBoolCmd("smove", source, destination, member)
+	c.process(cmd)
+	return cmd
+}
+
+// Redis `SPOP key` command.
+func (c *cmdable) SPop(key string) *StringCmd {
+	cmd := NewStringCmd("spop", key)
+	c.process(cmd)
+	return cmd
+}
+
+// Redis `SPOP key count` command.
+func (c *cmdable) SPopN(key string, count int64) *StringSliceCmd {
+	cmd := NewStringSliceCmd("spop", key, count)
+	c.process(cmd)
+	return cmd
+}
+
+// Redis `SRANDMEMBER key` command.
+func (c *cmdable) SRandMember(key string) *StringCmd {
+	cmd := NewStringCmd("srandmember", key)
+	c.process(cmd)
+	return cmd
+}
+
+// Redis `SRANDMEMBER key count` command.
+func (c *cmdable) SRandMemberN(key string, count int64) *StringSliceCmd {
+	cmd := NewStringSliceCmd("srandmember", key, count)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) SRem(key string, members ...interface{}) *IntCmd {
+	args := make([]interface{}, 2+len(members))
+	args[0] = "srem"
+	args[1] = key
+	for i, member := range members {
+		args[2+i] = member
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) SUnion(keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "sunion"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewStringSliceCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) SUnionStore(destination string, keys ...string) *IntCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "sunionstore"
+	args[1] = destination
+	for i, key := range keys {
+		args[2+i] = key
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+// Z represents a sorted set member.
+type Z struct {
+	Score  float64
+	Member interface{}
+}
+
+// ZStore is used as an arg to ZInterStore and ZUnionStore.
+type ZStore struct {
+	Weights []float64
+	// Can be SUM, MIN or MAX.
+	Aggregate string
+}
+
+func (c *cmdable) zAdd(a []interface{}, n int, members ...Z) *IntCmd {
+	for i, m := range members {
+		a[n+2*i] = m.Score
+		a[n+2*i+1] = m.Member
+	}
+	cmd := NewIntCmd(a...)
+	c.process(cmd)
+	return cmd
+}
+
+// Redis `ZADD key score member [score member ...]` command.
+func (c *cmdable) ZAdd(key string, members ...Z) *IntCmd {
+	const n = 2
+	a := make([]interface{}, n+2*len(members))
+	a[0], a[1] = "zadd", key
+	return c.zAdd(a, n, members...)
+}
+
+// Redis `ZADD key NX score member [score member ...]` command.
+func (c *cmdable) ZAddNX(key string, members ...Z) *IntCmd {
+	const n = 3
+	a := make([]interface{}, n+2*len(members))
+	a[0], a[1], a[2] = "zadd", key, "nx"
+	return c.zAdd(a, n, members...)
+}
+
+// Redis `ZADD key XX score member [score member ...]` command.
+func (c *cmdable) ZAddXX(key string, members ...Z) *IntCmd {
+	const n = 3
+	a := make([]interface{}, n+2*len(members))
+	a[0], a[1], a[2] = "zadd", key, "xx"
+	return c.zAdd(a, n, members...)
+}
+
+// Redis `ZADD key CH score member [score member ...]` command.
+func (c *cmdable) ZAddCh(key string, members ...Z) *IntCmd {
+	const n = 3
+	a := make([]interface{}, n+2*len(members))
+	a[0], a[1], a[2] = "zadd", key, "ch"
+	return c.zAdd(a, n, members...)
+}
+
+// Redis `ZADD key NX CH score member [score member ...]` command.
+func (c *cmdable) ZAddNXCh(key string, members ...Z) *IntCmd {
+	const n = 4
+	a := make([]interface{}, n+2*len(members))
+	a[0], a[1], a[2], a[3] = "zadd", key, "nx", "ch"
+	return c.zAdd(a, n, members...)
+}
+
+// Redis `ZADD key XX CH score member [score member ...]` command.
+func (c *cmdable) ZAddXXCh(key string, members ...Z) *IntCmd {
+	const n = 4
+	a := make([]interface{}, n+2*len(members))
+	a[0], a[1], a[2], a[3] = "zadd", key, "xx", "ch"
+	return c.zAdd(a, n, members...)
+}
+
+func (c *cmdable) zIncr(a []interface{}, n int, members ...Z) *FloatCmd {
+	for i, m := range members {
+		a[n+2*i] = m.Score
+		a[n+2*i+1] = m.Member
+	}
+	cmd := NewFloatCmd(a...)
+	c.process(cmd)
+	return cmd
+}
+
+// Redis `ZADD key INCR score member` command.
+func (c *cmdable) ZIncr(key string, member Z) *FloatCmd {
+	const n = 3
+	a := make([]interface{}, n+2)
+	a[0], a[1], a[2] = "zadd", key, "incr"
+	return c.zIncr(a, n, member)
+}
+
+// Redis `ZADD key NX INCR score member` command.
+func (c *cmdable) ZIncrNX(key string, member Z) *FloatCmd {
+	const n = 4
+	a := make([]interface{}, n+2)
+	a[0], a[1], a[2], a[3] = "zadd", key, "incr", "nx"
+	return c.zIncr(a, n, member)
+}
+
+// Redis `ZADD key XX INCR score member` command.
+func (c *cmdable) ZIncrXX(key string, member Z) *FloatCmd {
+	const n = 4
+	a := make([]interface{}, n+2)
+	a[0], a[1], a[2], a[3] = "zadd", key, "incr", "xx"
+	return c.zIncr(a, n, member)
+}
+
+func (c *cmdable) ZCard(key string) *IntCmd {
+	cmd := NewIntCmd("zcard", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZCount(key, min, max string) *IntCmd {
+	cmd := NewIntCmd("zcount", key, min, max)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZIncrBy(key string, increment float64, member string) *FloatCmd {
+	cmd := NewFloatCmd("zincrby", key, increment, member)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZInterStore(destination string, store ZStore, keys ...string) *IntCmd {
+	args := make([]interface{}, 3+len(keys))
+	args[0] = "zinterstore"
+	args[1] = destination
+	args[2] = strconv.Itoa(len(keys))
+	for i, key := range keys {
+		args[3+i] = key
+	}
+	if len(store.Weights) > 0 {
+		args = append(args, "weights")
+		for _, weight := range store.Weights {
+			args = append(args, weight)
+		}
+	}
+	if store.Aggregate != "" {
+		args = append(args, "aggregate", store.Aggregate)
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) zRange(key string, start, stop int64, withScores bool) *StringSliceCmd {
+	args := []interface{}{
+		"zrange",
+		key,
+		start,
+		stop,
+	}
+	if withScores {
+		args = append(args, "withscores")
+	}
+	cmd := NewStringSliceCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZRange(key string, start, stop int64) *StringSliceCmd {
+	return c.zRange(key, start, stop, false)
+}
+
+func (c *cmdable) ZRangeWithScores(key string, start, stop int64) *ZSliceCmd {
+	cmd := NewZSliceCmd("zrange", key, start, stop, "withscores")
+	c.process(cmd)
+	return cmd
+}
+
+type ZRangeBy struct {
+	Min, Max      string
+	Offset, Count int64
+}
+
+func (c *cmdable) zRangeBy(zcmd, key string, opt ZRangeBy, withScores bool) *StringSliceCmd {
+	args := []interface{}{zcmd, key, opt.Min, opt.Max}
+	if withScores {
+		args = append(args, "withscores")
+	}
+	if opt.Offset != 0 || opt.Count != 0 {
+		args = append(
+			args,
+			"limit",
+			opt.Offset,
+			opt.Count,
+		)
+	}
+	cmd := NewStringSliceCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZRangeByScore(key string, opt ZRangeBy) *StringSliceCmd {
+	return c.zRangeBy("zrangebyscore", key, opt, false)
+}
+
+func (c *cmdable) ZRangeByLex(key string, opt ZRangeBy) *StringSliceCmd {
+	return c.zRangeBy("zrangebylex", key, opt, false)
+}
+
+func (c *cmdable) ZRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd {
+	args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"}
+	if opt.Offset != 0 || opt.Count != 0 {
+		args = append(
+			args,
+			"limit",
+			opt.Offset,
+			opt.Count,
+		)
+	}
+	cmd := NewZSliceCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZRank(key, member string) *IntCmd {
+	cmd := NewIntCmd("zrank", key, member)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZRem(key string, members ...interface{}) *IntCmd {
+	args := make([]interface{}, 2+len(members))
+	args[0] = "zrem"
+	args[1] = key
+	for i, member := range members {
+		args[2+i] = member
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZRemRangeByRank(key string, start, stop int64) *IntCmd {
+	cmd := NewIntCmd(
+		"zremrangebyrank",
+		key,
+		start,
+		stop,
+	)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZRemRangeByScore(key, min, max string) *IntCmd {
+	cmd := NewIntCmd("zremrangebyscore", key, min, max)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZRemRangeByLex(key, min, max string) *IntCmd {
+	cmd := NewIntCmd("zremrangebylex", key, min, max)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZRevRange(key string, start, stop int64) *StringSliceCmd {
+	cmd := NewStringSliceCmd("zrevrange", key, start, stop)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZRevRangeWithScores(key string, start, stop int64) *ZSliceCmd {
+	cmd := NewZSliceCmd("zrevrange", key, start, stop, "withscores")
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) zRevRangeBy(zcmd, key string, opt ZRangeBy) *StringSliceCmd {
+	args := []interface{}{zcmd, key, opt.Max, opt.Min}
+	if opt.Offset != 0 || opt.Count != 0 {
+		args = append(
+			args,
+			"limit",
+			opt.Offset,
+			opt.Count,
+		)
+	}
+	cmd := NewStringSliceCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZRevRangeByScore(key string, opt ZRangeBy) *StringSliceCmd {
+	return c.zRevRangeBy("zrevrangebyscore", key, opt)
+}
+
+func (c *cmdable) ZRevRangeByLex(key string, opt ZRangeBy) *StringSliceCmd {
+	return c.zRevRangeBy("zrevrangebylex", key, opt)
+}
+
+func (c *cmdable) ZRevRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd {
+	args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"}
+	if opt.Offset != 0 || opt.Count != 0 {
+		args = append(
+			args,
+			"limit",
+			opt.Offset,
+			opt.Count,
+		)
+	}
+	cmd := NewZSliceCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZRevRank(key, member string) *IntCmd {
+	cmd := NewIntCmd("zrevrank", key, member)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZScore(key, member string) *FloatCmd {
+	cmd := NewFloatCmd("zscore", key, member)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ZUnionStore(dest string, store ZStore, keys ...string) *IntCmd {
+	args := make([]interface{}, 3+len(keys))
+	args[0] = "zunionstore"
+	args[1] = dest
+	args[2] = strconv.Itoa(len(keys))
+	for i, key := range keys {
+		args[3+i] = key
+	}
+	if len(store.Weights) > 0 {
+		args = append(args, "weights")
+		for _, weight := range store.Weights {
+			args = append(args, weight)
+		}
+	}
+	if store.Aggregate != "" {
+		args = append(args, "aggregate", store.Aggregate)
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) PFAdd(key string, els ...interface{}) *IntCmd {
+	args := make([]interface{}, 2+len(els))
+	args[0] = "pfadd"
+	args[1] = key
+	for i, el := range els {
+		args[2+i] = el
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) PFCount(keys ...string) *IntCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "pfcount"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) PFMerge(dest string, keys ...string) *StatusCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "pfmerge"
+	args[1] = dest
+	for i, key := range keys {
+		args[2+i] = key
+	}
+	cmd := NewStatusCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) BgRewriteAOF() *StatusCmd {
+	cmd := NewStatusCmd("bgrewriteaof")
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) BgSave() *StatusCmd {
+	cmd := NewStatusCmd("bgsave")
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ClientKill(ipPort string) *StatusCmd {
+	cmd := NewStatusCmd("client", "kill", ipPort)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ClientList() *StringCmd {
+	cmd := NewStringCmd("client", "list")
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ClientPause(dur time.Duration) *BoolCmd {
+	cmd := NewBoolCmd("client", "pause", formatMs(dur))
+	c.process(cmd)
+	return cmd
+}
+
+// ClientSetName assigns a name to the connection.
+func (c *statefulCmdable) ClientSetName(name string) *BoolCmd {
+	cmd := NewBoolCmd("client", "setname", name)
+	c.process(cmd)
+	return cmd
+}
+
+// ClientGetName returns the name of the connection.
+func (c *statefulCmdable) ClientGetName() *StringCmd {
+	cmd := NewStringCmd("client", "getname")
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ConfigGet(parameter string) *SliceCmd {
+	cmd := NewSliceCmd("config", "get", parameter)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ConfigResetStat() *StatusCmd {
+	cmd := NewStatusCmd("config", "resetstat")
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ConfigSet(parameter, value string) *StatusCmd {
+	cmd := NewStatusCmd("config", "set", parameter, value)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) DbSize() *IntCmd {
+	cmd := NewIntCmd("dbsize")
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) FlushAll() *StatusCmd {
+	cmd := NewStatusCmd("flushall")
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) FlushDb() *StatusCmd {
+	cmd := NewStatusCmd("flushdb")
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Info(section ...string) *StringCmd {
+	args := []interface{}{"info"}
+	if len(section) > 0 {
+		args = append(args, section[0])
+	}
+	cmd := NewStringCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) LastSave() *IntCmd {
+	cmd := NewIntCmd("lastsave")
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) Save() *StatusCmd {
+	cmd := NewStatusCmd("save")
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) shutdown(modifier string) *StatusCmd {
+	var args []interface{}
+	if modifier == "" {
+		args = []interface{}{"shutdown"}
+	} else {
+		args = []interface{}{"shutdown", modifier}
+	}
+	cmd := NewStatusCmd(args...)
+	c.process(cmd)
+	if err := cmd.Err(); err != nil {
+		if err == io.EOF {
+			// Server quit as expected.
+			cmd.err = nil
+		}
+	} else {
+		// Server did not quit. String reply contains the reason.
+		cmd.err = internal.RedisError(cmd.val)
+		cmd.val = ""
+	}
+	return cmd
+}
+
+func (c *cmdable) Shutdown() *StatusCmd {
+	return c.shutdown("")
+}
+
+func (c *cmdable) ShutdownSave() *StatusCmd {
+	return c.shutdown("save")
+}
+
+func (c *cmdable) ShutdownNoSave() *StatusCmd {
+	return c.shutdown("nosave")
+}
+
+func (c *cmdable) SlaveOf(host, port string) *StatusCmd {
+	cmd := NewStatusCmd("slaveof", host, port)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) SlowLog() {
+	panic("not implemented")
+}
+
+func (c *cmdable) Sync() {
+	panic("not implemented")
+}
+
+func (c *cmdable) Time() *TimeCmd {
+	cmd := NewTimeCmd("time")
+	c.process(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) Eval(script string, keys []string, args ...interface{}) *Cmd {
+	cmdArgs := make([]interface{}, 3+len(keys)+len(args))
+	cmdArgs[0] = "eval"
+	cmdArgs[1] = script
+	cmdArgs[2] = strconv.Itoa(len(keys))
+	for i, key := range keys {
+		cmdArgs[3+i] = key
+	}
+	pos := 3 + len(keys)
+	for i, arg := range args {
+		cmdArgs[pos+i] = arg
+	}
+	cmd := NewCmd(cmdArgs...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) EvalSha(sha1 string, keys []string, args ...interface{}) *Cmd {
+	cmdArgs := make([]interface{}, 3+len(keys)+len(args))
+	cmdArgs[0] = "evalsha"
+	cmdArgs[1] = sha1
+	cmdArgs[2] = strconv.Itoa(len(keys))
+	for i, key := range keys {
+		cmdArgs[3+i] = key
+	}
+	pos := 3 + len(keys)
+	for i, arg := range args {
+		cmdArgs[pos+i] = arg
+	}
+	cmd := NewCmd(cmdArgs...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ScriptExists(scripts ...string) *BoolSliceCmd {
+	args := make([]interface{}, 2+len(scripts))
+	args[0] = "script"
+	args[1] = "exists"
+	for i, script := range scripts {
+		args[2+i] = script
+	}
+	cmd := NewBoolSliceCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ScriptFlush() *StatusCmd {
+	cmd := NewStatusCmd("script", "flush")
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ScriptKill() *StatusCmd {
+	cmd := NewStatusCmd("script", "kill")
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ScriptLoad(script string) *StringCmd {
+	cmd := NewStringCmd("script", "load", script)
+	c.process(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) DebugObject(key string) *StringCmd {
+	cmd := NewStringCmd("debug", "object", key)
+	c.process(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+// Publish posts the message to the channel.
+func (c *cmdable) Publish(channel, message string) *IntCmd {
+	cmd := NewIntCmd("PUBLISH", channel, message)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) PubSubChannels(pattern string) *StringSliceCmd {
+	args := []interface{}{"pubsub", "channels"}
+	if pattern != "*" {
+		args = append(args, pattern)
+	}
+	cmd := NewStringSliceCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) PubSubNumSub(channels ...string) *StringIntMapCmd {
+	args := make([]interface{}, 2+len(channels))
+	args[0] = "pubsub"
+	args[1] = "numsub"
+	for i, channel := range channels {
+		args[2+i] = channel
+	}
+	cmd := NewStringIntMapCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) PubSubNumPat() *IntCmd {
+	cmd := NewIntCmd("pubsub", "numpat")
+	c.process(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) ClusterSlots() *ClusterSlotsCmd {
+	cmd := NewClusterSlotsCmd("cluster", "slots")
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ClusterNodes() *StringCmd {
+	cmd := NewStringCmd("cluster", "nodes")
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ClusterMeet(host, port string) *StatusCmd {
+	cmd := NewStatusCmd("cluster", "meet", host, port)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ClusterForget(nodeID string) *StatusCmd {
+	cmd := NewStatusCmd("cluster", "forget", nodeID)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ClusterReplicate(nodeID string) *StatusCmd {
+	cmd := NewStatusCmd("cluster", "replicate", nodeID)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ClusterResetSoft() *StatusCmd {
+	cmd := NewStatusCmd("cluster", "reset", "soft")
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ClusterResetHard() *StatusCmd {
+	cmd := NewStatusCmd("cluster", "reset", "hard")
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ClusterInfo() *StringCmd {
+	cmd := NewStringCmd("cluster", "info")
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ClusterKeySlot(key string) *IntCmd {
+	cmd := NewIntCmd("cluster", "keyslot", key)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ClusterCountFailureReports(nodeID string) *IntCmd {
+	cmd := NewIntCmd("cluster", "count-failure-reports", nodeID)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ClusterCountKeysInSlot(slot int) *IntCmd {
+	cmd := NewIntCmd("cluster", "countkeysinslot", slot)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ClusterDelSlots(slots ...int) *StatusCmd {
+	args := make([]interface{}, 2+len(slots))
+	args[0] = "cluster"
+	args[1] = "delslots"
+	for i, slot := range slots {
+		args[2+i] = slot
+	}
+	cmd := NewStatusCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ClusterDelSlotsRange(min, max int) *StatusCmd {
+	size := max - min + 1
+	slots := make([]int, size)
+	for i := 0; i < size; i++ {
+		slots[i] = min + i
+	}
+	return c.ClusterDelSlots(slots...)
+}
+
+func (c *cmdable) ClusterSaveConfig() *StatusCmd {
+	cmd := NewStatusCmd("cluster", "saveconfig")
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ClusterSlaves(nodeID string) *StringSliceCmd {
+	cmd := NewStringSliceCmd("cluster", "slaves", nodeID)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *statefulCmdable) ReadOnly() *StatusCmd {
+	cmd := NewStatusCmd("readonly")
+	c.process(cmd)
+	return cmd
+}
+
+func (c *statefulCmdable) ReadWrite() *StatusCmd {
+	cmd := NewStatusCmd("readwrite")
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ClusterFailover() *StatusCmd {
+	cmd := NewStatusCmd("cluster", "failover")
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ClusterAddSlots(slots ...int) *StatusCmd {
+	args := make([]interface{}, 2+len(slots))
+	args[0] = "cluster"
+	args[1] = "addslots"
+	for i, num := range slots {
+		args[2+i] = strconv.Itoa(num)
+	}
+	cmd := NewStatusCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ClusterAddSlotsRange(min, max int) *StatusCmd {
+	size := max - min + 1
+	slots := make([]int, size)
+	for i := 0; i < size; i++ {
+		slots[i] = min + i
+	}
+	return c.ClusterAddSlots(slots...)
+}
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) GeoAdd(key string, geoLocation ...*GeoLocation) *IntCmd {
+	args := make([]interface{}, 2+3*len(geoLocation))
+	args[0] = "geoadd"
+	args[1] = key
+	for i, eachLoc := range geoLocation {
+		args[2+3*i] = eachLoc.Longitude
+		args[2+3*i+1] = eachLoc.Latitude
+		args[2+3*i+2] = eachLoc.Name
+	}
+	cmd := NewIntCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) GeoRadius(key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd {
+	cmd := NewGeoLocationCmd(query, "georadius", key, longitude, latitude)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) GeoRadiusByMember(key, member string, query *GeoRadiusQuery) *GeoLocationCmd {
+	cmd := NewGeoLocationCmd(query, "georadiusbymember", key, member)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) GeoDist(key string, member1, member2, unit string) *FloatCmd {
+	if unit == "" {
+		unit = "km"
+	}
+	cmd := NewFloatCmd("geodist", key, member1, member2, unit)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) GeoHash(key string, members ...string) *StringSliceCmd {
+	args := make([]interface{}, 2+len(members))
+	args[0] = "geohash"
+	args[1] = key
+	for i, member := range members {
+		args[2+i] = member
+	}
+	cmd := NewStringSliceCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) GeoPos(key string, members ...string) *GeoPosCmd {
+	args := make([]interface{}, 2+len(members))
+	args[0] = "geopos"
+	args[1] = key
+	for i, member := range members {
+		args[2+i] = member
+	}
+	cmd := NewGeoPosCmd(args...)
+	c.process(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) Command() *CommandsInfoCmd {
+	cmd := NewCommandsInfoCmd("command")
+	c.process(cmd)
+	return cmd
+}
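
Set, SetNX and SetXX above choose between EX and PX via usePrecise: whole-second TTLs are sent as EX, sub-second or non-integral ones as PX, and SetNX with a zero expiration falls back to the legacy SETNX command. A short usage sketch (server address and key names are assumptions):

package main

import (
	"fmt"
	"time"

	redis "gopkg.in/redis.v5"
)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer client.Close()

	// 10s divides evenly into seconds, so the client sends: SET session:42 payload EX 10
	if err := client.Set("session:42", "payload", 10*time.Second).Err(); err != nil {
		panic(err)
	}

	// 1500ms needs millisecond precision, so PX 1500 is sent instead.
	if err := client.Set("lock:job", "owner-1", 1500*time.Millisecond).Err(); err != nil {
		panic(err)
	}

	// Zero expiration makes SetNX use the plain SETNX command.
	created, err := client.SetNX("counter:init", "0", 0).Result()
	if err != nil {
		panic(err)
	}

	ttl, err := client.TTL("session:42").Result()
	if err != nil {
		panic(err)
	}
	fmt.Println("created:", created, "ttl:", ttl)
}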

+ 0 - 0
vendor/gopkg.in/redis.v2/doc.go → vendor/gopkg.in/redis.v5/doc.go


+ 81 - 0
vendor/gopkg.in/redis.v5/internal/consistenthash/consistenthash.go

@@ -0,0 +1,81 @@
+/*
+Copyright 2013 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package consistenthash provides an implementation of a ring hash.
+package consistenthash
+
+import (
+	"hash/crc32"
+	"sort"
+	"strconv"
+)
+
+type Hash func(data []byte) uint32
+
+type Map struct {
+	hash     Hash
+	replicas int
+	keys     []int // Sorted
+	hashMap  map[int]string
+}
+
+func New(replicas int, fn Hash) *Map {
+	m := &Map{
+		replicas: replicas,
+		hash:     fn,
+		hashMap:  make(map[int]string),
+	}
+	if m.hash == nil {
+		m.hash = crc32.ChecksumIEEE
+	}
+	return m
+}
+
+// Returns true if there are no items available.
+func (m *Map) IsEmpty() bool {
+	return len(m.keys) == 0
+}
+
+// Adds some keys to the hash.
+func (m *Map) Add(keys ...string) {
+	for _, key := range keys {
+		for i := 0; i < m.replicas; i++ {
+			hash := int(m.hash([]byte(strconv.Itoa(i) + key)))
+			m.keys = append(m.keys, hash)
+			m.hashMap[hash] = key
+		}
+	}
+	sort.Ints(m.keys)
+}
+
+// Gets the closest item in the hash to the provided key.
+func (m *Map) Get(key string) string {
+	if m.IsEmpty() {
+		return ""
+	}
+
+	hash := int(m.hash([]byte(key)))
+
+	// Binary search for appropriate replica.
+	idx := sort.Search(len(m.keys), func(i int) bool { return m.keys[i] >= hash })
+
+	// Means we have cycled back to the first replica.
+	if idx == len(m.keys) {
+		idx = 0
+	}
+
+	return m.hashMap[m.keys[idx]]
+}
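
This package backs the Ring client: Add hashes each node name replicas times onto the ring, and Get binary-searches for the first point at or after the key's hash, wrapping around to the start. Because the package lives under internal/, it is only importable from inside gopkg.in/redis.v5; the sketch below assumes it is compiled as an example test next to the package:

package consistenthash_test

import (
	"fmt"

	"gopkg.in/redis.v5/internal/consistenthash"
)

func Example() {
	// 100 virtual replicas per node; nil falls back to crc32.ChecksumIEEE.
	ring := consistenthash.New(100, nil)
	ring.Add("shard-a:6379", "shard-b:6379", "shard-c:6379")

	// A given key maps to the same node until the membership changes.
	fmt.Println(ring.Get("user:1000") == ring.Get("user:1000"))
	// Output: true
}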

+ 75 - 0
vendor/gopkg.in/redis.v5/internal/errors.go

@@ -0,0 +1,75 @@
+package internal
+
+import (
+	"io"
+	"net"
+	"strings"
+)
+
+const Nil = RedisError("redis: nil")
+
+type RedisError string
+
+func (e RedisError) Error() string { return string(e) }
+
+func IsRetryableError(err error) bool {
+	return IsNetworkError(err)
+}
+
+func IsInternalError(err error) bool {
+	_, ok := err.(RedisError)
+	return ok
+}
+
+func IsNetworkError(err error) bool {
+	if err == io.EOF {
+		return true
+	}
+	_, ok := err.(net.Error)
+	return ok
+}
+
+func IsBadConn(err error, allowTimeout bool) bool {
+	if err == nil {
+		return false
+	}
+	if IsInternalError(err) {
+		return false
+	}
+	if allowTimeout {
+		if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
+			return false
+		}
+	}
+	return true
+}
+
+func IsMovedError(err error) (moved bool, ask bool, addr string) {
+	if !IsInternalError(err) {
+		return
+	}
+
+	s := err.Error()
+	if strings.HasPrefix(s, "MOVED ") {
+		moved = true
+	} else if strings.HasPrefix(s, "ASK ") {
+		ask = true
+	} else {
+		return
+	}
+
+	ind := strings.LastIndex(s, " ")
+	if ind == -1 {
+		return false, false, ""
+	}
+	addr = s[ind+1:]
+	return
+}
+
+func IsLoadingError(err error) bool {
+	return strings.HasPrefix(err.Error(), "LOADING")
+}
+
+func IsExecAbortError(err error) bool {
+	return strings.HasPrefix(err.Error(), "EXECABORT")
+}
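
These predicates drive retry and cluster-redirect handling: IsMovedError recognizes MOVED/ASK replies and extracts the target address from the error text. A small sketch, again assuming it is built inside the redis.v5 module since the package is internal:

package internal_test

import (
	"fmt"

	"gopkg.in/redis.v5/internal"
)

func Example_isMovedError() {
	// A cluster node answers a misdirected command with a MOVED error.
	err := internal.RedisError("MOVED 3999 127.0.0.1:6381")

	moved, ask, addr := internal.IsMovedError(err)
	fmt.Println(moved, ask, addr)
	// Output: true false 127.0.0.1:6381
}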

+ 73 - 0
vendor/gopkg.in/redis.v5/internal/hashtag/hashtag.go

@@ -0,0 +1,73 @@
+package hashtag
+
+import (
+	"math/rand"
+	"strings"
+)
+
+const SlotNumber = 16384
+
+// CRC16 implementation according to CCITT standards.
+// Copyright 2001-2010 Georges Menie (www.menie.org)
+// Copyright 2013 The Go Authors. All rights reserved.
+// http://redis.io/topics/cluster-spec#appendix-a-crc16-reference-implementation-in-ansi-c
+var crc16tab = [256]uint16{
+	0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
+	0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
+	0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
+	0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
+	0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
+	0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
+	0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
+	0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
+	0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
+	0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
+	0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
+	0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
+	0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
+	0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
+	0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
+	0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
+	0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
+	0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
+	0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
+	0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
+	0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
+	0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
+	0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
+	0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
+	0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
+	0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
+	0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
+	0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
+	0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
+	0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
+	0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
+	0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0,
+}
+
+func Key(key string) string {
+	if s := strings.IndexByte(key, '{'); s > -1 {
+		if e := strings.IndexByte(key[s+1:], '}'); e > 0 {
+			return key[s+1 : s+e+1]
+		}
+	}
+	return key
+}
+
+// Slot returns a consistent slot number between 0 and 16383
+// for any given string key.
+func Slot(key string) int {
+	key = Key(key)
+	if key == "" {
+		return rand.Intn(SlotNumber)
+	}
+	return int(crc16sum(key)) % SlotNumber
+}
+
+func crc16sum(key string) (crc uint16) {
+	for i := 0; i < len(key); i++ {
+		crc = (crc << 8) ^ crc16tab[(byte(crc>>8)^key[i])&0x00ff]
+	}
+	return
+}
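
The hashtag package implements the cluster key-distribution rule: only the part of a key wrapped in {braces} is hashed, so related keys can be pinned to the same slot. A short sketch:

package main

import (
	"fmt"

	"gopkg.in/redis.v5/internal/hashtag"
)

func main() {
	// Only the {hash tag} is hashed, so these two keys share a slot.
	fmt.Println(hashtag.Key("{user1000}.following")) // user1000

	a := hashtag.Slot("{user1000}.following")
	b := hashtag.Slot("{user1000}.followers")
	fmt.Println(a == b, a >= 0 && a < hashtag.SlotNumber) // true true
}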

+ 15 - 0
vendor/gopkg.in/redis.v5/internal/log.go

@@ -0,0 +1,15 @@
+package internal
+
+import (
+	"fmt"
+	"log"
+)
+
+var Logger *log.Logger
+
+func Logf(s string, args ...interface{}) {
+	if Logger == nil {
+		return
+	}
+	Logger.Output(2, fmt.Sprintf(s, args...))
+}

+ 78 - 0
vendor/gopkg.in/redis.v5/internal/pool/conn.go

@@ -0,0 +1,78 @@
+package pool
+
+import (
+	"net"
+	"sync/atomic"
+	"time"
+
+	"gopkg.in/redis.v5/internal/proto"
+)
+
+var noDeadline = time.Time{}
+
+type Conn struct {
+	netConn net.Conn
+
+	Rd *proto.Reader
+	Wb *proto.WriteBuffer
+
+	Inited bool
+	usedAt atomic.Value
+}
+
+func NewConn(netConn net.Conn) *Conn {
+	cn := &Conn{
+		netConn: netConn,
+		Wb:      proto.NewWriteBuffer(),
+	}
+	cn.Rd = proto.NewReader(cn.netConn)
+	cn.SetUsedAt(time.Now())
+	return cn
+}
+
+func (cn *Conn) UsedAt() time.Time {
+	return cn.usedAt.Load().(time.Time)
+}
+
+func (cn *Conn) SetUsedAt(tm time.Time) {
+	cn.usedAt.Store(tm)
+}
+
+func (cn *Conn) SetNetConn(netConn net.Conn) {
+	cn.netConn = netConn
+	cn.Rd.Reset(netConn)
+}
+
+func (cn *Conn) IsStale(timeout time.Duration) bool {
+	return timeout > 0 && time.Since(cn.UsedAt()) > timeout
+}
+
+func (cn *Conn) SetReadTimeout(timeout time.Duration) error {
+	now := time.Now()
+	cn.SetUsedAt(now)
+	if timeout > 0 {
+		return cn.netConn.SetReadDeadline(now.Add(timeout))
+	}
+	return cn.netConn.SetReadDeadline(noDeadline)
+}
+
+func (cn *Conn) SetWriteTimeout(timeout time.Duration) error {
+	now := time.Now()
+	cn.SetUsedAt(now)
+	if timeout > 0 {
+		return cn.netConn.SetWriteDeadline(now.Add(timeout))
+	}
+	return cn.netConn.SetWriteDeadline(noDeadline)
+}
+
+func (cn *Conn) Write(b []byte) (int, error) {
+	return cn.netConn.Write(b)
+}
+
+func (cn *Conn) RemoteAddr() net.Addr {
+	return cn.netConn.RemoteAddr()
+}
+
+func (cn *Conn) Close() error {
+	return cn.netConn.Close()
+}

+ 354 - 0
vendor/gopkg.in/redis.v5/internal/pool/pool.go

@@ -0,0 +1,354 @@
+package pool
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"gopkg.in/redis.v5/internal"
+)
+
+var (
+	ErrClosed      = errors.New("redis: client is closed")
+	ErrPoolTimeout = errors.New("redis: connection pool timeout")
+	errConnStale   = errors.New("connection is stale")
+)
+
+var timers = sync.Pool{
+	New: func() interface{} {
+		t := time.NewTimer(time.Hour)
+		t.Stop()
+		return t
+	},
+}
+
+// Stats contains pool state information and accumulated stats.
+type Stats struct {
+	Requests uint32 // number of times a connection was requested by the pool
+	Hits     uint32 // number of times free connection was found in the pool
+	Timeouts uint32 // number of times a wait timeout occurred
+
+	TotalConns uint32 // the number of total connections in the pool
+	FreeConns  uint32 // the number of free connections in the pool
+}
+
+type Pooler interface {
+	Get() (*Conn, bool, error)
+	Put(*Conn) error
+	Remove(*Conn, error) error
+	Len() int
+	FreeLen() int
+	Stats() *Stats
+	Close() error
+}
+
+type dialer func() (net.Conn, error)
+
+type ConnPool struct {
+	dial    dialer
+	OnClose func(*Conn) error
+
+	poolTimeout time.Duration
+	idleTimeout time.Duration
+
+	queue chan struct{}
+
+	connsMu sync.Mutex
+	conns   []*Conn
+
+	freeConnsMu sync.Mutex
+	freeConns   []*Conn
+
+	stats Stats
+
+	_closed int32 // atomic
+	lastErr atomic.Value
+}
+
+var _ Pooler = (*ConnPool)(nil)
+
+func NewConnPool(dial dialer, poolSize int, poolTimeout, idleTimeout, idleCheckFrequency time.Duration) *ConnPool {
+	p := &ConnPool{
+		dial: dial,
+
+		poolTimeout: poolTimeout,
+		idleTimeout: idleTimeout,
+
+		queue:     make(chan struct{}, poolSize),
+		conns:     make([]*Conn, 0, poolSize),
+		freeConns: make([]*Conn, 0, poolSize),
+	}
+	if idleTimeout > 0 && idleCheckFrequency > 0 {
+		go p.reaper(idleCheckFrequency)
+	}
+	return p
+}
+
+func (p *ConnPool) NewConn() (*Conn, error) {
+	netConn, err := p.dial()
+	if err != nil {
+		return nil, err
+	}
+	return NewConn(netConn), nil
+}
+
+func (p *ConnPool) PopFree() *Conn {
+	timer := timers.Get().(*time.Timer)
+	timer.Reset(p.poolTimeout)
+
+	select {
+	case p.queue <- struct{}{}:
+		if !timer.Stop() {
+			<-timer.C
+		}
+		timers.Put(timer)
+	case <-timer.C:
+		timers.Put(timer)
+		atomic.AddUint32(&p.stats.Timeouts, 1)
+		return nil
+	}
+
+	p.freeConnsMu.Lock()
+	cn := p.popFree()
+	p.freeConnsMu.Unlock()
+
+	if cn == nil {
+		<-p.queue
+	}
+	return cn
+}
+
+func (p *ConnPool) popFree() *Conn {
+	if len(p.freeConns) == 0 {
+		return nil
+	}
+
+	idx := len(p.freeConns) - 1
+	cn := p.freeConns[idx]
+	p.freeConns = p.freeConns[:idx]
+	return cn
+}
+
+// Get returns an existing connection from the pool or creates a new one.
+func (p *ConnPool) Get() (*Conn, bool, error) {
+	if p.closed() {
+		return nil, false, ErrClosed
+	}
+
+	atomic.AddUint32(&p.stats.Requests, 1)
+
+	timer := timers.Get().(*time.Timer)
+	timer.Reset(p.poolTimeout)
+
+	select {
+	case p.queue <- struct{}{}:
+		if !timer.Stop() {
+			<-timer.C
+		}
+		timers.Put(timer)
+	case <-timer.C:
+		timers.Put(timer)
+		atomic.AddUint32(&p.stats.Timeouts, 1)
+		return nil, false, ErrPoolTimeout
+	}
+
+	for {
+		p.freeConnsMu.Lock()
+		cn := p.popFree()
+		p.freeConnsMu.Unlock()
+
+		if cn == nil {
+			break
+		}
+
+		if cn.IsStale(p.idleTimeout) {
+			p.remove(cn, errConnStale)
+			continue
+		}
+
+		atomic.AddUint32(&p.stats.Hits, 1)
+		return cn, false, nil
+	}
+
+	newcn, err := p.NewConn()
+	if err != nil {
+		<-p.queue
+		return nil, false, err
+	}
+
+	p.connsMu.Lock()
+	p.conns = append(p.conns, newcn)
+	p.connsMu.Unlock()
+
+	return newcn, true, nil
+}
+
+func (p *ConnPool) Put(cn *Conn) error {
+	if data := cn.Rd.PeekBuffered(); data != nil {
+		err := fmt.Errorf("connection has unread data: %q", data)
+		internal.Logf(err.Error())
+		return p.Remove(cn, err)
+	}
+	p.freeConnsMu.Lock()
+	p.freeConns = append(p.freeConns, cn)
+	p.freeConnsMu.Unlock()
+	<-p.queue
+	return nil
+}
+
+func (p *ConnPool) Remove(cn *Conn, reason error) error {
+	p.remove(cn, reason)
+	<-p.queue
+	return nil
+}
+
+func (p *ConnPool) remove(cn *Conn, reason error) {
+	_ = p.closeConn(cn, reason)
+
+	p.connsMu.Lock()
+	for i, c := range p.conns {
+		if c == cn {
+			p.conns = append(p.conns[:i], p.conns[i+1:]...)
+			break
+		}
+	}
+	p.connsMu.Unlock()
+}
+
+// Len returns total number of connections.
+func (p *ConnPool) Len() int {
+	p.connsMu.Lock()
+	l := len(p.conns)
+	p.connsMu.Unlock()
+	return l
+}
+
+// FreeLen returns number of free connections.
+func (p *ConnPool) FreeLen() int {
+	p.freeConnsMu.Lock()
+	l := len(p.freeConns)
+	p.freeConnsMu.Unlock()
+	return l
+}
+
+func (p *ConnPool) Stats() *Stats {
+	return &Stats{
+		Requests:   atomic.LoadUint32(&p.stats.Requests),
+		Hits:       atomic.LoadUint32(&p.stats.Hits),
+		Timeouts:   atomic.LoadUint32(&p.stats.Timeouts),
+		TotalConns: uint32(p.Len()),
+		FreeConns:  uint32(p.FreeLen()),
+	}
+}
+
+func (p *ConnPool) closed() bool {
+	return atomic.LoadInt32(&p._closed) == 1
+}
+
+func (p *ConnPool) Close() error {
+	if !atomic.CompareAndSwapInt32(&p._closed, 0, 1) {
+		return ErrClosed
+	}
+
+	p.connsMu.Lock()
+	var firstErr error
+	for _, cn := range p.conns {
+		if cn == nil {
+			continue
+		}
+		if err := p.closeConn(cn, ErrClosed); err != nil && firstErr == nil {
+			firstErr = err
+		}
+	}
+	p.conns = nil
+	p.connsMu.Unlock()
+
+	p.freeConnsMu.Lock()
+	p.freeConns = nil
+	p.freeConnsMu.Unlock()
+
+	return firstErr
+}
+
+func (p *ConnPool) closeConn(cn *Conn, reason error) error {
+	if p.OnClose != nil {
+		_ = p.OnClose(cn)
+	}
+	return cn.Close()
+}
+
+func (p *ConnPool) reapStaleConn() bool {
+	if len(p.freeConns) == 0 {
+		return false
+	}
+
+	cn := p.freeConns[0]
+	if !cn.IsStale(p.idleTimeout) {
+		return false
+	}
+
+	p.remove(cn, errConnStale)
+	p.freeConns = append(p.freeConns[:0], p.freeConns[1:]...)
+
+	return true
+}
+
+func (p *ConnPool) ReapStaleConns() (int, error) {
+	var n int
+	for {
+		p.queue <- struct{}{}
+		p.freeConnsMu.Lock()
+
+		reaped := p.reapStaleConn()
+
+		p.freeConnsMu.Unlock()
+		<-p.queue
+
+		if reaped {
+			n++
+		} else {
+			break
+		}
+	}
+	return n, nil
+}
+
+func (p *ConnPool) reaper(frequency time.Duration) {
+	ticker := time.NewTicker(frequency)
+	defer ticker.Stop()
+
+	for _ = range ticker.C {
+		if p.closed() {
+			break
+		}
+		n, err := p.ReapStaleConns()
+		if err != nil {
+			internal.Logf("ReapStaleConns failed: %s", err)
+			continue
+		}
+		s := p.Stats()
+		internal.Logf(
+			"reaper: removed %d stale conns (TotalConns=%d FreeConns=%d Requests=%d Hits=%d Timeouts=%d)",
+			n, s.TotalConns, s.FreeConns, s.Requests, s.Hits, s.Timeouts,
+		)
+	}
+}
+
+//------------------------------------------------------------------------------
+
+var idleCheckFrequency atomic.Value
+
+func SetIdleCheckFrequency(d time.Duration) {
+	idleCheckFrequency.Store(d)
+}
+
+func getIdleCheckFrequency() time.Duration {
+	v := idleCheckFrequency.Load()
+	if v == nil {
+		return time.Minute
+	}
+	return v.(time.Duration)
+}
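
The pool is driven by a plain dialer plus sizing and timeout knobs: Get hands out a free connection or dials a new one, and Put returns it to the free list. A hedged sketch, assuming a Redis server listening on localhost:6379 (and, again, internal/pool is not importable outside the library):

package main

import (
	"fmt"
	"net"
	"time"

	"gopkg.in/redis.v5/internal/pool"
)

func main() {
	dial := func() (net.Conn, error) {
		return net.DialTimeout("tcp", "localhost:6379", 5*time.Second)
	}

	// 10 conns max, 4s wait for a free conn, 5m idle timeout, 1m idle checks.
	p := pool.NewConnPool(dial, 10, 4*time.Second, 5*time.Minute, time.Minute)
	defer p.Close()

	cn, isNew, err := p.Get()
	if err != nil {
		fmt.Println("get:", err)
		return
	}
	fmt.Println("dialed a new connection:", isNew)
	_ = p.Put(cn) // back onto the free list
}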

+ 47 - 0
vendor/gopkg.in/redis.v5/internal/pool/pool_single.go

@@ -0,0 +1,47 @@
+package pool
+
+type SingleConnPool struct {
+	cn *Conn
+}
+
+var _ Pooler = (*SingleConnPool)(nil)
+
+func NewSingleConnPool(cn *Conn) *SingleConnPool {
+	return &SingleConnPool{
+		cn: cn,
+	}
+}
+
+func (p *SingleConnPool) Get() (*Conn, bool, error) {
+	return p.cn, false, nil
+}
+
+func (p *SingleConnPool) Put(cn *Conn) error {
+	if p.cn != cn {
+		panic("p.cn != cn")
+	}
+	return nil
+}
+
+func (p *SingleConnPool) Remove(cn *Conn, _ error) error {
+	if p.cn != cn {
+		panic("p.cn != cn")
+	}
+	return nil
+}
+
+func (p *SingleConnPool) Len() int {
+	return 1
+}
+
+func (p *SingleConnPool) FreeLen() int {
+	return 0
+}
+
+func (p *SingleConnPool) Stats() *Stats {
+	return nil
+}
+
+func (p *SingleConnPool) Close() error {
+	return nil
+}

+ 119 - 0
vendor/gopkg.in/redis.v5/internal/pool/pool_sticky.go

@@ -0,0 +1,119 @@
+package pool
+
+import (
+	"errors"
+	"sync"
+)
+
+type StickyConnPool struct {
+	pool     *ConnPool
+	reusable bool
+
+	cn     *Conn
+	closed bool
+	mu     sync.Mutex
+}
+
+var _ Pooler = (*StickyConnPool)(nil)
+
+func NewStickyConnPool(pool *ConnPool, reusable bool) *StickyConnPool {
+	return &StickyConnPool{
+		pool:     pool,
+		reusable: reusable,
+	}
+}
+
+func (p *StickyConnPool) Get() (*Conn, bool, error) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	if p.closed {
+		return nil, false, ErrClosed
+	}
+	if p.cn != nil {
+		return p.cn, false, nil
+	}
+
+	cn, _, err := p.pool.Get()
+	if err != nil {
+		return nil, false, err
+	}
+	p.cn = cn
+	return cn, true, nil
+}
+
+func (p *StickyConnPool) putUpstream() (err error) {
+	err = p.pool.Put(p.cn)
+	p.cn = nil
+	return err
+}
+
+func (p *StickyConnPool) Put(cn *Conn) error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	if p.closed {
+		return ErrClosed
+	}
+	return nil
+}
+
+func (p *StickyConnPool) removeUpstream(reason error) error {
+	err := p.pool.Remove(p.cn, reason)
+	p.cn = nil
+	return err
+}
+
+func (p *StickyConnPool) Remove(cn *Conn, reason error) error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	if p.closed {
+		return nil
+	}
+	return p.removeUpstream(reason)
+}
+
+func (p *StickyConnPool) Len() int {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	if p.cn == nil {
+		return 0
+	}
+	return 1
+}
+
+func (p *StickyConnPool) FreeLen() int {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	if p.cn == nil {
+		return 1
+	}
+	return 0
+}
+
+func (p *StickyConnPool) Stats() *Stats {
+	return nil
+}
+
+func (p *StickyConnPool) Close() error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	if p.closed {
+		return ErrClosed
+	}
+	p.closed = true
+	var err error
+	if p.cn != nil {
+		if p.reusable {
+			err = p.putUpstream()
+		} else {
+			reason := errors.New("redis: unreusable sticky connection")
+			err = p.removeUpstream(reason)
+		}
+	}
+	return err
+}

+ 334 - 0
vendor/gopkg.in/redis.v5/internal/proto/reader.go

@@ -0,0 +1,334 @@
+package proto
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"strconv"
+
+	"gopkg.in/redis.v5/internal"
+)
+
+const bytesAllocLimit = 1024 * 1024 // 1mb
+
+const (
+	ErrorReply  = '-'
+	StatusReply = '+'
+	IntReply    = ':'
+	StringReply = '$'
+	ArrayReply  = '*'
+)
+
+type MultiBulkParse func(*Reader, int64) (interface{}, error)
+
+type Reader struct {
+	src *bufio.Reader
+	buf []byte
+}
+
+func NewReader(rd io.Reader) *Reader {
+	return &Reader{
+		src: bufio.NewReader(rd),
+		buf: make([]byte, 4096),
+	}
+}
+
+func (r *Reader) Reset(rd io.Reader) {
+	r.src.Reset(rd)
+}
+
+func (p *Reader) PeekBuffered() []byte {
+	if n := p.src.Buffered(); n != 0 {
+		b, _ := p.src.Peek(n)
+		return b
+	}
+	return nil
+}
+
+func (p *Reader) ReadN(n int) ([]byte, error) {
+	b, err := readN(p.src, p.buf, n)
+	if err != nil {
+		return nil, err
+	}
+	p.buf = b
+	return b, nil
+}
+
+func (p *Reader) ReadLine() ([]byte, error) {
+	line, isPrefix, err := p.src.ReadLine()
+	if err != nil {
+		return nil, err
+	}
+	if isPrefix {
+		return nil, bufio.ErrBufferFull
+	}
+	if len(line) == 0 {
+		return nil, internal.RedisError("redis: reply is empty")
+	}
+	if isNilReply(line) {
+		return nil, internal.Nil
+	}
+	return line, nil
+}
+
+func (p *Reader) ReadReply(m MultiBulkParse) (interface{}, error) {
+	line, err := p.ReadLine()
+	if err != nil {
+		return nil, err
+	}
+
+	switch line[0] {
+	case ErrorReply:
+		return nil, ParseErrorReply(line)
+	case StatusReply:
+		return parseStatusValue(line), nil
+	case IntReply:
+		return parseInt(line[1:], 10, 64)
+	case StringReply:
+		return p.readTmpBytesValue(line)
+	case ArrayReply:
+		n, err := parseArrayLen(line)
+		if err != nil {
+			return nil, err
+		}
+		return m(p, n)
+	}
+	return nil, fmt.Errorf("redis: can't parse %.100q", line)
+}
+
+func (p *Reader) ReadIntReply() (int64, error) {
+	line, err := p.ReadLine()
+	if err != nil {
+		return 0, err
+	}
+	switch line[0] {
+	case ErrorReply:
+		return 0, ParseErrorReply(line)
+	case IntReply:
+		return parseInt(line[1:], 10, 64)
+	default:
+		return 0, fmt.Errorf("redis: can't parse int reply: %.100q", line)
+	}
+}
+
+func (p *Reader) ReadTmpBytesReply() ([]byte, error) {
+	line, err := p.ReadLine()
+	if err != nil {
+		return nil, err
+	}
+	switch line[0] {
+	case ErrorReply:
+		return nil, ParseErrorReply(line)
+	case StringReply:
+		return p.readTmpBytesValue(line)
+	case StatusReply:
+		return parseStatusValue(line), nil
+	default:
+		return nil, fmt.Errorf("redis: can't parse string reply: %.100q", line)
+	}
+}
+
+func (r *Reader) ReadBytesReply() ([]byte, error) {
+	b, err := r.ReadTmpBytesReply()
+	if err != nil {
+		return nil, err
+	}
+	cp := make([]byte, len(b))
+	copy(cp, b)
+	return cp, nil
+}
+
+func (p *Reader) ReadStringReply() (string, error) {
+	b, err := p.ReadTmpBytesReply()
+	if err != nil {
+		return "", err
+	}
+	return string(b), nil
+}
+
+func (p *Reader) ReadFloatReply() (float64, error) {
+	b, err := p.ReadTmpBytesReply()
+	if err != nil {
+		return 0, err
+	}
+	return parseFloat(b, 64)
+}
+
+func (p *Reader) ReadArrayReply(m MultiBulkParse) (interface{}, error) {
+	line, err := p.ReadLine()
+	if err != nil {
+		return nil, err
+	}
+	switch line[0] {
+	case ErrorReply:
+		return nil, ParseErrorReply(line)
+	case ArrayReply:
+		n, err := parseArrayLen(line)
+		if err != nil {
+			return nil, err
+		}
+		return m(p, n)
+	default:
+		return nil, fmt.Errorf("redis: can't parse array reply: %.100q", line)
+	}
+}
+
+func (p *Reader) ReadArrayLen() (int64, error) {
+	line, err := p.ReadLine()
+	if err != nil {
+		return 0, err
+	}
+	switch line[0] {
+	case ErrorReply:
+		return 0, ParseErrorReply(line)
+	case ArrayReply:
+		return parseArrayLen(line)
+	default:
+		return 0, fmt.Errorf("redis: can't parse array reply: %.100q", line)
+	}
+}
+
+func (p *Reader) ReadScanReply() ([]string, uint64, error) {
+	n, err := p.ReadArrayLen()
+	if err != nil {
+		return nil, 0, err
+	}
+	if n != 2 {
+		return nil, 0, fmt.Errorf("redis: got %d elements in scan reply, expected 2", n)
+	}
+
+	cursor, err := p.ReadUint()
+	if err != nil {
+		return nil, 0, err
+	}
+
+	n, err = p.ReadArrayLen()
+	if err != nil {
+		return nil, 0, err
+	}
+
+	keys := make([]string, n)
+	for i := int64(0); i < n; i++ {
+		key, err := p.ReadStringReply()
+		if err != nil {
+			return nil, 0, err
+		}
+		keys[i] = key
+	}
+
+	return keys, cursor, err
+}
+
+func (p *Reader) readTmpBytesValue(line []byte) ([]byte, error) {
+	if isNilReply(line) {
+		return nil, internal.Nil
+	}
+
+	replyLen, err := strconv.Atoi(string(line[1:]))
+	if err != nil {
+		return nil, err
+	}
+
+	b, err := p.ReadN(replyLen + 2)
+	if err != nil {
+		return nil, err
+	}
+	return b[:replyLen], nil
+}
+
+func (r *Reader) ReadInt() (int64, error) {
+	b, err := r.ReadTmpBytesReply()
+	if err != nil {
+		return 0, err
+	}
+	return parseInt(b, 10, 64)
+}
+
+func (r *Reader) ReadUint() (uint64, error) {
+	b, err := r.ReadTmpBytesReply()
+	if err != nil {
+		return 0, err
+	}
+	return parseUint(b, 10, 64)
+}
+
+// --------------------------------------------------------------------
+
+func readN(r io.Reader, b []byte, n int) ([]byte, error) {
+	if n == 0 && b == nil {
+		return make([]byte, 0), nil
+	}
+
+	if cap(b) >= n {
+		b = b[:n]
+		_, err := io.ReadFull(r, b)
+		return b, err
+	}
+	b = b[:cap(b)]
+
+	pos := 0
+	for pos < n {
+		diff := n - len(b)
+		if diff > bytesAllocLimit {
+			diff = bytesAllocLimit
+		}
+		b = append(b, make([]byte, diff)...)
+
+		nn, err := io.ReadFull(r, b[pos:])
+		if err != nil {
+			return nil, err
+		}
+		pos += nn
+	}
+
+	return b, nil
+}
+
+func formatInt(n int64) string {
+	return strconv.FormatInt(n, 10)
+}
+
+func formatUint(u uint64) string {
+	return strconv.FormatUint(u, 10)
+}
+
+func formatFloat(f float64) string {
+	return strconv.FormatFloat(f, 'f', -1, 64)
+}
+
+func isNilReply(b []byte) bool {
+	return len(b) == 3 &&
+		(b[0] == StringReply || b[0] == ArrayReply) &&
+		b[1] == '-' && b[2] == '1'
+}
+
+func ParseErrorReply(line []byte) error {
+	return internal.RedisError(string(line[1:]))
+}
+
+func parseStatusValue(line []byte) []byte {
+	return line[1:]
+}
+
+func parseArrayLen(line []byte) (int64, error) {
+	if isNilReply(line) {
+		return 0, internal.Nil
+	}
+	return parseInt(line[1:], 10, 64)
+}
+
+func atoi(b []byte) (int, error) {
+	return strconv.Atoi(internal.BytesToString(b))
+}
+
+func parseInt(b []byte, base int, bitSize int) (int64, error) {
+	return strconv.ParseInt(internal.BytesToString(b), base, bitSize)
+}
+
+func parseUint(b []byte, base int, bitSize int) (uint64, error) {
+	return strconv.ParseUint(internal.BytesToString(b), base, bitSize)
+}
+
+func parseFloat(b []byte, bitSize int) (float64, error) {
+	return strconv.ParseFloat(internal.BytesToString(b), bitSize)
+}

+ 131 - 0
vendor/gopkg.in/redis.v5/internal/proto/scan.go

@@ -0,0 +1,131 @@
+package proto
+
+import (
+	"encoding"
+	"fmt"
+	"reflect"
+
+	"gopkg.in/redis.v5/internal"
+)
+
+func Scan(b []byte, v interface{}) error {
+	switch v := v.(type) {
+	case nil:
+		return internal.RedisError("redis: Scan(nil)")
+	case *string:
+		*v = internal.BytesToString(b)
+		return nil
+	case *[]byte:
+		*v = b
+		return nil
+	case *int:
+		var err error
+		*v, err = atoi(b)
+		return err
+	case *int8:
+		n, err := parseInt(b, 10, 8)
+		if err != nil {
+			return err
+		}
+		*v = int8(n)
+		return nil
+	case *int16:
+		n, err := parseInt(b, 10, 16)
+		if err != nil {
+			return err
+		}
+		*v = int16(n)
+		return nil
+	case *int32:
+		n, err := parseInt(b, 10, 32)
+		if err != nil {
+			return err
+		}
+		*v = int32(n)
+		return nil
+	case *int64:
+		n, err := parseInt(b, 10, 64)
+		if err != nil {
+			return err
+		}
+		*v = n
+		return nil
+	case *uint:
+		n, err := parseUint(b, 10, 64)
+		if err != nil {
+			return err
+		}
+		*v = uint(n)
+		return nil
+	case *uint8:
+		n, err := parseUint(b, 10, 8)
+		if err != nil {
+			return err
+		}
+		*v = uint8(n)
+		return nil
+	case *uint16:
+		n, err := parseUint(b, 10, 16)
+		if err != nil {
+			return err
+		}
+		*v = uint16(n)
+		return nil
+	case *uint32:
+		n, err := parseUint(b, 10, 32)
+		if err != nil {
+			return err
+		}
+		*v = uint32(n)
+		return nil
+	case *uint64:
+		n, err := parseUint(b, 10, 64)
+		if err != nil {
+			return err
+		}
+		*v = n
+		return nil
+	case *float32:
+		n, err := parseFloat(b, 32)
+		if err != nil {
+			return err
+		}
+		*v = float32(n)
+		return err
+	case *float64:
+		var err error
+		*v, err = parseFloat(b, 64)
+		return err
+	case *bool:
+		*v = len(b) == 1 && b[0] == '1'
+		return nil
+	case encoding.BinaryUnmarshaler:
+		return v.UnmarshalBinary(b)
+	default:
+		return fmt.Errorf(
+			"redis: can't unmarshal %T (consider implementing BinaryUnmarshaler)", v)
+	}
+}
+
+func ScanSlice(data []string, slice interface{}) error {
+	v := reflect.ValueOf(slice)
+	if !v.IsValid() {
+		return fmt.Errorf("redis: ScanSlice(nil)")
+	}
+	if v.Kind() != reflect.Ptr {
+		return fmt.Errorf("redis: ScanSlice(non-pointer %T)", slice)
+	}
+	v = v.Elem()
+	if v.Kind() != reflect.Slice {
+		return fmt.Errorf("redis: ScanSlice(non-slice %T)", slice)
+	}
+
+	for i, s := range data {
+		elem := internal.SliceNextElem(v)
+		if err := Scan([]byte(s), elem.Addr().Interface()); err != nil {
+			return fmt.Errorf("redis: ScanSlice(index=%d value=%q) failed: %s", i, s, err)
+		}
+	}
+
+	return nil
+}
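
proto.Scan is the type switch the command layer uses to convert a raw reply into the caller's Go value, which is also why custom types must implement encoding.BinaryUnmarshaler. A minimal sketch:

package main

import (
	"fmt"

	"gopkg.in/redis.v5/internal/proto"
)

func main() {
	var n int64
	if err := proto.Scan([]byte("42"), &n); err != nil {
		fmt.Println("scan:", err)
		return
	}

	var ok bool
	_ = proto.Scan([]byte("1"), &ok) // booleans are "0"/"1" on the wire

	var ss []string
	_ = proto.ScanSlice([]string{"a", "b"}, &ss)

	fmt.Println(n, ok, ss) // 42 true [a b]
}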

+ 105 - 0
vendor/gopkg.in/redis.v5/internal/proto/write_buffer.go

@@ -0,0 +1,105 @@
+package proto
+
+import (
+	"encoding"
+	"fmt"
+	"strconv"
+)
+
+const bufferSize = 4096
+
+type WriteBuffer struct {
+	b []byte
+}
+
+func NewWriteBuffer() *WriteBuffer {
+	return &WriteBuffer{
+		b: make([]byte, 0, bufferSize),
+	}
+}
+
+func (w *WriteBuffer) Len() int      { return len(w.b) }
+func (w *WriteBuffer) Bytes() []byte { return w.b }
+func (w *WriteBuffer) Reset()        { w.b = w.b[:0] }
+
+func (w *WriteBuffer) Append(args []interface{}) error {
+	w.b = append(w.b, ArrayReply)
+	w.b = strconv.AppendUint(w.b, uint64(len(args)), 10)
+	w.b = append(w.b, '\r', '\n')
+
+	for _, arg := range args {
+		if err := w.append(arg); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (w *WriteBuffer) append(val interface{}) error {
+	switch v := val.(type) {
+	case nil:
+		w.AppendString("")
+	case string:
+		w.AppendString(v)
+	case []byte:
+		w.AppendBytes(v)
+	case int:
+		w.AppendString(formatInt(int64(v)))
+	case int8:
+		w.AppendString(formatInt(int64(v)))
+	case int16:
+		w.AppendString(formatInt(int64(v)))
+	case int32:
+		w.AppendString(formatInt(int64(v)))
+	case int64:
+		w.AppendString(formatInt(v))
+	case uint:
+		w.AppendString(formatUint(uint64(v)))
+	case uint8:
+		w.AppendString(formatUint(uint64(v)))
+	case uint16:
+		w.AppendString(formatUint(uint64(v)))
+	case uint32:
+		w.AppendString(formatUint(uint64(v)))
+	case uint64:
+		w.AppendString(formatUint(v))
+	case float32:
+		w.AppendString(formatFloat(float64(v)))
+	case float64:
+		w.AppendString(formatFloat(v))
+	case bool:
+		if v {
+			w.AppendString("1")
+		} else {
+			w.AppendString("0")
+		}
+	default:
+		if bm, ok := val.(encoding.BinaryMarshaler); ok {
+			bb, err := bm.MarshalBinary()
+			if err != nil {
+				return err
+			}
+			w.AppendBytes(bb)
+		} else {
+			return fmt.Errorf(
+				"redis: can't marshal %T (consider implementing encoding.BinaryMarshaler)", val)
+		}
+	}
+	return nil
+}
+
+func (w *WriteBuffer) AppendString(s string) {
+	w.b = append(w.b, StringReply)
+	w.b = strconv.AppendUint(w.b, uint64(len(s)), 10)
+	w.b = append(w.b, '\r', '\n')
+	w.b = append(w.b, s...)
+	w.b = append(w.b, '\r', '\n')
+}
+
+func (w *WriteBuffer) AppendBytes(p []byte) {
+	w.b = append(w.b, StringReply)
+	w.b = strconv.AppendUint(w.b, uint64(len(p)), 10)
+	w.b = append(w.b, '\r', '\n')
+	w.b = append(w.b, p...)
+	w.b = append(w.b, '\r', '\n')
+}
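
WriteBuffer encodes a command as a RESP array of bulk strings, which is exactly what gets written to the socket. A short sketch of the wire format it produces:

package main

import (
	"fmt"

	"gopkg.in/redis.v5/internal/proto"
)

func main() {
	wb := proto.NewWriteBuffer()
	if err := wb.Append([]interface{}{"SET", "key", "value"}); err != nil {
		fmt.Println("append:", err)
		return
	}
	// *3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n
	fmt.Printf("%q\n", wb.Bytes())
}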

+ 7 - 0
vendor/gopkg.in/redis.v5/internal/safe.go

@@ -0,0 +1,7 @@
+// +build appengine
+
+package internal
+
+func BytesToString(b []byte) string {
+	return string(b)
+}

+ 14 - 0
vendor/gopkg.in/redis.v5/internal/unsafe.go

@@ -0,0 +1,14 @@
+// +build !appengine
+
+package internal
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+func BytesToString(b []byte) string {
+	bytesHeader := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+	strHeader := reflect.StringHeader{bytesHeader.Data, bytesHeader.Len}
+	return *(*string)(unsafe.Pointer(&strHeader))
+}

+ 47 - 0
vendor/gopkg.in/redis.v5/internal/util.go

@@ -0,0 +1,47 @@
+package internal
+
+import "reflect"
+
+func ToLower(s string) string {
+	if isLower(s) {
+		return s
+	}
+
+	b := make([]byte, len(s))
+	for i := range b {
+		c := s[i]
+		if c >= 'A' && c <= 'Z' {
+			c += 'a' - 'A'
+		}
+		b[i] = c
+	}
+	return BytesToString(b)
+}
+
+func isLower(s string) bool {
+	for i := 0; i < len(s); i++ {
+		c := s[i]
+		if c >= 'A' && c <= 'Z' {
+			return false
+		}
+	}
+	return true
+}
+
+func SliceNextElem(v reflect.Value) reflect.Value {
+	if v.Len() < v.Cap() {
+		v.Set(v.Slice(0, v.Len()+1))
+		return v.Index(v.Len() - 1)
+	}
+
+	elemType := v.Type().Elem()
+
+	if elemType.Kind() == reflect.Ptr {
+		elem := reflect.New(elemType.Elem())
+		v.Set(reflect.Append(v, elem))
+		return elem.Elem()
+	}
+
+	v.Set(reflect.Append(v, reflect.Zero(elemType)))
+	return v.Index(v.Len() - 1)
+}

+ 73 - 0
vendor/gopkg.in/redis.v5/iterator.go

@@ -0,0 +1,73 @@
+package redis
+
+import "sync"
+
+// ScanIterator is used to incrementally iterate over a collection of elements.
+// It's safe for concurrent use by multiple goroutines.
+type ScanIterator struct {
+	mu  sync.Mutex // protects cmd and pos
+	cmd *ScanCmd
+	pos int
+}
+
+// Err returns the last iterator error, if any.
+func (it *ScanIterator) Err() error {
+	it.mu.Lock()
+	err := it.cmd.Err()
+	it.mu.Unlock()
+	return err
+}
+
+// Next advances the cursor and returns true if more values can be read.
+func (it *ScanIterator) Next() bool {
+	it.mu.Lock()
+	defer it.mu.Unlock()
+
+	// Instantly return on errors.
+	if it.cmd.Err() != nil {
+		return false
+	}
+
+	// Advance cursor, check if we are still within range.
+	if it.pos < len(it.cmd.page) {
+		it.pos++
+		return true
+	}
+
+	for {
+		// Return if there is no more data to fetch.
+		if it.cmd.cursor == 0 {
+			return false
+		}
+
+		// Fetch next page.
+		if it.cmd._args[0] == "scan" {
+			it.cmd._args[1] = it.cmd.cursor
+		} else {
+			it.cmd._args[2] = it.cmd.cursor
+		}
+
+		err := it.cmd.process(it.cmd)
+		if err != nil {
+			return false
+		}
+
+		it.pos = 1
+
+		// Redis can occasionally return an empty page.
+		if len(it.cmd.page) > 0 {
+			return true
+		}
+	}
+}
+
+// Val returns the key/field at the current cursor position.
+func (it *ScanIterator) Val() string {
+	var v string
+	it.mu.Lock()
+	if it.cmd.Err() == nil && it.pos > 0 && it.pos <= len(it.cmd.page) {
+		v = it.cmd.page[it.pos-1]
+	}
+	it.mu.Unlock()
+	return v
+}

+ 185 - 0
vendor/gopkg.in/redis.v5/options.go

@@ -0,0 +1,185 @@
+package redis
+
+import (
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"net"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"gopkg.in/redis.v5/internal/pool"
+)
+
+type Options struct {
+	// The network type, either tcp or unix.
+	// Default is tcp.
+	Network string
+	// host:port address.
+	Addr string
+
+	// Dialer creates a new network connection and has priority over
+	// Network and Addr options.
+	Dialer func() (net.Conn, error)
+
+	// Optional password. Must match the password specified in the
+	// requirepass server configuration option.
+	Password string
+	// Database to be selected after connecting to the server.
+	DB int
+
+	// Maximum number of retries before giving up.
+	// Default is to not retry failed commands.
+	MaxRetries int
+
+	// Dial timeout for establishing new connections.
+	// Default is 5 seconds.
+	DialTimeout time.Duration
+	// Timeout for socket reads. If reached, commands will fail
+	// with a timeout instead of blocking.
+	// Default is 3 seconds.
+	ReadTimeout time.Duration
+	// Timeout for socket writes. If reached, commands will fail
+	// with a timeout instead of blocking.
+	// Default is 3 seconds.
+	WriteTimeout time.Duration
+
+	// Maximum number of socket connections.
+	// Default is 10 connections.
+	PoolSize int
+	// Amount of time the client waits for a connection if all connections
+	// are busy before returning an error.
+	// Default is ReadTimeout + 1 second.
+	PoolTimeout time.Duration
+	// Amount of time after which client closes idle connections.
+	// Should be less than server's timeout.
+	// Default is to not close idle connections.
+	IdleTimeout time.Duration
+	// Frequency of idle checks.
+	// Default is 1 minute.
+	// When a negative value is set, the idle check is disabled.
+	IdleCheckFrequency time.Duration
+
+	// Enables read only queries on slave nodes.
+	ReadOnly bool
+
+	// TLS config to use. When set, TLS will be negotiated.
+	TLSConfig *tls.Config
+}
+
+func (opt *Options) init() {
+	if opt.Network == "" {
+		opt.Network = "tcp"
+	}
+	if opt.Dialer == nil {
+		opt.Dialer = func() (net.Conn, error) {
+			conn, err := net.DialTimeout(opt.Network, opt.Addr, opt.DialTimeout)
+			if opt.TLSConfig == nil || err != nil {
+				return conn, err
+			}
+			t := tls.Client(conn, opt.TLSConfig)
+			return t, t.Handshake()
+		}
+	}
+	if opt.PoolSize == 0 {
+		opt.PoolSize = 10
+	}
+	if opt.DialTimeout == 0 {
+		opt.DialTimeout = 5 * time.Second
+	}
+	if opt.ReadTimeout == 0 {
+		opt.ReadTimeout = 3 * time.Second
+	} else if opt.ReadTimeout == -1 {
+		opt.ReadTimeout = 0
+	}
+	if opt.WriteTimeout == 0 {
+		opt.WriteTimeout = opt.ReadTimeout
+	} else if opt.WriteTimeout == -1 {
+		opt.WriteTimeout = 0
+	}
+	if opt.PoolTimeout == 0 {
+		opt.PoolTimeout = opt.ReadTimeout + time.Second
+	}
+	if opt.IdleTimeout == 0 {
+		opt.IdleTimeout = 5 * time.Minute
+	}
+	if opt.IdleCheckFrequency == 0 {
+		opt.IdleCheckFrequency = time.Minute
+	}
+}
+
+// ParseURL parses a redis:// or rediss:// URL into Options that can be used to connect to Redis.
+func ParseURL(redisURL string) (*Options, error) {
+	o := &Options{Network: "tcp"}
+	u, err := url.Parse(redisURL)
+	if err != nil {
+		return nil, err
+	}
+
+	if u.Scheme != "redis" && u.Scheme != "rediss" {
+		return nil, errors.New("invalid redis URL scheme: " + u.Scheme)
+	}
+
+	if u.User != nil {
+		if p, ok := u.User.Password(); ok {
+			o.Password = p
+		}
+	}
+
+	if len(u.Query()) > 0 {
+		return nil, errors.New("no options supported")
+	}
+
+	h, p, err := net.SplitHostPort(u.Host)
+	if err != nil {
+		h = u.Host
+	}
+	if h == "" {
+		h = "localhost"
+	}
+	if p == "" {
+		p = "6379"
+	}
+	o.Addr = net.JoinHostPort(h, p)
+
+	f := strings.FieldsFunc(u.Path, func(r rune) bool {
+		return r == '/'
+	})
+	switch len(f) {
+	case 0:
+		o.DB = 0
+	case 1:
+		if o.DB, err = strconv.Atoi(f[0]); err != nil {
+			return nil, fmt.Errorf("invalid redis database number: %q", f[0])
+		}
+	default:
+		return nil, errors.New("invalid redis URL path: " + u.Path)
+	}
+
+	if u.Scheme == "rediss" {
+		o.TLSConfig = &tls.Config{ServerName: h}
+	}
+	return o, nil
+}
+
+func newConnPool(opt *Options) *pool.ConnPool {
+	return pool.NewConnPool(
+		opt.Dialer,
+		opt.PoolSize,
+		opt.PoolTimeout,
+		opt.IdleTimeout,
+		opt.IdleCheckFrequency,
+	)
+}
+
+// PoolStats contains pool state information and accumulated stats.
+type PoolStats struct {
+	Requests uint32 // number of times a connection was requested by the pool
+	Hits     uint32 // number of times free connection was found in the pool
+	Timeouts uint32 // number of times a wait timeout occurred
+
+	TotalConns uint32 // the number of total connections in the pool
+	FreeConns  uint32 // the number of free connections in the pool
+}
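
ParseURL is where the rediss:// scheme is handled: it fills in Addr, DB and Password, and for rediss it also sets a TLSConfig with the host as ServerName, which the default dialer then uses to negotiate TLS. A sketch (the host, port and password below are made up):

package main

import (
	"fmt"

	redis "gopkg.in/redis.v5"
)

func main() {
	opt, err := redis.ParseURL("rediss://:mypassword@redis.example.com:6380/0")
	if err != nil {
		fmt.Println("parse:", err)
		return
	}
	fmt.Println(opt.Addr, opt.DB, opt.TLSConfig != nil) // redis.example.com:6380 0 true

	client := redis.NewClient(opt) // connections will be wrapped in tls.Client
	defer client.Close()
}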

+ 400 - 0
vendor/gopkg.in/redis.v5/parser.go

@@ -0,0 +1,400 @@
+package redis
+
+import (
+	"fmt"
+	"net"
+	"strconv"
+	"time"
+
+	"gopkg.in/redis.v5/internal/proto"
+)
+
+// Implements proto.MultiBulkParse
+func sliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+	vals := make([]interface{}, 0, n)
+	for i := int64(0); i < n; i++ {
+		v, err := rd.ReadReply(sliceParser)
+		if err == Nil {
+			vals = append(vals, nil)
+		} else if err != nil {
+			return nil, err
+		} else {
+			switch vv := v.(type) {
+			case []byte:
+				vals = append(vals, string(vv))
+			default:
+				vals = append(vals, v)
+			}
+		}
+	}
+	return vals, nil
+}
+
+// Implements proto.MultiBulkParse
+func intSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+	ints := make([]int64, 0, n)
+	for i := int64(0); i < n; i++ {
+		n, err := rd.ReadIntReply()
+		if err != nil {
+			return nil, err
+		}
+		ints = append(ints, n)
+	}
+	return ints, nil
+}
+
+// Implements proto.MultiBulkParse
+func boolSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+	bools := make([]bool, 0, n)
+	for i := int64(0); i < n; i++ {
+		n, err := rd.ReadIntReply()
+		if err != nil {
+			return nil, err
+		}
+		bools = append(bools, n == 1)
+	}
+	return bools, nil
+}
+
+// Implements proto.MultiBulkParse
+func stringSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+	ss := make([]string, 0, n)
+	for i := int64(0); i < n; i++ {
+		s, err := rd.ReadStringReply()
+		if err == Nil {
+			ss = append(ss, "")
+		} else if err != nil {
+			return nil, err
+		} else {
+			ss = append(ss, s)
+		}
+	}
+	return ss, nil
+}
+
+// Implements proto.MultiBulkParse
+func floatSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+	nn := make([]float64, 0, n)
+	for i := int64(0); i < n; i++ {
+		n, err := rd.ReadFloatReply()
+		if err != nil {
+			return nil, err
+		}
+		nn = append(nn, n)
+	}
+	return nn, nil
+}
+
+// Implements proto.MultiBulkParse
+func stringStringMapParser(rd *proto.Reader, n int64) (interface{}, error) {
+	m := make(map[string]string, n/2)
+	for i := int64(0); i < n; i += 2 {
+		key, err := rd.ReadStringReply()
+		if err != nil {
+			return nil, err
+		}
+
+		value, err := rd.ReadStringReply()
+		if err != nil {
+			return nil, err
+		}
+
+		m[key] = value
+	}
+	return m, nil
+}
+
+// Implements proto.MultiBulkParse
+func stringIntMapParser(rd *proto.Reader, n int64) (interface{}, error) {
+	m := make(map[string]int64, n/2)
+	for i := int64(0); i < n; i += 2 {
+		key, err := rd.ReadStringReply()
+		if err != nil {
+			return nil, err
+		}
+
+		n, err := rd.ReadIntReply()
+		if err != nil {
+			return nil, err
+		}
+
+		m[key] = n
+	}
+	return m, nil
+}
+
+// Implements proto.MultiBulkParse
+func zSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+	zz := make([]Z, n/2)
+	for i := int64(0); i < n; i += 2 {
+		var err error
+
+		z := &zz[i/2]
+
+		z.Member, err = rd.ReadStringReply()
+		if err != nil {
+			return nil, err
+		}
+
+		z.Score, err = rd.ReadFloatReply()
+		if err != nil {
+			return nil, err
+		}
+	}
+	return zz, nil
+}
+
+// Implements proto.MultiBulkParse
+func clusterSlotsParser(rd *proto.Reader, n int64) (interface{}, error) {
+	slots := make([]ClusterSlot, n)
+	for i := 0; i < len(slots); i++ {
+		n, err := rd.ReadArrayLen()
+		if err != nil {
+			return nil, err
+		}
+		if n < 2 {
+			err := fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n)
+			return nil, err
+		}
+
+		start, err := rd.ReadIntReply()
+		if err != nil {
+			return nil, err
+		}
+
+		end, err := rd.ReadIntReply()
+		if err != nil {
+			return nil, err
+		}
+
+		nodes := make([]ClusterNode, n-2)
+		for j := 0; j < len(nodes); j++ {
+			n, err := rd.ReadArrayLen()
+			if err != nil {
+				return nil, err
+			}
+			if n != 2 && n != 3 {
+				err := fmt.Errorf("got %d elements in cluster info address, expected 2 or 3", n)
+				return nil, err
+			}
+
+			ip, err := rd.ReadStringReply()
+			if err != nil {
+				return nil, err
+			}
+
+			port, err := rd.ReadIntReply()
+			if err != nil {
+				return nil, err
+			}
+			nodes[j].Addr = net.JoinHostPort(ip, strconv.FormatInt(port, 10))
+
+			if n == 3 {
+				id, err := rd.ReadStringReply()
+				if err != nil {
+					return nil, err
+				}
+				nodes[j].Id = id
+			}
+		}
+
+		slots[i] = ClusterSlot{
+			Start: int(start),
+			End:   int(end),
+			Nodes: nodes,
+		}
+	}
+	return slots, nil
+}
+
+func newGeoLocationParser(q *GeoRadiusQuery) proto.MultiBulkParse {
+	return func(rd *proto.Reader, n int64) (interface{}, error) {
+		var loc GeoLocation
+		var err error
+
+		loc.Name, err = rd.ReadStringReply()
+		if err != nil {
+			return nil, err
+		}
+		if q.WithDist {
+			loc.Dist, err = rd.ReadFloatReply()
+			if err != nil {
+				return nil, err
+			}
+		}
+		if q.WithGeoHash {
+			loc.GeoHash, err = rd.ReadIntReply()
+			if err != nil {
+				return nil, err
+			}
+		}
+		if q.WithCoord {
+			n, err := rd.ReadArrayLen()
+			if err != nil {
+				return nil, err
+			}
+			if n != 2 {
+				return nil, fmt.Errorf("got %d coordinates, expected 2", n)
+			}
+
+			loc.Longitude, err = rd.ReadFloatReply()
+			if err != nil {
+				return nil, err
+			}
+			loc.Latitude, err = rd.ReadFloatReply()
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		return &loc, nil
+	}
+}
+
+func newGeoLocationSliceParser(q *GeoRadiusQuery) proto.MultiBulkParse {
+	return func(rd *proto.Reader, n int64) (interface{}, error) {
+		locs := make([]GeoLocation, 0, n)
+		for i := int64(0); i < n; i++ {
+			v, err := rd.ReadReply(newGeoLocationParser(q))
+			if err != nil {
+				return nil, err
+			}
+			switch vv := v.(type) {
+			case []byte:
+				locs = append(locs, GeoLocation{
+					Name: string(vv),
+				})
+			case *GeoLocation:
+				locs = append(locs, *vv)
+			default:
+				return nil, fmt.Errorf("got %T, expected string or *GeoLocation", v)
+			}
+		}
+		return locs, nil
+	}
+}
+
+func geoPosParser(rd *proto.Reader, n int64) (interface{}, error) {
+	var pos GeoPos
+	var err error
+
+	pos.Longitude, err = rd.ReadFloatReply()
+	if err != nil {
+		return nil, err
+	}
+
+	pos.Latitude, err = rd.ReadFloatReply()
+	if err != nil {
+		return nil, err
+	}
+
+	return &pos, nil
+}
+
+func geoPosSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+	positions := make([]*GeoPos, 0, n)
+	for i := int64(0); i < n; i++ {
+		v, err := rd.ReadReply(geoPosParser)
+		if err != nil {
+			if err == Nil {
+				positions = append(positions, nil)
+				continue
+			}
+			return nil, err
+		}
+		switch v := v.(type) {
+		case *GeoPos:
+			positions = append(positions, v)
+		default:
+			return nil, fmt.Errorf("got %T, expected *GeoPos", v)
+		}
+	}
+	return positions, nil
+}
+
+func commandInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
+	var cmd CommandInfo
+	var err error
+
+	if n != 6 {
+		return nil, fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 6", n)
+	}
+
+	cmd.Name, err = rd.ReadStringReply()
+	if err != nil {
+		return nil, err
+	}
+
+	arity, err := rd.ReadIntReply()
+	if err != nil {
+		return nil, err
+	}
+	cmd.Arity = int8(arity)
+
+	flags, err := rd.ReadReply(stringSliceParser)
+	if err != nil {
+		return nil, err
+	}
+	cmd.Flags = flags.([]string)
+
+	firstKeyPos, err := rd.ReadIntReply()
+	if err != nil {
+		return nil, err
+	}
+	cmd.FirstKeyPos = int8(firstKeyPos)
+
+	lastKeyPos, err := rd.ReadIntReply()
+	if err != nil {
+		return nil, err
+	}
+	cmd.LastKeyPos = int8(lastKeyPos)
+
+	stepCount, err := rd.ReadIntReply()
+	if err != nil {
+		return nil, err
+	}
+	cmd.StepCount = int8(stepCount)
+
+	for _, flag := range cmd.Flags {
+		if flag == "readonly" {
+			cmd.ReadOnly = true
+			break
+		}
+	}
+
+	return &cmd, nil
+}
+
+// Implements proto.MultiBulkParse
+func commandInfoSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+	m := make(map[string]*CommandInfo, n)
+	for i := int64(0); i < n; i++ {
+		v, err := rd.ReadReply(commandInfoParser)
+		if err != nil {
+			return nil, err
+		}
+		vv := v.(*CommandInfo)
+		m[vv.Name] = vv
+
+	}
+	return m, nil
+}
+
+// Implements proto.MultiBulkParse
+func timeParser(rd *proto.Reader, n int64) (interface{}, error) {
+	if n != 2 {
+		return nil, fmt.Errorf("got %d elements, expected 2", n)
+	}
+
+	sec, err := rd.ReadInt()
+	if err != nil {
+		return nil, err
+	}
+
+	microsec, err := rd.ReadInt()
+	if err != nil {
+		return nil, err
+	}
+
+	return time.Unix(sec, microsec*1000), nil
+}

+ 88 - 0
vendor/gopkg.in/redis.v5/pipeline.go

@@ -0,0 +1,88 @@
+package redis
+
+import (
+	"errors"
+	"sync"
+
+	"gopkg.in/redis.v5/internal/pool"
+)
+
+type pipelineExecer func([]Cmder) error
+
+// Pipeline implements pipelining as described in
+// http://redis.io/topics/pipelining. It's safe for concurrent use
+// by multiple goroutines.
+type Pipeline struct {
+	cmdable
+	statefulCmdable
+
+	exec pipelineExecer
+
+	mu     sync.Mutex
+	cmds   []Cmder
+	closed bool
+}
+
+func (c *Pipeline) Process(cmd Cmder) error {
+	c.mu.Lock()
+	c.cmds = append(c.cmds, cmd)
+	c.mu.Unlock()
+	return nil
+}
+
+// Close closes the pipeline, releasing any open resources.
+func (c *Pipeline) Close() error {
+	c.mu.Lock()
+	c.discard()
+	c.closed = true
+	c.mu.Unlock()
+	return nil
+}
+
+// Discard resets the pipeline and discards queued commands.
+func (c *Pipeline) Discard() error {
+	c.mu.Lock()
+	err := c.discard()
+	c.mu.Unlock()
+	return err
+}
+
+func (c *Pipeline) discard() error {
+	if c.closed {
+		return pool.ErrClosed
+	}
+	c.cmds = c.cmds[:0]
+	return nil
+}
+
+// Exec executes all previously queued commands using one
+// client-server roundtrip.
+//
+// Exec always returns the list of commands and the error of the first
+// failed command, if any.
+func (c *Pipeline) Exec() ([]Cmder, error) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.closed {
+		return nil, pool.ErrClosed
+	}
+
+	if len(c.cmds) == 0 {
+		return nil, errors.New("redis: pipeline is empty")
+	}
+
+	cmds := c.cmds
+	c.cmds = nil
+
+	return cmds, c.exec(cmds)
+}
+
+func (c *Pipeline) pipelined(fn func(*Pipeline) error) ([]Cmder, error) {
+	if err := fn(c); err != nil {
+		return nil, err
+	}
+	cmds, err := c.Exec()
+	_ = c.Close()
+	return cmds, err
+}
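
Pipeline only queues Cmders and hands them to its exec callback in a single round trip; the command methods themselves come from the cmdable mixin defined elsewhere in the package. A hedged sketch, assuming the usual Incr and Expire signatures from that mixin and a server on localhost:6379:

package main

import (
	"fmt"
	"time"

	redis "gopkg.in/redis.v5"
)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer client.Close()

	// Both commands are written and read back in a single round trip.
	cmds, err := client.Pipelined(func(pipe *redis.Pipeline) error {
		pipe.Incr("pipeline_counter")
		pipe.Expire("pipeline_counter", time.Hour)
		return nil
	})
	fmt.Println(len(cmds), err)
}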

+ 311 - 0
vendor/gopkg.in/redis.v5/pubsub.go

@@ -0,0 +1,311 @@
+package redis
+
+import (
+	"fmt"
+	"net"
+	"sync"
+	"time"
+
+	"gopkg.in/redis.v5/internal"
+	"gopkg.in/redis.v5/internal/pool"
+)
+
+// PubSub implements Pub/Sub commands as described in
+// http://redis.io/topics/pubsub. It's NOT safe for concurrent use by
+// multiple goroutines.
+type PubSub struct {
+	base baseClient
+	cmd  *Cmd
+
+	mu       sync.Mutex
+	channels []string
+	patterns []string
+}
+
+func (c *PubSub) conn() (*pool.Conn, bool, error) {
+	cn, isNew, err := c.base.conn()
+	if err != nil {
+		return nil, false, err
+	}
+	if isNew {
+		c.resubscribe()
+	}
+	return cn, isNew, nil
+}
+
+func (c *PubSub) putConn(cn *pool.Conn, err error) {
+	c.base.putConn(cn, err, true)
+}
+
+func (c *PubSub) subscribe(redisCmd string, channels ...string) error {
+	args := make([]interface{}, 1+len(channels))
+	args[0] = redisCmd
+	for i, channel := range channels {
+		args[1+i] = channel
+	}
+	cmd := NewSliceCmd(args...)
+
+	cn, _, err := c.conn()
+	if err != nil {
+		return err
+	}
+
+	cn.SetWriteTimeout(c.base.opt.WriteTimeout)
+	err = writeCmd(cn, cmd)
+	c.putConn(cn, err)
+	return err
+}
+
+// Subscribe subscribes the client to the specified channels.
+func (c *PubSub) Subscribe(channels ...string) error {
+	err := c.subscribe("SUBSCRIBE", channels...)
+	if err == nil {
+		c.channels = appendIfNotExists(c.channels, channels...)
+	}
+	return err
+}
+
+// PSubscribe subscribes the client to the given patterns.
+func (c *PubSub) PSubscribe(patterns ...string) error {
+	err := c.subscribe("PSUBSCRIBE", patterns...)
+	if err == nil {
+		c.patterns = appendIfNotExists(c.patterns, patterns...)
+	}
+	return err
+}
+
+// Unsubscribe unsubscribes the client from the given channels, or from
+// all of them if none are given.
+func (c *PubSub) Unsubscribe(channels ...string) error {
+	err := c.subscribe("UNSUBSCRIBE", channels...)
+	if err == nil {
+		c.channels = remove(c.channels, channels...)
+	}
+	return err
+}
+
+// PUnsubscribe unsubscribes the client from the given patterns, or from
+// all of them if none are given.
+func (c *PubSub) PUnsubscribe(patterns ...string) error {
+	err := c.subscribe("PUNSUBSCRIBE", patterns...)
+	if err == nil {
+		c.patterns = remove(c.patterns, patterns...)
+	}
+	return err
+}
+
+func (c *PubSub) Close() error {
+	return c.base.Close()
+}
+
+func (c *PubSub) Ping(payload ...string) error {
+	args := []interface{}{"PING"}
+	if len(payload) == 1 {
+		args = append(args, payload[0])
+	}
+	cmd := NewCmd(args...)
+
+	cn, _, err := c.conn()
+	if err != nil {
+		return err
+	}
+
+	cn.SetWriteTimeout(c.base.opt.WriteTimeout)
+	err = writeCmd(cn, cmd)
+	c.putConn(cn, err)
+	return err
+}
+
+// Subscription is a message received after a successful subscription to a channel.
+type Subscription struct {
+	// Can be "subscribe", "unsubscribe", "psubscribe" or "punsubscribe".
+	Kind string
+	// Channel name we have subscribed to.
+	Channel string
+	// Number of channels we are currently subscribed to.
+	Count int
+}
+
+func (m *Subscription) String() string {
+	return fmt.Sprintf("%s: %s", m.Kind, m.Channel)
+}
+
+// Message is received as a result of a PUBLISH command issued by another client.
+type Message struct {
+	Channel string
+	Pattern string
+	Payload string
+}
+
+func (m *Message) String() string {
+	return fmt.Sprintf("Message<%s: %s>", m.Channel, m.Payload)
+}
+
+// Pong is received as a result of a PING command issued by another client.
+type Pong struct {
+	Payload string
+}
+
+func (p *Pong) String() string {
+	if p.Payload != "" {
+		return fmt.Sprintf("Pong<%s>", p.Payload)
+	}
+	return "Pong"
+}
+
+func (c *PubSub) newMessage(reply interface{}) (interface{}, error) {
+	switch reply := reply.(type) {
+	case string:
+		return &Pong{
+			Payload: reply,
+		}, nil
+	case []interface{}:
+		switch kind := reply[0].(string); kind {
+		case "subscribe", "unsubscribe", "psubscribe", "punsubscribe":
+			return &Subscription{
+				Kind:    kind,
+				Channel: reply[1].(string),
+				Count:   int(reply[2].(int64)),
+			}, nil
+		case "message":
+			return &Message{
+				Channel: reply[1].(string),
+				Payload: reply[2].(string),
+			}, nil
+		case "pmessage":
+			return &Message{
+				Pattern: reply[1].(string),
+				Channel: reply[2].(string),
+				Payload: reply[3].(string),
+			}, nil
+		case "pong":
+			return &Pong{
+				Payload: reply[1].(string),
+			}, nil
+		default:
+			return nil, fmt.Errorf("redis: unsupported pubsub message: %q", kind)
+		}
+	default:
+		return nil, fmt.Errorf("redis: unsupported pubsub message: %#v", reply)
+	}
+}
+
+// ReceiveTimeout acts like Receive but returns an error if a message
+// is not received in time. This is a low-level API and most clients
+// should use ReceiveMessage.
+func (c *PubSub) ReceiveTimeout(timeout time.Duration) (interface{}, error) {
+	if c.cmd == nil {
+		c.cmd = NewCmd()
+	}
+
+	cn, _, err := c.conn()
+	if err != nil {
+		return nil, err
+	}
+
+	cn.SetReadTimeout(timeout)
+	err = c.cmd.readReply(cn)
+	c.putConn(cn, err)
+	if err != nil {
+		return nil, err
+	}
+
+	return c.newMessage(c.cmd.Val())
+}
+
+// Receive returns a message as a Subscription, Message, Pong or error.
+// See PubSub example for details. This is a low-level API and most clients
+// should use ReceiveMessage.
+func (c *PubSub) Receive() (interface{}, error) {
+	return c.ReceiveTimeout(0)
+}
+
+// ReceiveMessage returns a Message or an error, ignoring Subscription and Pong
+// messages. It automatically reconnects to the Redis server and resubscribes
+// to channels in case of network errors.
+func (c *PubSub) ReceiveMessage() (*Message, error) {
+	return c.receiveMessage(5 * time.Second)
+}
+
+func (c *PubSub) receiveMessage(timeout time.Duration) (*Message, error) {
+	var errNum uint
+	for {
+		msgi, err := c.ReceiveTimeout(timeout)
+		if err != nil {
+			if !internal.IsNetworkError(err) {
+				return nil, err
+			}
+
+			errNum++
+			if errNum < 3 {
+				if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
+					err := c.Ping()
+					if err != nil {
+						internal.Logf("PubSub.Ping failed: %s", err)
+					}
+				}
+			} else {
+				// 3 consecutive errors - the connection is broken or
+				// the Redis server is down.
+				// Sleep so we do not exceed the max number of open connections.
+				time.Sleep(time.Second)
+			}
+			continue
+		}
+
+		// Reset error number, because we received a message.
+		errNum = 0
+
+		switch msg := msgi.(type) {
+		case *Subscription:
+			// Ignore.
+		case *Pong:
+			// Ignore.
+		case *Message:
+			return msg, nil
+		default:
+			return nil, fmt.Errorf("redis: unknown message: %T", msgi)
+		}
+	}
+}
+
+func (c *PubSub) resubscribe() {
+	if len(c.channels) > 0 {
+		if err := c.Subscribe(c.channels...); err != nil {
+			internal.Logf("Subscribe failed: %s", err)
+		}
+	}
+	if len(c.patterns) > 0 {
+		if err := c.PSubscribe(c.patterns...); err != nil {
+			internal.Logf("PSubscribe failed: %s", err)
+		}
+	}
+}
+
+func remove(ss []string, es ...string) []string {
+	if len(es) == 0 {
+		return ss[:0]
+	}
+	for _, e := range es {
+		for i, s := range ss {
+			if s == e {
+				ss = append(ss[:i], ss[i+1:]...)
+				break
+			}
+		}
+	}
+	return ss
+}
+
+func appendIfNotExists(ss []string, es ...string) []string {
+loop:
+	for _, e := range es {
+		for _, s := range ss {
+			if s == e {
+				continue loop
+			}
+		}
+		ss = append(ss, e)
+	}
+	return ss
+}
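
PubSub holds a sticky connection and re-subscribes after reconnects, which is why ReceiveMessage is the recommended entry point. A hedged usage sketch (assumes a server on localhost:6379 and something publishing to the hypothetical channel mychannel):

package main

import (
	"fmt"

	redis "gopkg.in/redis.v5"
)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer client.Close()

	pubsub, err := client.Subscribe("mychannel")
	if err != nil {
		fmt.Println("subscribe:", err)
		return
	}
	defer pubsub.Close()

	// Blocks until a message arrives; pings and resubscribes on network errors.
	msg, err := pubsub.ReceiveMessage()
	if err != nil {
		fmt.Println("receive:", err)
		return
	}
	fmt.Println(msg.Channel, msg.Payload)
}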

+ 378 - 0
vendor/gopkg.in/redis.v5/redis.go

@@ -0,0 +1,378 @@
+package redis // import "gopkg.in/redis.v5"
+
+import (
+	"fmt"
+	"log"
+	"time"
+
+	"gopkg.in/redis.v5/internal"
+	"gopkg.in/redis.v5/internal/pool"
+	"gopkg.in/redis.v5/internal/proto"
+)
+
+// Nil is the Redis nil reply, e.g. when a key does not exist.
+const Nil = internal.Nil
+
+func SetLogger(logger *log.Logger) {
+	internal.Logger = logger
+}
+
+func (c *baseClient) String() string {
+	return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB)
+}
+
+func (c *baseClient) conn() (*pool.Conn, bool, error) {
+	cn, isNew, err := c.connPool.Get()
+	if err != nil {
+		return nil, false, err
+	}
+	if !cn.Inited {
+		if err := c.initConn(cn); err != nil {
+			_ = c.connPool.Remove(cn, err)
+			return nil, false, err
+		}
+	}
+	return cn, isNew, nil
+}
+
+func (c *baseClient) putConn(cn *pool.Conn, err error, allowTimeout bool) bool {
+	if internal.IsBadConn(err, allowTimeout) {
+		_ = c.connPool.Remove(cn, err)
+		return false
+	}
+
+	_ = c.connPool.Put(cn)
+	return true
+}
+
+func (c *baseClient) initConn(cn *pool.Conn) error {
+	cn.Inited = true
+
+	if c.opt.Password == "" && c.opt.DB == 0 && !c.opt.ReadOnly {
+		return nil
+	}
+
+	// Temp client for Auth and Select.
+	client := newClient(c.opt, pool.NewSingleConnPool(cn))
+	_, err := client.Pipelined(func(pipe *Pipeline) error {
+		if c.opt.Password != "" {
+			pipe.Auth(c.opt.Password)
+		}
+
+		if c.opt.DB > 0 {
+			pipe.Select(c.opt.DB)
+		}
+
+		if c.opt.ReadOnly {
+			pipe.ReadOnly()
+		}
+
+		return nil
+	})
+	return err
+}
+
+func (c *baseClient) Process(cmd Cmder) error {
+	if c.process != nil {
+		return c.process(cmd)
+	}
+	return c.defaultProcess(cmd)
+}
+
+// WrapProcess replaces the process func. It takes a function createWrapper
+// which is supplied by the user. createWrapper takes the old process func as
+// an input and returns the new wrapper process func. createWrapper should
+// call the old process func within the new process func.
+func (c *baseClient) WrapProcess(fn func(oldProcess func(cmd Cmder) error) func(cmd Cmder) error) {
+	c.process = fn(c.defaultProcess)
+}
+
+func (c *baseClient) defaultProcess(cmd Cmder) error {
+	for i := 0; i <= c.opt.MaxRetries; i++ {
+		cn, _, err := c.conn()
+		if err != nil {
+			cmd.setErr(err)
+			return err
+		}
+
+		cn.SetWriteTimeout(c.opt.WriteTimeout)
+		if err := writeCmd(cn, cmd); err != nil {
+			c.putConn(cn, err, false)
+			cmd.setErr(err)
+			if err != nil && internal.IsRetryableError(err) {
+				continue
+			}
+			return err
+		}
+
+		cn.SetReadTimeout(c.cmdTimeout(cmd))
+		err = cmd.readReply(cn)
+		c.putConn(cn, err, false)
+		if err != nil && internal.IsRetryableError(err) {
+			continue
+		}
+
+		return err
+	}
+
+	return cmd.Err()
+}
+
+func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration {
+	if timeout := cmd.readTimeout(); timeout != nil {
+		return *timeout
+	} else {
+		return c.opt.ReadTimeout
+	}
+}
+
+// Close closes the client, releasing any open resources.
+//
+// It is rare to Close a Client, as the Client is meant to be
+// long-lived and shared between many goroutines.
+func (c *baseClient) Close() error {
+	var firstErr error
+	if c.onClose != nil {
+		if err := c.onClose(); err != nil && firstErr == nil {
+			firstErr = err
+		}
+	}
+	if err := c.connPool.Close(); err != nil && firstErr == nil {
+		firstErr = err
+	}
+	return firstErr
+}
+
+func (c *baseClient) getAddr() string {
+	return c.opt.Addr
+}
+
+type pipelineProcessor func(*pool.Conn, []Cmder) (bool, error)
+
+func (c *baseClient) pipelineExecer(p pipelineProcessor) pipelineExecer {
+	return func(cmds []Cmder) error {
+		var firstErr error
+		for i := 0; i <= c.opt.MaxRetries; i++ {
+			cn, _, err := c.conn()
+			if err != nil {
+				setCmdsErr(cmds, err)
+				return err
+			}
+
+			canRetry, err := p(cn, cmds)
+			c.putConn(cn, err, false)
+			if err == nil {
+				return nil
+			}
+			if firstErr == nil {
+				firstErr = err
+			}
+			if !canRetry || !internal.IsRetryableError(err) {
+				break
+			}
+		}
+		return firstErr
+	}
+}
+
+func (c *baseClient) pipelineProcessCmds(cn *pool.Conn, cmds []Cmder) (retry bool, firstErr error) {
+	cn.SetWriteTimeout(c.opt.WriteTimeout)
+	if err := writeCmd(cn, cmds...); err != nil {
+		setCmdsErr(cmds, err)
+		return true, err
+	}
+
+	// Set read timeout for all commands.
+	cn.SetReadTimeout(c.opt.ReadTimeout)
+	return pipelineReadCmds(cn, cmds)
+}
+
+func pipelineReadCmds(cn *pool.Conn, cmds []Cmder) (retry bool, firstErr error) {
+	for i, cmd := range cmds {
+		err := cmd.readReply(cn)
+		if err == nil {
+			continue
+		}
+		if i == 0 {
+			retry = true
+		}
+		if firstErr == nil {
+			firstErr = err
+		}
+	}
+	return false, firstErr
+}
+
+func (c *baseClient) txPipelineProcessCmds(cn *pool.Conn, cmds []Cmder) (bool, error) {
+	cn.SetWriteTimeout(c.opt.WriteTimeout)
+	if err := txPipelineWriteMulti(cn, cmds); err != nil {
+		setCmdsErr(cmds, err)
+		return true, err
+	}
+
+	// Set read timeout for all commands.
+	cn.SetReadTimeout(c.opt.ReadTimeout)
+
+	if err := c.txPipelineReadQueued(cn, cmds); err != nil {
+		return false, err
+	}
+
+	_, err := pipelineReadCmds(cn, cmds)
+	return false, err
+}
+
+func txPipelineWriteMulti(cn *pool.Conn, cmds []Cmder) error {
+	multiExec := make([]Cmder, 0, len(cmds)+2)
+	multiExec = append(multiExec, NewStatusCmd("MULTI"))
+	multiExec = append(multiExec, cmds...)
+	multiExec = append(multiExec, NewSliceCmd("EXEC"))
+	return writeCmd(cn, multiExec...)
+}
+
+func (c *baseClient) txPipelineReadQueued(cn *pool.Conn, cmds []Cmder) error {
+	var firstErr error
+
+	// Parse queued replies.
+	var statusCmd StatusCmd
+	if err := statusCmd.readReply(cn); err != nil && firstErr == nil {
+		firstErr = err
+	}
+
+	for _, cmd := range cmds {
+		err := statusCmd.readReply(cn)
+		if err != nil {
+			cmd.setErr(err)
+			if firstErr == nil {
+				firstErr = err
+			}
+		}
+	}
+
+	// Parse number of replies.
+	line, err := cn.Rd.ReadLine()
+	if err != nil {
+		if err == Nil {
+			err = TxFailedErr
+		}
+		return err
+	}
+
+	switch line[0] {
+	case proto.ErrorReply:
+		return proto.ParseErrorReply(line)
+	case proto.ArrayReply:
+		// ok
+	default:
+		err := fmt.Errorf("redis: expected '*', but got line %q", line)
+		return err
+	}
+
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+// Client is a Redis client representing a pool of zero or more
+// underlying connections. It's safe for concurrent use by multiple
+// goroutines.
+type Client struct {
+	baseClient
+	cmdable
+}
+
+func newClient(opt *Options, pool pool.Pooler) *Client {
+	client := Client{
+		baseClient: baseClient{
+			opt:      opt,
+			connPool: pool,
+		},
+	}
+	client.cmdable.process = client.Process
+	return &client
+}
+
+// NewClient returns a client to the Redis Server specified by Options.
+func NewClient(opt *Options) *Client {
+	opt.init()
+	return newClient(opt, newConnPool(opt))
+}
+
+func (c *Client) copy() *Client {
+	c2 := new(Client)
+	*c2 = *c
+	c2.cmdable.process = c2.Process
+	return c2
+}
+
+// PoolStats returns connection pool stats.
+func (c *Client) PoolStats() *PoolStats {
+	s := c.connPool.Stats()
+	return &PoolStats{
+		Requests: s.Requests,
+		Hits:     s.Hits,
+		Timeouts: s.Timeouts,
+
+		TotalConns: s.TotalConns,
+		FreeConns:  s.FreeConns,
+	}
+}
+
+func (c *Client) Pipelined(fn func(*Pipeline) error) ([]Cmder, error) {
+	return c.Pipeline().pipelined(fn)
+}
+
+func (c *Client) Pipeline() *Pipeline {
+	pipe := Pipeline{
+		exec: c.pipelineExecer(c.pipelineProcessCmds),
+	}
+	pipe.cmdable.process = pipe.Process
+	pipe.statefulCmdable.process = pipe.Process
+	return &pipe
+}
+
+func (c *Client) TxPipelined(fn func(*Pipeline) error) ([]Cmder, error) {
+	return c.TxPipeline().pipelined(fn)
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *Client) TxPipeline() *Pipeline {
+	pipe := Pipeline{
+		exec: c.pipelineExecer(c.txPipelineProcessCmds),
+	}
+	pipe.cmdable.process = pipe.Process
+	pipe.statefulCmdable.process = pipe.Process
+	return &pipe
+}
+
+func (c *Client) pubSub() *PubSub {
+	return &PubSub{
+		base: baseClient{
+			opt:      c.opt,
+			connPool: pool.NewStickyConnPool(c.connPool.(*pool.ConnPool), false),
+		},
+	}
+}
+
+// Subscribe subscribes the client to the specified channels.
+func (c *Client) Subscribe(channels ...string) (*PubSub, error) {
+	pubsub := c.pubSub()
+	if len(channels) > 0 {
+		if err := pubsub.Subscribe(channels...); err != nil {
+			pubsub.Close()
+			return nil, err
+		}
+	}
+	return pubsub, nil
+}
+
+// PSubscribe subscribes the client to the given patterns.
+func (c *Client) PSubscribe(channels ...string) (*PubSub, error) {
+	pubsub := c.pubSub()
+	if len(channels) > 0 {
+		if err := pubsub.PSubscribe(channels...); err != nil {
+			pubsub.Close()
+			return nil, err
+		}
+	}
+	return pubsub, nil
+}
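
For orientation, a minimal sketch of how the v5 client and pipeline API added above is typically used from application code. The address, key names, and expirations are illustrative, not taken from this change.

package main

import (
	"fmt"
	"time"

	"gopkg.in/redis.v5"
)

func main() {
	// A Client owns a connection pool and is safe for concurrent use.
	client := redis.NewClient(&redis.Options{
		Addr: "localhost:6379", // illustrative address
	})
	defer client.Close()

	// Pipelined sends the queued commands in a single round trip.
	cmds, err := client.Pipelined(func(pipe *redis.Pipeline) error {
		pipe.Set("example:key", "value", time.Minute)
		pipe.Incr("example:counter")
		return nil
	})
	fmt.Println(len(cmds), err)

	// TxPipelined additionally wraps the batch in MULTI/EXEC.
	_, err = client.TxPipelined(func(pipe *redis.Pipeline) error {
		pipe.Incr("example:counter")
		return nil
	})
	fmt.Println(err)
}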

+ 35 - 0
vendor/gopkg.in/redis.v5/redis_context.go

@@ -0,0 +1,35 @@
+// +build go1.7
+
+package redis
+
+import (
+	"context"
+
+	"gopkg.in/redis.v5/internal/pool"
+)
+
+type baseClient struct {
+	connPool pool.Pooler
+	opt      *Options
+
+	process func(Cmder) error
+	onClose func() error // hook called when client is closed
+
+	ctx context.Context
+}
+
+func (c *Client) Context() context.Context {
+	if c.ctx != nil {
+		return c.ctx
+	}
+	return context.Background()
+}
+
+func (c *Client) WithContext(ctx context.Context) *Client {
+	if ctx == nil {
+		panic("nil context")
+	}
+	c2 := c.copy()
+	c2.ctx = ctx
+	return c2
+}
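
As the build tag above indicates, the context accessors only exist on Go 1.7+; as far as this file shows, the v5 client simply carries the context on a copy and exposes it via Context(). A small sketch with an illustrative address:

package main

import (
	"context"
	"fmt"

	"gopkg.in/redis.v5"
)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // illustrative address
	defer client.Close()

	// WithContext returns a shallow copy bound to ctx; Context falls
	// back to context.Background() when no context has been set.
	ctx := context.Background()
	c2 := client.WithContext(ctx)
	fmt.Println(c2.Context() == ctx) // true
}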

+ 15 - 0
vendor/gopkg.in/redis.v5/redis_no_context.go

@@ -0,0 +1,15 @@
+// +build !go1.7
+
+package redis
+
+import (
+	"gopkg.in/redis.v5/internal/pool"
+)
+
+type baseClient struct {
+	connPool pool.Pooler
+	opt      *Options
+
+	process func(Cmder) error
+	onClose func() error // hook called when client is closed
+}

+ 140 - 0
vendor/gopkg.in/redis.v5/result.go

@@ -0,0 +1,140 @@
+package redis
+
+import "time"
+
+// NewCmdResult returns a Cmd initialised with val and err for testing
+func NewCmdResult(val interface{}, err error) *Cmd {
+	var cmd Cmd
+	cmd.val = val
+	cmd.setErr(err)
+	return &cmd
+}
+
+// NewSliceResult returns a SliceCmd initialised with val and err for testing
+func NewSliceResult(val []interface{}, err error) *SliceCmd {
+	var cmd SliceCmd
+	cmd.val = val
+	cmd.setErr(err)
+	return &cmd
+}
+
+// NewStatusResult returns a StatusCmd initialised with val and err for testing
+func NewStatusResult(val string, err error) *StatusCmd {
+	var cmd StatusCmd
+	cmd.val = val
+	cmd.setErr(err)
+	return &cmd
+}
+
+// NewIntResult returns an IntCmd initialised with val and err for testing
+func NewIntResult(val int64, err error) *IntCmd {
+	var cmd IntCmd
+	cmd.val = val
+	cmd.setErr(err)
+	return &cmd
+}
+
+// NewDurationResult returns a DurationCmd initialised with val and err for testing
+func NewDurationResult(val time.Duration, err error) *DurationCmd {
+	var cmd DurationCmd
+	cmd.val = val
+	cmd.setErr(err)
+	return &cmd
+}
+
+// NewBoolResult returns a BoolCmd initialised with val and err for testing
+func NewBoolResult(val bool, err error) *BoolCmd {
+	var cmd BoolCmd
+	cmd.val = val
+	cmd.setErr(err)
+	return &cmd
+}
+
+// NewStringResult returns a StringCmd initialised with val and err for testing
+func NewStringResult(val string, err error) *StringCmd {
+	var cmd StringCmd
+	cmd.val = []byte(val)
+	cmd.setErr(err)
+	return &cmd
+}
+
+// NewFloatResult returns a FloatCmd initialised with val and err for testing
+func NewFloatResult(val float64, err error) *FloatCmd {
+	var cmd FloatCmd
+	cmd.val = val
+	cmd.setErr(err)
+	return &cmd
+}
+
+// NewStringSliceResult returns a StringSliceCmd initialised with val and err for testing
+func NewStringSliceResult(val []string, err error) *StringSliceCmd {
+	var cmd StringSliceCmd
+	cmd.val = val
+	cmd.setErr(err)
+	return &cmd
+}
+
+// NewBoolSliceResult returns a BoolSliceCmd initialised with val and err for testing
+func NewBoolSliceResult(val []bool, err error) *BoolSliceCmd {
+	var cmd BoolSliceCmd
+	cmd.val = val
+	cmd.setErr(err)
+	return &cmd
+}
+
+// NewStringStringMapResult returns a StringStringMapCmd initialised with val and err for testing
+func NewStringStringMapResult(val map[string]string, err error) *StringStringMapCmd {
+	var cmd StringStringMapCmd
+	cmd.val = val
+	cmd.setErr(err)
+	return &cmd
+}
+
+// NewStringIntMapCmdResult returns a StringIntMapCmd initialised with val and err for testing
+func NewStringIntMapCmdResult(val map[string]int64, err error) *StringIntMapCmd {
+	var cmd StringIntMapCmd
+	cmd.val = val
+	cmd.setErr(err)
+	return &cmd
+}
+
+// NewZSliceCmdResult returns a ZSliceCmd initialised with val and err for testing
+func NewZSliceCmdResult(val []Z, err error) *ZSliceCmd {
+	var cmd ZSliceCmd
+	cmd.val = val
+	cmd.setErr(err)
+	return &cmd
+}
+
+// NewScanCmdResult returns a ScanCmd initialised with val and err for testing
+func NewScanCmdResult(keys []string, cursor uint64, err error) *ScanCmd {
+	var cmd ScanCmd
+	cmd.page = keys
+	cmd.cursor = cursor
+	cmd.setErr(err)
+	return &cmd
+}
+
+// NewClusterSlotsCmdResult returns a ClusterSlotsCmd initialised with val and err for testing
+func NewClusterSlotsCmdResult(val []ClusterSlot, err error) *ClusterSlotsCmd {
+	var cmd ClusterSlotsCmd
+	cmd.val = val
+	cmd.setErr(err)
+	return &cmd
+}
+
+// NewGeoLocationCmdResult returns a GeoLocationCmd initialised with val and err for testing
+func NewGeoLocationCmdResult(val []GeoLocation, err error) *GeoLocationCmd {
+	var cmd GeoLocationCmd
+	cmd.locations = val
+	cmd.setErr(err)
+	return &cmd
+}
+
+// NewCommandsInfoCmdResult returns a CommandsInfoCmd initialised with val and err for testing
+func NewCommandsInfoCmdResult(val map[string]*CommandInfo, err error) *CommandsInfoCmd {
+	var cmd CommandsInfoCmd
+	cmd.val = val
+	cmd.setErr(err)
+	return &cmd
+}
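
These constructors exist so command results can be stubbed in unit tests without a running server. A sketch built around a hypothetical fetch helper; the values and the redis.Nil cache-miss case are illustrative.

package cache_test

import (
	"testing"

	"gopkg.in/redis.v5"
)

// fetch is a hypothetical helper that only depends on a *StringCmd.
func fetch(cmd *redis.StringCmd) (string, error) {
	return cmd.Result()
}

func TestFetchStub(t *testing.T) {
	// NewStringResult builds a StringCmd without touching the network.
	val, err := fetch(redis.NewStringResult("cached-value", nil))
	if err != nil || val != "cached-value" {
		t.Fatalf("got %q, %v", val, err)
	}

	// Errors can be injected the same way, e.g. a cache miss.
	if _, err := fetch(redis.NewStringResult("", redis.Nil)); err != redis.Nil {
		t.Fatalf("expected redis.Nil, got %v", err)
	}
}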

+ 420 - 0
vendor/gopkg.in/redis.v5/ring.go

@@ -0,0 +1,420 @@
+package redis
+
+import (
+	"errors"
+	"fmt"
+	"math/rand"
+	"strconv"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"gopkg.in/redis.v5/internal"
+	"gopkg.in/redis.v5/internal/consistenthash"
+	"gopkg.in/redis.v5/internal/hashtag"
+	"gopkg.in/redis.v5/internal/pool"
+)
+
+var errRingShardsDown = errors.New("redis: all ring shards are down")
+
+// RingOptions are used to configure a ring client and should be
+// passed to NewRing.
+type RingOptions struct {
+	// Map of name => host:port addresses of ring shards.
+	Addrs map[string]string
+
+	// Frequency of PING commands sent to check shard availability.
+	// A shard is considered down after 3 subsequent failed checks.
+	HeartbeatFrequency time.Duration
+
+	// Following options are copied from Options struct.
+
+	DB       int
+	Password string
+
+	MaxRetries int
+
+	DialTimeout  time.Duration
+	ReadTimeout  time.Duration
+	WriteTimeout time.Duration
+
+	PoolSize           int
+	PoolTimeout        time.Duration
+	IdleTimeout        time.Duration
+	IdleCheckFrequency time.Duration
+}
+
+func (opt *RingOptions) init() {
+	if opt.HeartbeatFrequency == 0 {
+		opt.HeartbeatFrequency = 500 * time.Millisecond
+	}
+}
+
+func (opt *RingOptions) clientOptions() *Options {
+	return &Options{
+		DB:       opt.DB,
+		Password: opt.Password,
+
+		DialTimeout:  opt.DialTimeout,
+		ReadTimeout:  opt.ReadTimeout,
+		WriteTimeout: opt.WriteTimeout,
+
+		PoolSize:           opt.PoolSize,
+		PoolTimeout:        opt.PoolTimeout,
+		IdleTimeout:        opt.IdleTimeout,
+		IdleCheckFrequency: opt.IdleCheckFrequency,
+	}
+}
+
+type ringShard struct {
+	Client *Client
+	down   int32
+}
+
+func (shard *ringShard) String() string {
+	var state string
+	if shard.IsUp() {
+		state = "up"
+	} else {
+		state = "down"
+	}
+	return fmt.Sprintf("%s is %s", shard.Client, state)
+}
+
+func (shard *ringShard) IsDown() bool {
+	const threshold = 3
+	return atomic.LoadInt32(&shard.down) >= threshold
+}
+
+func (shard *ringShard) IsUp() bool {
+	return !shard.IsDown()
+}
+
+// Vote votes to set shard state and returns true if state was changed.
+func (shard *ringShard) Vote(up bool) bool {
+	if up {
+		changed := shard.IsDown()
+		atomic.StoreInt32(&shard.down, 0)
+		return changed
+	}
+
+	if shard.IsDown() {
+		return false
+	}
+
+	atomic.AddInt32(&shard.down, 1)
+	return shard.IsDown()
+}
+
+// Ring is a Redis client that uses consistent hashing to distribute
+// keys across multiple Redis servers (shards). It's safe for
+// concurrent use by multiple goroutines.
+//
+// Ring monitors the state of each shard and removes dead shards from
+// the ring. When a shard comes online it is added back to the ring. This
+// gives you maximum availability and partition tolerance, but no
+// consistency between different shards or even clients. Each client
+// uses shards that are available to the client and does not do any
+// coordination when shard state is changed.
+//
+// Ring should be used when you need multiple Redis servers for caching
+// and can tolerate losing data when one of the servers dies.
+// Otherwise you should use Redis Cluster.
+type Ring struct {
+	cmdable
+
+	opt       *RingOptions
+	nreplicas int
+
+	mu     sync.RWMutex
+	hash   *consistenthash.Map
+	shards map[string]*ringShard
+
+	cmdsInfoOnce *sync.Once
+	cmdsInfo     map[string]*CommandInfo
+
+	closed bool
+}
+
+func NewRing(opt *RingOptions) *Ring {
+	const nreplicas = 100
+	opt.init()
+	ring := &Ring{
+		opt:       opt,
+		nreplicas: nreplicas,
+
+		hash:   consistenthash.New(nreplicas, nil),
+		shards: make(map[string]*ringShard),
+
+		cmdsInfoOnce: new(sync.Once),
+	}
+	ring.cmdable.process = ring.Process
+	for name, addr := range opt.Addrs {
+		clopt := opt.clientOptions()
+		clopt.Addr = addr
+		ring.addClient(name, NewClient(clopt))
+	}
+	go ring.heartbeat()
+	return ring
+}
+
+// PoolStats returns accumulated connection pool stats.
+func (c *Ring) PoolStats() *PoolStats {
+	var acc PoolStats
+	for _, shard := range c.shards {
+		s := shard.Client.connPool.Stats()
+		acc.Requests += s.Requests
+		acc.Hits += s.Hits
+		acc.Timeouts += s.Timeouts
+		acc.TotalConns += s.TotalConns
+		acc.FreeConns += s.FreeConns
+	}
+	return &acc
+}
+
+// ForEachShard concurrently calls the fn on each live shard in the ring.
+// It returns the first error if any.
+func (c *Ring) ForEachShard(fn func(client *Client) error) error {
+	var wg sync.WaitGroup
+	errCh := make(chan error, 1)
+	for _, shard := range c.shards {
+		if shard.IsDown() {
+			continue
+		}
+
+		wg.Add(1)
+		go func(shard *ringShard) {
+			defer wg.Done()
+			err := fn(shard.Client)
+			if err != nil {
+				select {
+				case errCh <- err:
+				default:
+				}
+			}
+		}(shard)
+	}
+	wg.Wait()
+
+	select {
+	case err := <-errCh:
+		return err
+	default:
+		return nil
+	}
+}
+
+func (c *Ring) cmdInfo(name string) *CommandInfo {
+	c.cmdsInfoOnce.Do(func() {
+		for _, shard := range c.shards {
+			cmdsInfo, err := shard.Client.Command().Result()
+			if err == nil {
+				c.cmdsInfo = cmdsInfo
+				return
+			}
+		}
+		c.cmdsInfoOnce = &sync.Once{}
+	})
+	if c.cmdsInfo == nil {
+		return nil
+	}
+	return c.cmdsInfo[name]
+}
+
+func (c *Ring) addClient(name string, cl *Client) {
+	c.mu.Lock()
+	c.hash.Add(name)
+	c.shards[name] = &ringShard{Client: cl}
+	c.mu.Unlock()
+}
+
+func (c *Ring) shardByKey(key string) (*ringShard, error) {
+	key = hashtag.Key(key)
+
+	c.mu.RLock()
+
+	if c.closed {
+		c.mu.RUnlock()
+		return nil, pool.ErrClosed
+	}
+
+	name := c.hash.Get(key)
+	if name == "" {
+		c.mu.RUnlock()
+		return nil, errRingShardsDown
+	}
+
+	shard := c.shards[name]
+	c.mu.RUnlock()
+	return shard, nil
+}
+
+func (c *Ring) randomShard() (*ringShard, error) {
+	return c.shardByKey(strconv.Itoa(rand.Int()))
+}
+
+func (c *Ring) shardByName(name string) (*ringShard, error) {
+	if name == "" {
+		return c.randomShard()
+	}
+
+	c.mu.RLock()
+	shard := c.shards[name]
+	c.mu.RUnlock()
+	return shard, nil
+}
+
+func (c *Ring) cmdShard(cmd Cmder) (*ringShard, error) {
+	cmdInfo := c.cmdInfo(cmd.name())
+	firstKey := cmd.arg(cmdFirstKeyPos(cmd, cmdInfo))
+	return c.shardByKey(firstKey)
+}
+
+func (c *Ring) Process(cmd Cmder) error {
+	shard, err := c.cmdShard(cmd)
+	if err != nil {
+		cmd.setErr(err)
+		return err
+	}
+	return shard.Client.Process(cmd)
+}
+
+// rebalance removes dead shards from the Ring.
+func (c *Ring) rebalance() {
+	hash := consistenthash.New(c.nreplicas, nil)
+	for name, shard := range c.shards {
+		if shard.IsUp() {
+			hash.Add(name)
+		}
+	}
+
+	c.mu.Lock()
+	c.hash = hash
+	c.mu.Unlock()
+}
+
+// heartbeat monitors the state of each shard in the ring.
+func (c *Ring) heartbeat() {
+	ticker := time.NewTicker(c.opt.HeartbeatFrequency)
+	defer ticker.Stop()
+	for range ticker.C {
+		var rebalance bool
+
+		c.mu.RLock()
+
+		if c.closed {
+			c.mu.RUnlock()
+			break
+		}
+
+		for _, shard := range c.shards {
+			err := shard.Client.Ping().Err()
+			if shard.Vote(err == nil || err == pool.ErrPoolTimeout) {
+				internal.Logf("ring shard state changed: %s", shard)
+				rebalance = true
+			}
+		}
+
+		c.mu.RUnlock()
+
+		if rebalance {
+			c.rebalance()
+		}
+	}
+}
+
+// Close closes the ring client, releasing any open resources.
+//
+// It is rare to Close a Ring, as the Ring is meant to be long-lived
+// and shared between many goroutines.
+func (c *Ring) Close() error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.closed {
+		return nil
+	}
+	c.closed = true
+
+	var firstErr error
+	for _, shard := range c.shards {
+		if err := shard.Client.Close(); err != nil && firstErr == nil {
+			firstErr = err
+		}
+	}
+	c.hash = nil
+	c.shards = nil
+
+	return firstErr
+}
+
+func (c *Ring) Pipeline() *Pipeline {
+	pipe := Pipeline{
+		exec: c.pipelineExec,
+	}
+	pipe.cmdable.process = pipe.Process
+	pipe.statefulCmdable.process = pipe.Process
+	return &pipe
+}
+
+func (c *Ring) Pipelined(fn func(*Pipeline) error) ([]Cmder, error) {
+	return c.Pipeline().pipelined(fn)
+}
+
+func (c *Ring) pipelineExec(cmds []Cmder) (firstErr error) {
+	cmdsMap := make(map[string][]Cmder)
+	for _, cmd := range cmds {
+		cmdInfo := c.cmdInfo(cmd.name())
+		name := cmd.arg(cmdFirstKeyPos(cmd, cmdInfo))
+		if name != "" {
+			name = c.hash.Get(hashtag.Key(name))
+		}
+		cmdsMap[name] = append(cmdsMap[name], cmd)
+	}
+
+	for i := 0; i <= c.opt.MaxRetries; i++ {
+		var failedCmdsMap map[string][]Cmder
+
+		for name, cmds := range cmdsMap {
+			shard, err := c.shardByName(name)
+			if err != nil {
+				setCmdsErr(cmds, err)
+				if firstErr == nil {
+					firstErr = err
+				}
+				continue
+			}
+
+			cn, _, err := shard.Client.conn()
+			if err != nil {
+				setCmdsErr(cmds, err)
+				if firstErr == nil {
+					firstErr = err
+				}
+				continue
+			}
+
+			canRetry, err := shard.Client.pipelineProcessCmds(cn, cmds)
+			shard.Client.putConn(cn, err, false)
+			if err == nil {
+				continue
+			}
+			if firstErr == nil {
+				firstErr = err
+			}
+			if canRetry && internal.IsRetryableError(err) {
+				if failedCmdsMap == nil {
+					failedCmdsMap = make(map[string][]Cmder)
+				}
+				failedCmdsMap[name] = cmds
+			}
+		}
+
+		if len(failedCmdsMap) == 0 {
+			break
+		}
+		cmdsMap = failedCmdsMap
+	}
+
+	return firstErr
+}
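
A minimal sketch of the Ring API defined above; the shard names, addresses, and keys are illustrative.

package main

import (
	"fmt"
	"time"

	"gopkg.in/redis.v5"
)

func main() {
	// Keys are spread over the named shards by consistent hashing.
	ring := redis.NewRing(&redis.RingOptions{
		Addrs: map[string]string{
			"shard1": "localhost:6379", // illustrative addresses
			"shard2": "localhost:6380",
		},
		HeartbeatFrequency: 500 * time.Millisecond,
	})
	defer ring.Close()

	ring.Set("example:key", "value", 0)
	fmt.Println(ring.Get("example:key").Result())
	fmt.Println(ring.PoolStats())
}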

+ 13 - 9
vendor/gopkg.in/redis.v2/script.go → vendor/gopkg.in/redis.v5/script.go

@@ -8,12 +8,16 @@ import (
 )
 
 type scripter interface {
-	Eval(script string, keys []string, args []string) *Cmd
-	EvalSha(sha1 string, keys []string, args []string) *Cmd
+	Eval(script string, keys []string, args ...interface{}) *Cmd
+	EvalSha(sha1 string, keys []string, args ...interface{}) *Cmd
 	ScriptExists(scripts ...string) *BoolSliceCmd
 	ScriptLoad(script string) *StringCmd
 }
 
+var _ scripter = (*Client)(nil)
+var _ scripter = (*Ring)(nil)
+var _ scripter = (*ClusterClient)(nil)
+
 type Script struct {
 	src, hash string
 }
@@ -35,18 +39,18 @@ func (s *Script) Exists(c scripter) *BoolSliceCmd {
 	return c.ScriptExists(s.src)
 }
 
-func (s *Script) Eval(c scripter, keys []string, args []string) *Cmd {
-	return c.Eval(s.src, keys, args)
+func (s *Script) Eval(c scripter, keys []string, args ...interface{}) *Cmd {
+	return c.Eval(s.src, keys, args...)
 }
 
-func (s *Script) EvalSha(c scripter, keys []string, args []string) *Cmd {
-	return c.EvalSha(s.hash, keys, args)
+func (s *Script) EvalSha(c scripter, keys []string, args ...interface{}) *Cmd {
+	return c.EvalSha(s.hash, keys, args...)
 }
 
-func (s *Script) Run(c *Client, keys []string, args []string) *Cmd {
-	r := s.EvalSha(c, keys, args)
+func (s *Script) Run(c scripter, keys []string, args ...interface{}) *Cmd {
+	r := s.EvalSha(c, keys, args...)
 	if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") {
-		return s.Eval(c, keys, args)
+		return s.Eval(c, keys, args...)
 	}
 	return r
 }
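
The signature change above means script arguments are now passed as variadic interface{} values rather than a []string. A sketch under the assumption of an illustrative Lua script and key; NewScript (not shown in this hunk) lives in the same file.

package main

import (
	"fmt"

	"gopkg.in/redis.v5"
)

// incrBy is an illustrative script; Run tries EVALSHA first and falls
// back to EVAL when the script is not yet cached on the server.
var incrBy = redis.NewScript(`return redis.call("INCRBY", KEYS[1], ARGV[1])`)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // illustrative address
	defer client.Close()

	// v5: args are variadic values instead of a []string.
	n, err := incrBy.Run(client, []string{"example:counter"}, 5).Result()
	fmt.Println(n, err)
}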

+ 335 - 0
vendor/gopkg.in/redis.v5/sentinel.go

@@ -0,0 +1,335 @@
+package redis
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"strings"
+	"sync"
+	"time"
+
+	"gopkg.in/redis.v5/internal"
+	"gopkg.in/redis.v5/internal/pool"
+)
+
+//------------------------------------------------------------------------------
+
+// FailoverOptions are used to configure a failover client and should
+// be passed to NewFailoverClient.
+type FailoverOptions struct {
+	// The master name.
+	MasterName string
+	// A seed list of host:port addresses of sentinel nodes.
+	SentinelAddrs []string
+
+	// Following options are copied from Options struct.
+
+	Password string
+	DB       int
+
+	MaxRetries int
+
+	DialTimeout  time.Duration
+	ReadTimeout  time.Duration
+	WriteTimeout time.Duration
+
+	PoolSize           int
+	PoolTimeout        time.Duration
+	IdleTimeout        time.Duration
+	IdleCheckFrequency time.Duration
+}
+
+func (opt *FailoverOptions) options() *Options {
+	return &Options{
+		Addr: "FailoverClient",
+
+		DB:       opt.DB,
+		Password: opt.Password,
+
+		MaxRetries: opt.MaxRetries,
+
+		DialTimeout:  opt.DialTimeout,
+		ReadTimeout:  opt.ReadTimeout,
+		WriteTimeout: opt.WriteTimeout,
+
+		PoolSize:           opt.PoolSize,
+		PoolTimeout:        opt.PoolTimeout,
+		IdleTimeout:        opt.IdleTimeout,
+		IdleCheckFrequency: opt.IdleCheckFrequency,
+	}
+}
+
+// NewFailoverClient returns a Redis client that uses Redis Sentinel
+// for automatic failover. It's safe for concurrent use by multiple
+// goroutines.
+func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
+	opt := failoverOpt.options()
+	opt.init()
+
+	failover := &sentinelFailover{
+		masterName:    failoverOpt.MasterName,
+		sentinelAddrs: failoverOpt.SentinelAddrs,
+
+		opt: opt,
+	}
+
+	client := Client{
+		baseClient: baseClient{
+			opt:      opt,
+			connPool: failover.Pool(),
+
+			onClose: func() error {
+				return failover.Close()
+			},
+		},
+	}
+	client.cmdable.process = client.Process
+
+	return &client
+}
+
+//------------------------------------------------------------------------------
+
+type sentinelClient struct {
+	cmdable
+	baseClient
+}
+
+func newSentinel(opt *Options) *sentinelClient {
+	opt.init()
+	client := sentinelClient{
+		baseClient: baseClient{
+			opt:      opt,
+			connPool: newConnPool(opt),
+		},
+	}
+	client.cmdable = cmdable{client.Process}
+	return &client
+}
+
+func (c *sentinelClient) PubSub() *PubSub {
+	return &PubSub{
+		base: baseClient{
+			opt:      c.opt,
+			connPool: pool.NewStickyConnPool(c.connPool.(*pool.ConnPool), false),
+		},
+	}
+}
+
+func (c *sentinelClient) GetMasterAddrByName(name string) *StringSliceCmd {
+	cmd := NewStringSliceCmd("SENTINEL", "get-master-addr-by-name", name)
+	c.Process(cmd)
+	return cmd
+}
+
+func (c *sentinelClient) Sentinels(name string) *SliceCmd {
+	cmd := NewSliceCmd("SENTINEL", "sentinels", name)
+	c.Process(cmd)
+	return cmd
+}
+
+type sentinelFailover struct {
+	masterName    string
+	sentinelAddrs []string
+
+	opt *Options
+
+	pool     *pool.ConnPool
+	poolOnce sync.Once
+
+	mu       sync.RWMutex
+	sentinel *sentinelClient
+}
+
+func (d *sentinelFailover) Close() error {
+	return d.resetSentinel()
+}
+
+func (d *sentinelFailover) dial() (net.Conn, error) {
+	addr, err := d.MasterAddr()
+	if err != nil {
+		return nil, err
+	}
+	return net.DialTimeout("tcp", addr, d.opt.DialTimeout)
+}
+
+func (d *sentinelFailover) Pool() *pool.ConnPool {
+	d.poolOnce.Do(func() {
+		d.opt.Dialer = d.dial
+		d.pool = newConnPool(d.opt)
+	})
+	return d.pool
+}
+
+func (d *sentinelFailover) MasterAddr() (string, error) {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	// Try last working sentinel.
+	if d.sentinel != nil {
+		addr, err := d.sentinel.GetMasterAddrByName(d.masterName).Result()
+		if err != nil {
+			internal.Logf("sentinel: GetMasterAddrByName %q failed: %s", d.masterName, err)
+			d._resetSentinel()
+		} else {
+			addr := net.JoinHostPort(addr[0], addr[1])
+			internal.Logf("sentinel: %q addr is %s", d.masterName, addr)
+			return addr, nil
+		}
+	}
+
+	for i, sentinelAddr := range d.sentinelAddrs {
+		sentinel := newSentinel(&Options{
+			Addr: sentinelAddr,
+
+			DialTimeout:  d.opt.DialTimeout,
+			ReadTimeout:  d.opt.ReadTimeout,
+			WriteTimeout: d.opt.WriteTimeout,
+
+			PoolSize:    d.opt.PoolSize,
+			PoolTimeout: d.opt.PoolTimeout,
+			IdleTimeout: d.opt.IdleTimeout,
+		})
+		masterAddr, err := sentinel.GetMasterAddrByName(d.masterName).Result()
+		if err != nil {
+			internal.Logf("sentinel: GetMasterAddrByName %q failed: %s", d.masterName, err)
+			sentinel.Close()
+			continue
+		}
+
+		// Push working sentinel to the top.
+		d.sentinelAddrs[0], d.sentinelAddrs[i] = d.sentinelAddrs[i], d.sentinelAddrs[0]
+
+		d.setSentinel(sentinel)
+		addr := net.JoinHostPort(masterAddr[0], masterAddr[1])
+		internal.Logf("sentinel: %q addr is %s", d.masterName, addr)
+		return addr, nil
+	}
+
+	return "", errors.New("redis: all sentinels are unreachable")
+}
+
+func (d *sentinelFailover) setSentinel(sentinel *sentinelClient) {
+	d.discoverSentinels(sentinel)
+	d.sentinel = sentinel
+	go d.listen(sentinel)
+}
+
+func (d *sentinelFailover) resetSentinel() error {
+	d.mu.Lock()
+	err := d._resetSentinel()
+	d.mu.Unlock()
+	return err
+}
+
+func (d *sentinelFailover) _resetSentinel() error {
+	var err error
+	if d.sentinel != nil {
+		err = d.sentinel.Close()
+		d.sentinel = nil
+	}
+	return err
+}
+
+func (d *sentinelFailover) discoverSentinels(sentinel *sentinelClient) {
+	sentinels, err := sentinel.Sentinels(d.masterName).Result()
+	if err != nil {
+		internal.Logf("sentinel: Sentinels %q failed: %s", d.masterName, err)
+		return
+	}
+	for _, sentinel := range sentinels {
+		vals := sentinel.([]interface{})
+		for i := 0; i < len(vals); i += 2 {
+			key := vals[i].(string)
+			if key == "name" {
+				sentinelAddr := vals[i+1].(string)
+				if !contains(d.sentinelAddrs, sentinelAddr) {
+					internal.Logf(
+						"sentinel: discovered new %q sentinel: %s",
+						d.masterName, sentinelAddr,
+					)
+					d.sentinelAddrs = append(d.sentinelAddrs, sentinelAddr)
+				}
+			}
+		}
+	}
+}
+
+// closeOldConns closes connections to the old master after a failover switch.
+func (d *sentinelFailover) closeOldConns(newMaster string) {
+	// Good connections that should be put back to the pool. They
+	// can't be put immediately, because pool.PopFree will return them
+	// again on the next iteration.
+	cnsToPut := make([]*pool.Conn, 0)
+
+	for {
+		cn := d.pool.PopFree()
+		if cn == nil {
+			break
+		}
+		if cn.RemoteAddr().String() != newMaster {
+			err := fmt.Errorf(
+				"sentinel: closing connection to the old master %s",
+				cn.RemoteAddr(),
+			)
+			internal.Logf(err.Error())
+			d.pool.Remove(cn, err)
+		} else {
+			cnsToPut = append(cnsToPut, cn)
+		}
+	}
+
+	for _, cn := range cnsToPut {
+		d.pool.Put(cn)
+	}
+}
+
+func (d *sentinelFailover) listen(sentinel *sentinelClient) {
+	var pubsub *PubSub
+	for {
+		if pubsub == nil {
+			pubsub = sentinel.PubSub()
+
+			if err := pubsub.Subscribe("+switch-master"); err != nil {
+				internal.Logf("sentinel: Subscribe failed: %s", err)
+				pubsub.Close()
+				d.resetSentinel()
+				return
+			}
+		}
+
+		msg, err := pubsub.ReceiveMessage()
+		if err != nil {
+			internal.Logf("sentinel: ReceiveMessage failed: %s", err)
+			pubsub.Close()
+			d.resetSentinel()
+			return
+		}
+
+		switch msg.Channel {
+		case "+switch-master":
+			parts := strings.Split(msg.Payload, " ")
+			if parts[0] != d.masterName {
+				internal.Logf("sentinel: ignore new %s addr", parts[0])
+				continue
+			}
+
+			addr := net.JoinHostPort(parts[3], parts[4])
+			internal.Logf(
+				"sentinel: new %q addr is %s",
+				d.masterName, addr,
+			)
+
+			d.closeOldConns(addr)
+		}
+	}
+}
+
+func contains(slice []string, str string) bool {
+	for _, s := range slice {
+		if s == str {
+			return true
+		}
+	}
+	return false
+}
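
A sketch of the failover client defined above; the master name and sentinel addresses are illustrative. The client resolves the current master through Sentinel and drops connections to the old master after a +switch-master event.

package main

import (
	"fmt"

	"gopkg.in/redis.v5"
)

func main() {
	client := redis.NewFailoverClient(&redis.FailoverOptions{
		MasterName:    "mymaster",                                     // illustrative
		SentinelAddrs: []string{"localhost:26379", "localhost:26380"}, // illustrative
	})
	defer client.Close()

	fmt.Println(client.Ping().Result())
}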

+ 99 - 0
vendor/gopkg.in/redis.v5/tx.go

@@ -0,0 +1,99 @@
+package redis
+
+import (
+	"gopkg.in/redis.v5/internal"
+	"gopkg.in/redis.v5/internal/pool"
+)
+
+// Redis transaction failed.
+const TxFailedErr = internal.RedisError("redis: transaction failed")
+
+// Tx implements Redis transactions as described in
+// http://redis.io/topics/transactions. It's NOT safe for concurrent use
+// by multiple goroutines, because Exec resets the list of watched keys.
+// If you don't need WATCH it is better to use Pipeline.
+type Tx struct {
+	cmdable
+	statefulCmdable
+	baseClient
+}
+
+func (c *Client) newTx() *Tx {
+	tx := Tx{
+		baseClient: baseClient{
+			opt:      c.opt,
+			connPool: pool.NewStickyConnPool(c.connPool.(*pool.ConnPool), true),
+		},
+	}
+	tx.cmdable.process = tx.Process
+	tx.statefulCmdable.process = tx.Process
+	return &tx
+}
+
+func (c *Client) Watch(fn func(*Tx) error, keys ...string) error {
+	tx := c.newTx()
+	if len(keys) > 0 {
+		if err := tx.Watch(keys...).Err(); err != nil {
+			_ = tx.Close()
+			return err
+		}
+	}
+	firstErr := fn(tx)
+	if err := tx.Close(); err != nil && firstErr == nil {
+		firstErr = err
+	}
+	return firstErr
+}
+
+// Close closes the transaction, releasing any open resources.
+func (c *Tx) Close() error {
+	_ = c.Unwatch().Err()
+	return c.baseClient.Close()
+}
+
+// Watch marks the keys to be watched for conditional execution
+// of a transaction.
+func (c *Tx) Watch(keys ...string) *StatusCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "WATCH"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewStatusCmd(args...)
+	c.Process(cmd)
+	return cmd
+}
+
+// Unwatch flushes all the previously watched keys for a transaction.
+func (c *Tx) Unwatch(keys ...string) *StatusCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "UNWATCH"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewStatusCmd(args...)
+	c.Process(cmd)
+	return cmd
+}
+
+func (c *Tx) Pipeline() *Pipeline {
+	pipe := Pipeline{
+		exec: c.pipelineExecer(c.txPipelineProcessCmds),
+	}
+	pipe.cmdable.process = pipe.Process
+	pipe.statefulCmdable.process = pipe.Process
+	return &pipe
+}
+
+// Pipelined executes commands queued in the fn in a transaction
+// and restores the connection state to normal.
+//
+// When using WATCH, EXEC will execute commands only if the watched keys
+// were not modified, allowing for a check-and-set mechanism.
+//
+// Exec always returns a list of commands. If the transaction fails,
+// TxFailedErr is returned. Otherwise Exec returns the error of the
+// first failed command or nil.
+func (c *Tx) Pipelined(fn func(*Pipeline) error) ([]Cmder, error) {
+	return c.Pipeline().pipelined(fn)
+}
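
A sketch of the optimistic-locking pattern Tx enables; the key name and increment logic are illustrative. If a watched key changes between the read and EXEC, the transaction fails with TxFailedErr and the caller can retry.

package main

import (
	"fmt"
	"strconv"

	"gopkg.in/redis.v5"
)

// increment re-reads the current value under WATCH and writes the
// incremented value inside MULTI/EXEC.
func increment(client *redis.Client, key string) error {
	return client.Watch(func(tx *redis.Tx) error {
		n, err := tx.Get(key).Int64()
		if err != nil && err != redis.Nil {
			return err
		}
		_, err = tx.Pipelined(func(pipe *redis.Pipeline) error {
			pipe.Set(key, strconv.FormatInt(n+1, 10), 0)
			return nil
		})
		return err
	}, key)
}

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // illustrative address
	defer client.Close()

	if err := increment(client, "example:counter"); err == redis.TxFailedErr {
		fmt.Println("concurrent modification, retry")
	} else if err != nil {
		fmt.Println(err)
	}
}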

+ 7 - 4
vendor/modules.txt

@@ -314,8 +314,6 @@ google.golang.org/grpc/balancer/base
 gopkg.in/alexcesaro/quotedprintable.v3
 # gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d
 gopkg.in/asn1-ber.v1
-# gopkg.in/bufio.v1 v1.0.0-20140618132640-567b2bfa514e
-gopkg.in/bufio.v1
 # gopkg.in/ini.v1 v1.42.0
 gopkg.in/ini.v1
 # gopkg.in/ldap.v3 v3.0.2
@@ -324,8 +322,13 @@ gopkg.in/ldap.v3
 gopkg.in/macaron.v1
 # gopkg.in/mail.v2 v2.3.1
 gopkg.in/mail.v2
-# gopkg.in/redis.v2 v2.3.2
-gopkg.in/redis.v2
+# gopkg.in/redis.v5 v5.2.9
+gopkg.in/redis.v5
+gopkg.in/redis.v5/internal
+gopkg.in/redis.v5/internal/consistenthash
+gopkg.in/redis.v5/internal/hashtag
+gopkg.in/redis.v5/internal/pool
+gopkg.in/redis.v5/internal/proto
 # gopkg.in/square/go-jose.v2 v2.3.0
 gopkg.in/square/go-jose.v2
 gopkg.in/square/go-jose.v2/cipher