feat(mqe): add token cache

bergquist committed 9 years ago (commit 362162d6fa)
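
Below is a minimal, self-contained sketch of the caching pattern this commit introduces in pkg/tsdb/mqe/token_client.go: token lookups are memoized per datasource id in a patrickmn/go-cache instance with a 5 minute default expiration and 30 second cleanup interval. The `TokenBody` struct and `getTokenData` helper here are illustrative stand-ins, not the actual Grafana types.

```go
package main

import (
	"fmt"
	"strconv"
	"time"

	"github.com/patrickmn/go-cache"
)

// TokenBody stands in for the MQE token payload parsed by the real client.
type TokenBody struct {
	Functions []string
}

// Package-level cache, mirroring the init() added to token_client.go:
// 5 minute default TTL, expired entries purged every 30 seconds.
var tokenCache = cache.New(5*time.Minute, 30*time.Second)

// getTokenData returns the cached token for a datasource id when present;
// otherwise it "fetches" one (faked here) and stores it with the default TTL.
func getTokenData(datasourceID int64) *TokenBody {
	key := strconv.FormatInt(datasourceID, 10)

	if item, found := tokenCache.Get(key); found {
		if body, ok := item.(*TokenBody); ok {
			return body
		}
	}

	body := &TokenBody{Functions: []string{"aggregate.max"}} // placeholder for RequestTokenData
	tokenCache.Set(key, body, cache.DefaultExpiration)
	return body
}

func main() {
	first := getTokenData(1)
	second := getTokenData(1) // served from the cache for the next 5 minutes
	fmt.Println(first == second)
}
```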

+ 7 - 7
pkg/tsdb/mqe/model_parser.go

@@ -6,14 +6,14 @@ import (
 	"github.com/grafana/grafana/pkg/tsdb"
 	"github.com/grafana/grafana/pkg/tsdb"
 )
 )
 
 
-func NewQueryParser() *MQEQueryParser {
-	return &MQEQueryParser{}
+func NewQueryParser() *QueryParser {
+	return &QueryParser{}
 }
 }
 
 
-type MQEQueryParser struct{}
+type QueryParser struct{}
 
 
-func (qp *MQEQueryParser) Parse(model *simplejson.Json, dsInfo *models.DataSource, queryContext *tsdb.QueryContext) (*MQEQuery, error) {
-	query := &MQEQuery{TimeRange: queryContext.TimeRange}
+func (qp *QueryParser) Parse(model *simplejson.Json, dsInfo *models.DataSource, queryContext *tsdb.QueryContext) (*Query, error) {
+	query := &Query{TimeRange: queryContext.TimeRange}
 	query.AddAppToAlias = model.Get("addAppToAlias").MustBool(false)
 	query.AddAppToAlias = model.Get("addAppToAlias").MustBool(false)
 	query.AddHostToAlias = model.Get("addHostToAlias").MustBool(false)
 	query.AddHostToAlias = model.Get("addHostToAlias").MustBool(false)
 	query.UseRawQuery = model.Get("rawQuery").MustBool(false)
 	query.UseRawQuery = model.Get("rawQuery").MustBool(false)
@@ -22,11 +22,11 @@ func (qp *MQEQueryParser) Parse(model *simplejson.Json, dsInfo *models.DataSourc
 	query.Apps = model.Get("apps").MustStringArray([]string{})
 	query.Apps = model.Get("apps").MustStringArray([]string{})
 	query.Hosts = model.Get("hosts").MustStringArray([]string{})
 	query.Hosts = model.Get("hosts").MustStringArray([]string{})
 
 
-	var metrics []MQEMetric
+	var metrics []Metric
 	var err error
 	var err error
 	for _, metricsObj := range model.Get("metrics").MustArray() {
 	for _, metricsObj := range model.Get("metrics").MustArray() {
 		metricJson := simplejson.NewFromAny(metricsObj)
 		metricJson := simplejson.NewFromAny(metricsObj)
-		var m MQEMetric
+		var m Metric
 
 
 		m.Alias = metricJson.Get("alias").MustString("")
 		m.Alias = metricJson.Get("alias").MustString("")
 		m.Metric, err = metricJson.Get("metric").String()
 		m.Metric, err = metricJson.Get("metric").String()

+ 1 - 1
pkg/tsdb/mqe/model_parser_test.go

@@ -11,7 +11,7 @@ import (
 
 func TestMQEQueryParser(t *testing.T) {
 	Convey("MQE query parser", t, func() {
-		parser := &MQEQueryParser{}
+		parser := &QueryParser{}
 
 		dsInfo := &models.DataSource{JsonData: simplejson.New()}
 		queryContext := &tsdb.QueryContext{}

+ 17 - 9
pkg/tsdb/mqe/mqe.go

@@ -12,19 +12,20 @@ import (
 	"github.com/grafana/grafana/pkg/components/simplejson"
 	"github.com/grafana/grafana/pkg/log"
 	"github.com/grafana/grafana/pkg/models"
+	"github.com/grafana/grafana/pkg/setting"
 	"github.com/grafana/grafana/pkg/tsdb"
 )
 
 /*
   TODO:
-  * response serie names with wildcards
-  * real caching
+  * performance. outgoing requests in parallel.
+  * frontend plugin. targetContainsTemplates
 */
 
 type MQEExecutor struct {
 	*models.DataSource
-	queryParser    *MQEQueryParser
-	responseParser *MQEResponseParser
+	queryParser    *QueryParser
+	responseParser *ResponseParser
 	httpClient     *http.Client
 	log            log.Logger
 	tokenClient    *TokenClient
@@ -52,7 +53,7 @@ func init() {
 
 type QueryToSend struct {
 	RawQuery string
-	QueryRef *MQEQuery
+	QueryRef *Query
 }
 
 func (e *MQEExecutor) Execute(ctx context.Context, queries tsdb.QuerySlice, queryContext *tsdb.QueryContext) *tsdb.BatchResult {
@@ -63,7 +64,7 @@ func (e *MQEExecutor) Execute(ctx context.Context, queries tsdb.QuerySlice, quer
 		return result.WithError(err)
 	}
 
-	var mqeQueries []*MQEQuery
+	var mqeQueries []*Query
 	for _, v := range queries {
 		q, err := e.queryParser.Parse(v.Model, e.DataSource, queryContext)
 		if err != nil {
@@ -82,9 +83,13 @@ func (e *MQEExecutor) Execute(ctx context.Context, queries tsdb.QuerySlice, quer
 		rawQueries = append(rawQueries, queries...)
 	}
 
+	e.log.Debug("Sending request", "url", e.DataSource.Url)
+
 	queryResult := &tsdb.QueryResult{}
 	for _, v := range rawQueries {
-		e.log.Info("Mqe executor", "query", v)
+		if setting.Env == setting.DEV {
+			e.log.Debug("Executing", "query", v)
+		}
 
 		req, err := e.createRequest(v.RawQuery)
 
@@ -108,7 +113,11 @@ func (e *MQEExecutor) Execute(ctx context.Context, queries tsdb.QuerySlice, quer
 }
 
 func (e *MQEExecutor) createRequest(query string) (*http.Request, error) {
-	u, _ := url.Parse(e.Url)
+	u, err := url.Parse(e.Url)
+	if err != nil {
+		return nil, err
+	}
+
 	u.Path = path.Join(u.Path, "query")
 
 	payload := simplejson.New()
@@ -131,6 +140,5 @@ func (e *MQEExecutor) createRequest(query string) (*http.Request, error) {
 		req.SetBasicAuth(e.BasicAuthUser, e.BasicAuthPassword)
 	}
 
-	e.log.Debug("Mqe request", "url", req.URL.String())
 	return req, nil
 }

+ 6 - 6
pkg/tsdb/mqe/response_parser.go

@@ -13,8 +13,8 @@ import (
 	"github.com/grafana/grafana/pkg/tsdb"
 )
 
-func NewResponseParser() *MQEResponseParser {
-	return &MQEResponseParser{
+func NewResponseParser() *ResponseParser {
+	return &ResponseParser{
 		log: log.New("tsdb.mqe"),
 	}
 }
@@ -44,11 +44,11 @@ type MQESerie struct {
 	Tagset map[string]string `json:"tagset"`
 }
 
-type MQEResponseParser struct {
+type ResponseParser struct {
 	log log.Logger
 }
 
-func (parser *MQEResponseParser) Parse(res *http.Response, queryRef *MQEQuery) (*tsdb.QueryResult, error) {
+func (parser *ResponseParser) Parse(res *http.Response, queryRef *Query) (*tsdb.QueryResult, error) {
 	body, err := ioutil.ReadAll(res.Body)
 	defer res.Body.Close()
 	if err != nil {
@@ -63,12 +63,12 @@ func (parser *MQEResponseParser) Parse(res *http.Response, queryRef *MQEQuery) (
 	var data *MQEResponse = &MQEResponse{}
 	err = json.Unmarshal(body, data)
 	if err != nil {
-		parser.log.Info("Failed to unmarshal mqe response", "error", err, "status", res.Status, "body", string(body))
+		parser.log.Info("Failed to unmarshal response", "error", err, "status", res.Status, "body", string(body))
 		return nil, err
 	}
 
 	if !data.Success {
-		return nil, fmt.Errorf("MQE request failed.")
+		return nil, fmt.Errorf("Request failed.")
 	}
 
 	var series tsdb.TimeSeriesSlice

+ 1 - 1
pkg/tsdb/mqe/response_parser_test.go

@@ -20,7 +20,7 @@ func TestMQEResponseParser(t *testing.T) {
 		parser := NewResponseParser()
 
 		Convey("Can parse response", func() {
-			queryRef := &MQEQuery{
+			queryRef := &Query{
 				AddAppToAlias:  true,
 				AddHostToAlias: true,
 			}

+ 28 - 15
pkg/tsdb/mqe/token_client.go

@@ -8,15 +8,25 @@ import (
 	"net/http"
 	"net/url"
 	"path"
+	"time"
 
 	"golang.org/x/net/context/ctxhttp"
 
+	"strconv"
+
 	"github.com/grafana/grafana/pkg/log"
 	"github.com/grafana/grafana/pkg/models"
+	"github.com/patrickmn/go-cache"
 )
 
+var tokenCache *cache.Cache
+
+func init() {
+	tokenCache = cache.New(5*time.Minute, 30*time.Second)
+}
+
 type TokenClient struct {
-	tlog       log.Logger
+	log        log.Logger
 	Datasource *models.DataSource
 	HttpClient *http.Client
 }
@@ -25,27 +35,30 @@ func NewTokenClient(datasource *models.DataSource) *TokenClient {
 	httpClient, _ := datasource.GetHttpClient()
 
 	return &TokenClient{
-		tlog:       log.New("tsdb.mqe.tokenclient"),
+		log:        log.New("tsdb.mqe.tokenclient"),
 		Datasource: datasource,
 		HttpClient: httpClient,
 	}
 }
 
-var cache map[int64]*TokenBody = map[int64]*TokenBody{}
-
-//Replace this stupid cache with internal cache from grafana master before merging
 func (client *TokenClient) GetTokenData(ctx context.Context) (*TokenBody, error) {
-	_, excist := cache[client.Datasource.Id]
-	if !excist {
-		b, err := client.RequestTokenData(ctx)
-		if err != nil {
-			return nil, err
+	key := strconv.FormatInt(client.Datasource.Id, 10)
+
+	item, found := tokenCache.Get(key)
+	if found {
+		if result, ok := item.(*TokenBody); ok {
+			return result, nil
 		}
+	}
 
-		cache[client.Datasource.Id] = b
+	b, err := client.RequestTokenData(ctx)
+	if err != nil {
+		return nil, err
 	}
 
-	return cache[client.Datasource.Id], nil
+	tokenCache.Set(key, b, cache.DefaultExpiration)
+
+	return b, nil
 }
 
 func (client *TokenClient) RequestTokenData(ctx context.Context) (*TokenBody, error) {
@@ -54,7 +67,7 @@ func (client *TokenClient) RequestTokenData(ctx context.Context) (*TokenBody, er
 
 	req, err := http.NewRequest(http.MethodGet, u.String(), nil)
 	if err != nil {
-		client.tlog.Info("Failed to create request", "error", err)
+		client.log.Info("Failed to create request", "error", err)
 	}
 
 	res, err := ctxhttp.Do(ctx, client.HttpClient, req)
@@ -69,14 +82,14 @@ func (client *TokenClient) RequestTokenData(ctx context.Context) (*TokenBody, er
 	}
 
 	if res.StatusCode/100 != 2 {
-		client.tlog.Info("Request failed", "status", res.Status, "body", string(body))
+		client.log.Info("Request failed", "status", res.Status, "body", string(body))
 		return nil, fmt.Errorf("Request failed status: %v", res.Status)
 	}
 
 	var result *TokenResponse
 	err = json.Unmarshal(body, &result)
 	if err != nil {
-		client.tlog.Info("Failed to unmarshal graphite response", "error", err, "status", res.Status, "body", string(body))
+		client.log.Info("Failed to unmarshal response", "error", err, "status", res.Status, "body", string(body))
 		return nil, err
 	}
 

+ 5 - 5
pkg/tsdb/mqe/types.go

@@ -11,13 +11,13 @@ import (
 	"github.com/grafana/grafana/pkg/tsdb"
 )
 
-type MQEMetric struct {
+type Metric struct {
 	Metric string
 	Alias  string
 }
 
-type MQEQuery struct {
-	Metrics        []MQEMetric
+type Query struct {
+	Metrics        []Metric
 	Hosts          []string
 	Apps           []string
 	AddAppToAlias  bool
@@ -32,7 +32,7 @@ var (
 	containsWildcardPattern *regexp.Regexp = regexp.MustCompile(`\*`)
 )
 
-func (q *MQEQuery) Build(availableSeries []string) ([]QueryToSend, error) {
+func (q *Query) Build(availableSeries []string) ([]QueryToSend, error) {
 	var queriesToSend []QueryToSend
 	where := q.buildWhereClause()
 
@@ -90,7 +90,7 @@ func (q *MQEQuery) Build(availableSeries []string) ([]QueryToSend, error) {
 	return queriesToSend, nil
 }
 
-func (q *MQEQuery) buildWhereClause() string {
+func (q *Query) buildWhereClause() string {
 	hasApps := len(q.Apps) > 0
 	hasHosts := len(q.Hosts) > 0
 

+ 8 - 8
pkg/tsdb/mqe/types_test.go

@@ -25,11 +25,11 @@ func TestWildcardExpansion(t *testing.T) {
 
 	Convey("Can expanding query", t, func() {
 		Convey("Without wildcard series", func() {
-			query := &MQEQuery{
-				Metrics: []MQEMetric{
-					MQEMetric{Metric: "os.cpu.3.idle", Alias: ""},
-					MQEMetric{Metric: "os.cpu.2.idle", Alias: ""},
-					MQEMetric{Metric: "os.cpu.1.idle", Alias: "cpu"},
+			query := &Query{
+				Metrics: []Metric{
+					Metric{Metric: "os.cpu.3.idle", Alias: ""},
+					Metric{Metric: "os.cpu.2.idle", Alias: ""},
+					Metric{Metric: "os.cpu.1.idle", Alias: "cpu"},
 				},
 				Hosts:          []string{"staples-lab-1", "staples-lab-2"},
 				Apps:           []string{"demoapp-1", "demoapp-2"},
@@ -47,9 +47,9 @@ func TestWildcardExpansion(t *testing.T) {
 		})
 
 		Convey("Containg wildcard series", func() {
-			query := &MQEQuery{
-				Metrics: []MQEMetric{
-					MQEMetric{Metric: "os.cpu*", Alias: ""},
+			query := &Query{
+				Metrics: []Metric{
+					Metric{Metric: "os.cpu*", Alias: ""},
 				},
 				Hosts:          []string{"staples-lab-1"},
 				AddAppToAlias:  false,

+ 8 - 0
vendor/github.com/patrickmn/go-cache/CONTRIBUTORS

@@ -0,0 +1,8 @@
+This is a list of people who have contributed code to go-cache. They, or their
+employers, are the copyright holders of the contributed code. Contributed code
+is subject to the license restrictions listed in LICENSE (as they were when the
+code was contributed.)
+
+Dustin Sallings <dustin@spy.net>
+Jason Mooberry <jasonmoo@me.com>
+Sergey Shepelev <temotor@gmail.com>

+ 19 - 0
vendor/github.com/patrickmn/go-cache/LICENSE

@@ -0,0 +1,19 @@
+Copyright (c) 2012-2016 Patrick Mylund Nielsen and the go-cache contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.

+ 107 - 0
vendor/github.com/patrickmn/go-cache/README.md

@@ -0,0 +1,107 @@
+# go-cache
+
+go-cache is an in-memory key:value store/cache similar to memcached that is
+suitable for applications running on a single machine. Its major advantage is
+that, being essentially a thread-safe `map[string]interface{}` with expiration
+times, it doesn't need to serialize or transmit its contents over the network.
+
+Any object can be stored, for a given duration or forever, and the cache can be
+safely used by multiple goroutines.
+
+Although go-cache isn't meant to be used as a persistent datastore, the entire
+cache can be saved to and loaded from a file (using `c.Items()` to retrieve the
+items map to serialize, and `NewFrom()` to create a cache from a deserialized
+one) to recover from downtime quickly. (See the docs for `NewFrom()` for caveats.)
+
+### Installation
+
+`go get github.com/patrickmn/go-cache`
+
+### Usage
+
+```go
+	import (
+		"fmt"
+		"github.com/patrickmn/go-cache"
+		"time"
+	)
+
+	func main() {
+
+		// Create a cache with a default expiration time of 5 minutes, and which
+		// purges expired items every 30 seconds
+		c := cache.New(5*time.Minute, 30*time.Second)
+
+		// Set the value of the key "foo" to "bar", with the default expiration time
+		c.Set("foo", "bar", cache.DefaultExpiration)
+
+		// Set the value of the key "baz" to 42, with no expiration time
+		// (the item won't be removed until it is re-set, or removed using
+		// c.Delete("baz")
+		c.Set("baz", 42, cache.NoExpiration)
+
+		// Get the string associated with the key "foo" from the cache
+		foo, found := c.Get("foo")
+		if found {
+			fmt.Println(foo)
+		}
+
+		// Since Go is statically typed, and cache values can be anything, type
+		// assertion is needed when values are being passed to functions that don't
+		// take arbitrary types, (i.e. interface{}). The simplest way to do this for
+		// values which will only be used once--e.g. for passing to another
+		// function--is:
+		foo, found := c.Get("foo")
+		if found {
+			MyFunction(foo.(string))
+		}
+
+		// This gets tedious if the value is used several times in the same function.
+		// You might do either of the following instead:
+		if x, found := c.Get("foo"); found {
+			foo := x.(string)
+			// ...
+		}
+		// or
+		var foo string
+		if x, found := c.Get("foo"); found {
+			foo = x.(string)
+		}
+		// ...
+		// foo can then be passed around freely as a string
+
+		// Want performance? Store pointers!
+		c.Set("foo", &MyStruct, cache.DefaultExpiration)
+		if x, found := c.Get("foo"); found {
+			foo := x.(*MyStruct)
+			// ...
+		}
+
+		// If you store a reference type like a pointer, slice, map or channel, you
+		// do not need to run Set if you modify the underlying data. The cached
+		// reference points to the same memory, so if you modify a struct whose
+		// pointer you've stored in the cache, retrieving that pointer with Get will
+		// point you to the same data:
+		foo := &MyStruct{Num: 1}
+		c.Set("foo", foo, cache.DefaultExpiration)
+		// ...
+		x, _ := c.Get("foo")
+		foo := x.(*MyStruct)
+		fmt.Println(foo.Num)
+		// ...
+		foo.Num++
+		// ...
+		x, _ := c.Get("foo")
+		foo := x.(*MyStruct)
+		foo.Println(foo.Num)
+
+		// will print:
+		// 1
+		// 2
+
+	}
+```
+
+### Reference
+
+`godoc` or [http://godoc.org/github.com/patrickmn/go-cache](http://godoc.org/github.com/patrickmn/go-cache)
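
The README above mentions persisting a cache via `c.Items()` and `NewFrom()` without demonstrating it. Here is a minimal sketch against the API in the vendored cache.go that follows; the key and value are arbitrary examples.

```go
package main

import (
	"fmt"
	"time"

	"github.com/patrickmn/go-cache"
)

func main() {
	// Populate a cache and snapshot its unexpired items.
	c := cache.New(5*time.Minute, 30*time.Second)
	c.Set("foo", "bar", cache.DefaultExpiration)
	snapshot := c.Items() // map[string]cache.Item; can be gob-encoded to disk

	// Rebuild a cache from the snapshot, e.g. after a restart.
	// See the NewFrom documentation in cache.go below for caveats.
	restored := cache.NewFrom(5*time.Minute, 30*time.Second, snapshot)
	if v, found := restored.Get("foo"); found {
		fmt.Println(v) // bar
	}
}
```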

+ 1131 - 0
vendor/github.com/patrickmn/go-cache/cache.go

@@ -0,0 +1,1131 @@
+package cache
+
+import (
+	"encoding/gob"
+	"fmt"
+	"io"
+	"os"
+	"runtime"
+	"sync"
+	"time"
+)
+
+type Item struct {
+	Object     interface{}
+	Expiration int64
+}
+
+// Returns true if the item has expired.
+func (item Item) Expired() bool {
+	if item.Expiration == 0 {
+		return false
+	}
+	return time.Now().UnixNano() > item.Expiration
+}
+
+const (
+	// For use with functions that take an expiration time.
+	NoExpiration time.Duration = -1
+	// For use with functions that take an expiration time. Equivalent to
+	// passing in the same expiration duration as was given to New() or
+	// NewFrom() when the cache was created (e.g. 5 minutes.)
+	DefaultExpiration time.Duration = 0
+)
+
+type Cache struct {
+	*cache
+	// If this is confusing, see the comment at the bottom of New()
+}
+
+type cache struct {
+	defaultExpiration time.Duration
+	items             map[string]Item
+	mu                sync.RWMutex
+	onEvicted         func(string, interface{})
+	janitor           *janitor
+}
+
+// Add an item to the cache, replacing any existing item. If the duration is 0
+// (DefaultExpiration), the cache's default expiration time is used. If it is -1
+// (NoExpiration), the item never expires.
+func (c *cache) Set(k string, x interface{}, d time.Duration) {
+	// "Inlining" of set
+	var e int64
+	if d == DefaultExpiration {
+		d = c.defaultExpiration
+	}
+	if d > 0 {
+		e = time.Now().Add(d).UnixNano()
+	}
+	c.mu.Lock()
+	c.items[k] = Item{
+		Object:     x,
+		Expiration: e,
+	}
+	// TODO: Calls to mu.Unlock are currently not deferred because defer
+	// adds ~200 ns (as of go1.)
+	c.mu.Unlock()
+}
+
+func (c *cache) set(k string, x interface{}, d time.Duration) {
+	var e int64
+	if d == DefaultExpiration {
+		d = c.defaultExpiration
+	}
+	if d > 0 {
+		e = time.Now().Add(d).UnixNano()
+	}
+	c.items[k] = Item{
+		Object:     x,
+		Expiration: e,
+	}
+}
+
+// Add an item to the cache, replacing any existing item, using the default
+// expiration.
+func (c *cache) SetDefault(k string, x interface{}) {
+	c.Set(k, x, DefaultExpiration)
+}
+
+// Add an item to the cache only if an item doesn't already exist for the given
+// key, or if the existing item has expired. Returns an error otherwise.
+func (c *cache) Add(k string, x interface{}, d time.Duration) error {
+	c.mu.Lock()
+	_, found := c.get(k)
+	if found {
+		c.mu.Unlock()
+		return fmt.Errorf("Item %s already exists", k)
+	}
+	c.set(k, x, d)
+	c.mu.Unlock()
+	return nil
+}
+
+// Set a new value for the cache key only if it already exists, and the existing
+// item hasn't expired. Returns an error otherwise.
+func (c *cache) Replace(k string, x interface{}, d time.Duration) error {
+	c.mu.Lock()
+	_, found := c.get(k)
+	if !found {
+		c.mu.Unlock()
+		return fmt.Errorf("Item %s doesn't exist", k)
+	}
+	c.set(k, x, d)
+	c.mu.Unlock()
+	return nil
+}
+
+// Get an item from the cache. Returns the item or nil, and a bool indicating
+// whether the key was found.
+func (c *cache) Get(k string) (interface{}, bool) {
+	c.mu.RLock()
+	// "Inlining" of get and Expired
+	item, found := c.items[k]
+	if !found {
+		c.mu.RUnlock()
+		return nil, false
+	}
+	if item.Expiration > 0 {
+		if time.Now().UnixNano() > item.Expiration {
+			c.mu.RUnlock()
+			return nil, false
+		}
+	}
+	c.mu.RUnlock()
+	return item.Object, true
+}
+
+func (c *cache) get(k string) (interface{}, bool) {
+	item, found := c.items[k]
+	if !found {
+		return nil, false
+	}
+	// "Inlining" of Expired
+	if item.Expiration > 0 {
+		if time.Now().UnixNano() > item.Expiration {
+			return nil, false
+		}
+	}
+	return item.Object, true
+}
+
+// Increment an item of type int, int8, int16, int32, int64, uintptr, uint,
+// uint8, uint32, or uint64, float32 or float64 by n. Returns an error if the
+// item's value is not an integer, if it was not found, or if it is not
+// possible to increment it by n. To retrieve the incremented value, use one
+// of the specialized methods, e.g. IncrementInt64.
+func (c *cache) Increment(k string, n int64) error {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return fmt.Errorf("Item %s not found", k)
+	}
+	switch v.Object.(type) {
+	case int:
+		v.Object = v.Object.(int) + int(n)
+	case int8:
+		v.Object = v.Object.(int8) + int8(n)
+	case int16:
+		v.Object = v.Object.(int16) + int16(n)
+	case int32:
+		v.Object = v.Object.(int32) + int32(n)
+	case int64:
+		v.Object = v.Object.(int64) + n
+	case uint:
+		v.Object = v.Object.(uint) + uint(n)
+	case uintptr:
+		v.Object = v.Object.(uintptr) + uintptr(n)
+	case uint8:
+		v.Object = v.Object.(uint8) + uint8(n)
+	case uint16:
+		v.Object = v.Object.(uint16) + uint16(n)
+	case uint32:
+		v.Object = v.Object.(uint32) + uint32(n)
+	case uint64:
+		v.Object = v.Object.(uint64) + uint64(n)
+	case float32:
+		v.Object = v.Object.(float32) + float32(n)
+	case float64:
+		v.Object = v.Object.(float64) + float64(n)
+	default:
+		c.mu.Unlock()
+		return fmt.Errorf("The value for %s is not an integer", k)
+	}
+	c.items[k] = v
+	c.mu.Unlock()
+	return nil
+}
+
+// Increment an item of type float32 or float64 by n. Returns an error if the
+// item's value is not floating point, if it was not found, or if it is not
+// possible to increment it by n. Pass a negative number to decrement the
+// value. To retrieve the incremented value, use one of the specialized methods,
+// e.g. IncrementFloat64.
+func (c *cache) IncrementFloat(k string, n float64) error {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return fmt.Errorf("Item %s not found", k)
+	}
+	switch v.Object.(type) {
+	case float32:
+		v.Object = v.Object.(float32) + float32(n)
+	case float64:
+		v.Object = v.Object.(float64) + n
+	default:
+		c.mu.Unlock()
+		return fmt.Errorf("The value for %s does not have type float32 or float64", k)
+	}
+	c.items[k] = v
+	c.mu.Unlock()
+	return nil
+}
+
+// Increment an item of type int by n. Returns an error if the item's value is
+// not an int, or if it was not found. If there is no error, the incremented
+// value is returned.
+func (c *cache) IncrementInt(k string, n int) (int, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	rv, ok := v.Object.(int)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an int", k)
+	}
+	nv := rv + n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Increment an item of type int8 by n. Returns an error if the item's value is
+// not an int8, or if it was not found. If there is no error, the incremented
+// value is returned.
+func (c *cache) IncrementInt8(k string, n int8) (int8, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	rv, ok := v.Object.(int8)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an int8", k)
+	}
+	nv := rv + n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Increment an item of type int16 by n. Returns an error if the item's value is
+// not an int16, or if it was not found. If there is no error, the incremented
+// value is returned.
+func (c *cache) IncrementInt16(k string, n int16) (int16, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	rv, ok := v.Object.(int16)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an int16", k)
+	}
+	nv := rv + n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Increment an item of type int32 by n. Returns an error if the item's value is
+// not an int32, or if it was not found. If there is no error, the incremented
+// value is returned.
+func (c *cache) IncrementInt32(k string, n int32) (int32, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	rv, ok := v.Object.(int32)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an int32", k)
+	}
+	nv := rv + n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Increment an item of type int64 by n. Returns an error if the item's value is
+// not an int64, or if it was not found. If there is no error, the incremented
+// value is returned.
+func (c *cache) IncrementInt64(k string, n int64) (int64, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	rv, ok := v.Object.(int64)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an int64", k)
+	}
+	nv := rv + n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Increment an item of type uint by n. Returns an error if the item's value is
+// not an uint, or if it was not found. If there is no error, the incremented
+// value is returned.
+func (c *cache) IncrementUint(k string, n uint) (uint, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	rv, ok := v.Object.(uint)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an uint", k)
+	}
+	nv := rv + n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Increment an item of type uintptr by n. Returns an error if the item's value
+// is not an uintptr, or if it was not found. If there is no error, the
+// incremented value is returned.
+func (c *cache) IncrementUintptr(k string, n uintptr) (uintptr, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	rv, ok := v.Object.(uintptr)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an uintptr", k)
+	}
+	nv := rv + n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Increment an item of type uint8 by n. Returns an error if the item's value
+// is not an uint8, or if it was not found. If there is no error, the
+// incremented value is returned.
+func (c *cache) IncrementUint8(k string, n uint8) (uint8, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	rv, ok := v.Object.(uint8)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an uint8", k)
+	}
+	nv := rv + n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Increment an item of type uint16 by n. Returns an error if the item's value
+// is not an uint16, or if it was not found. If there is no error, the
+// incremented value is returned.
+func (c *cache) IncrementUint16(k string, n uint16) (uint16, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	rv, ok := v.Object.(uint16)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an uint16", k)
+	}
+	nv := rv + n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Increment an item of type uint32 by n. Returns an error if the item's value
+// is not an uint32, or if it was not found. If there is no error, the
+// incremented value is returned.
+func (c *cache) IncrementUint32(k string, n uint32) (uint32, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	rv, ok := v.Object.(uint32)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an uint32", k)
+	}
+	nv := rv + n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Increment an item of type uint64 by n. Returns an error if the item's value
+// is not an uint64, or if it was not found. If there is no error, the
+// incremented value is returned.
+func (c *cache) IncrementUint64(k string, n uint64) (uint64, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	rv, ok := v.Object.(uint64)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an uint64", k)
+	}
+	nv := rv + n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Increment an item of type float32 by n. Returns an error if the item's value
+// is not an float32, or if it was not found. If there is no error, the
+// incremented value is returned.
+func (c *cache) IncrementFloat32(k string, n float32) (float32, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	rv, ok := v.Object.(float32)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an float32", k)
+	}
+	nv := rv + n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Increment an item of type float64 by n. Returns an error if the item's value
+// is not an float64, or if it was not found. If there is no error, the
+// incremented value is returned.
+func (c *cache) IncrementFloat64(k string, n float64) (float64, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	rv, ok := v.Object.(float64)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an float64", k)
+	}
+	nv := rv + n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Decrement an item of type int, int8, int16, int32, int64, uintptr, uint,
+// uint8, uint32, or uint64, float32 or float64 by n. Returns an error if the
+// item's value is not an integer, if it was not found, or if it is not
+// possible to decrement it by n. To retrieve the decremented value, use one
+// of the specialized methods, e.g. DecrementInt64.
+func (c *cache) Decrement(k string, n int64) error {
+	// TODO: Implement Increment and Decrement more cleanly.
+	// (Cannot do Increment(k, n*-1) for uints.)
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return fmt.Errorf("Item not found")
+	}
+	switch v.Object.(type) {
+	case int:
+		v.Object = v.Object.(int) - int(n)
+	case int8:
+		v.Object = v.Object.(int8) - int8(n)
+	case int16:
+		v.Object = v.Object.(int16) - int16(n)
+	case int32:
+		v.Object = v.Object.(int32) - int32(n)
+	case int64:
+		v.Object = v.Object.(int64) - n
+	case uint:
+		v.Object = v.Object.(uint) - uint(n)
+	case uintptr:
+		v.Object = v.Object.(uintptr) - uintptr(n)
+	case uint8:
+		v.Object = v.Object.(uint8) - uint8(n)
+	case uint16:
+		v.Object = v.Object.(uint16) - uint16(n)
+	case uint32:
+		v.Object = v.Object.(uint32) - uint32(n)
+	case uint64:
+		v.Object = v.Object.(uint64) - uint64(n)
+	case float32:
+		v.Object = v.Object.(float32) - float32(n)
+	case float64:
+		v.Object = v.Object.(float64) - float64(n)
+	default:
+		c.mu.Unlock()
+		return fmt.Errorf("The value for %s is not an integer", k)
+	}
+	c.items[k] = v
+	c.mu.Unlock()
+	return nil
+}
+
+// Decrement an item of type float32 or float64 by n. Returns an error if the
+// item's value is not floating point, if it was not found, or if it is not
+// possible to decrement it by n. Pass a negative number to decrement the
+// value. To retrieve the decremented value, use one of the specialized methods,
+// e.g. DecrementFloat64.
+func (c *cache) DecrementFloat(k string, n float64) error {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return fmt.Errorf("Item %s not found", k)
+	}
+	switch v.Object.(type) {
+	case float32:
+		v.Object = v.Object.(float32) - float32(n)
+	case float64:
+		v.Object = v.Object.(float64) - n
+	default:
+		c.mu.Unlock()
+		return fmt.Errorf("The value for %s does not have type float32 or float64", k)
+	}
+	c.items[k] = v
+	c.mu.Unlock()
+	return nil
+}
+
+// Decrement an item of type int by n. Returns an error if the item's value is
+// not an int, or if it was not found. If there is no error, the decremented
+// value is returned.
+func (c *cache) DecrementInt(k string, n int) (int, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	rv, ok := v.Object.(int)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an int", k)
+	}
+	nv := rv - n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Decrement an item of type int8 by n. Returns an error if the item's value is
+// not an int8, or if it was not found. If there is no error, the decremented
+// value is returned.
+func (c *cache) DecrementInt8(k string, n int8) (int8, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	rv, ok := v.Object.(int8)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an int8", k)
+	}
+	nv := rv - n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Decrement an item of type int16 by n. Returns an error if the item's value is
+// not an int16, or if it was not found. If there is no error, the decremented
+// value is returned.
+func (c *cache) DecrementInt16(k string, n int16) (int16, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	rv, ok := v.Object.(int16)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an int16", k)
+	}
+	nv := rv - n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Decrement an item of type int32 by n. Returns an error if the item's value is
+// not an int32, or if it was not found. If there is no error, the decremented
+// value is returned.
+func (c *cache) DecrementInt32(k string, n int32) (int32, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	rv, ok := v.Object.(int32)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an int32", k)
+	}
+	nv := rv - n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Decrement an item of type int64 by n. Returns an error if the item's value is
+// not an int64, or if it was not found. If there is no error, the decremented
+// value is returned.
+func (c *cache) DecrementInt64(k string, n int64) (int64, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	rv, ok := v.Object.(int64)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an int64", k)
+	}
+	nv := rv - n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Decrement an item of type uint by n. Returns an error if the item's value is
+// not an uint, or if it was not found. If there is no error, the decremented
+// value is returned.
+func (c *cache) DecrementUint(k string, n uint) (uint, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	rv, ok := v.Object.(uint)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an uint", k)
+	}
+	nv := rv - n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Decrement an item of type uintptr by n. Returns an error if the item's value
+// is not an uintptr, or if it was not found. If there is no error, the
+// decremented value is returned.
+func (c *cache) DecrementUintptr(k string, n uintptr) (uintptr, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	rv, ok := v.Object.(uintptr)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an uintptr", k)
+	}
+	nv := rv - n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Decrement an item of type uint8 by n. Returns an error if the item's value is
+// not an uint8, or if it was not found. If there is no error, the decremented
+// value is returned.
+func (c *cache) DecrementUint8(k string, n uint8) (uint8, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	rv, ok := v.Object.(uint8)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an uint8", k)
+	}
+	nv := rv - n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Decrement an item of type uint16 by n. Returns an error if the item's value
+// is not an uint16, or if it was not found. If there is no error, the
+// decremented value is returned.
+func (c *cache) DecrementUint16(k string, n uint16) (uint16, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	rv, ok := v.Object.(uint16)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an uint16", k)
+	}
+	nv := rv - n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Decrement an item of type uint32 by n. Returns an error if the item's value
+// is not an uint32, or if it was not found. If there is no error, the
+// decremented value is returned.
+func (c *cache) DecrementUint32(k string, n uint32) (uint32, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	rv, ok := v.Object.(uint32)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an uint32", k)
+	}
+	nv := rv - n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Decrement an item of type uint64 by n. Returns an error if the item's value
+// is not an uint64, or if it was not found. If there is no error, the
+// decremented value is returned.
+func (c *cache) DecrementUint64(k string, n uint64) (uint64, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	rv, ok := v.Object.(uint64)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an uint64", k)
+	}
+	nv := rv - n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Decrement an item of type float32 by n. Returns an error if the item's value
+// is not an float32, or if it was not found. If there is no error, the
+// decremented value is returned.
+func (c *cache) DecrementFloat32(k string, n float32) (float32, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	rv, ok := v.Object.(float32)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an float32", k)
+	}
+	nv := rv - n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Decrement an item of type float64 by n. Returns an error if the item's value
+// is not an float64, or if it was not found. If there is no error, the
+// decremented value is returned.
+func (c *cache) DecrementFloat64(k string, n float64) (float64, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	rv, ok := v.Object.(float64)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an float64", k)
+	}
+	nv := rv - n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Delete an item from the cache. Does nothing if the key is not in the cache.
+func (c *cache) Delete(k string) {
+	c.mu.Lock()
+	v, evicted := c.delete(k)
+	c.mu.Unlock()
+	if evicted {
+		c.onEvicted(k, v)
+	}
+}
+
+func (c *cache) delete(k string) (interface{}, bool) {
+	if c.onEvicted != nil {
+		if v, found := c.items[k]; found {
+			delete(c.items, k)
+			return v.Object, true
+		}
+	}
+	delete(c.items, k)
+	return nil, false
+}
+
+type keyAndValue struct {
+	key   string
+	value interface{}
+}
+
+// Delete all expired items from the cache.
+func (c *cache) DeleteExpired() {
+	var evictedItems []keyAndValue
+	now := time.Now().UnixNano()
+	c.mu.Lock()
+	for k, v := range c.items {
+		// "Inlining" of expired
+		if v.Expiration > 0 && now > v.Expiration {
+			ov, evicted := c.delete(k)
+			if evicted {
+				evictedItems = append(evictedItems, keyAndValue{k, ov})
+			}
+		}
+	}
+	c.mu.Unlock()
+	for _, v := range evictedItems {
+		c.onEvicted(v.key, v.value)
+	}
+}
+
+// Sets an (optional) function that is called with the key and value when an
+// item is evicted from the cache. (Including when it is deleted manually, but
+// not when it is overwritten.) Set to nil to disable.
+func (c *cache) OnEvicted(f func(string, interface{})) {
+	c.mu.Lock()
+	c.onEvicted = f
+	c.mu.Unlock()
+}
+
+// Write the cache's items (using Gob) to an io.Writer.
+//
+// NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the
+// documentation for NewFrom().)
+func (c *cache) Save(w io.Writer) (err error) {
+	enc := gob.NewEncoder(w)
+	defer func() {
+		if x := recover(); x != nil {
+			err = fmt.Errorf("Error registering item types with Gob library")
+		}
+	}()
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	for _, v := range c.items {
+		gob.Register(v.Object)
+	}
+	err = enc.Encode(&c.items)
+	return
+}
+
+// Save the cache's items to the given filename, creating the file if it
+// doesn't exist, and overwriting it if it does.
+//
+// NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the
+// documentation for NewFrom().)
+func (c *cache) SaveFile(fname string) error {
+	fp, err := os.Create(fname)
+	if err != nil {
+		return err
+	}
+	err = c.Save(fp)
+	if err != nil {
+		fp.Close()
+		return err
+	}
+	return fp.Close()
+}
+
+// Add (Gob-serialized) cache items from an io.Reader, excluding any items with
+// keys that already exist (and haven't expired) in the current cache.
+//
+// NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the
+// documentation for NewFrom().)
+func (c *cache) Load(r io.Reader) error {
+	dec := gob.NewDecoder(r)
+	items := map[string]Item{}
+	err := dec.Decode(&items)
+	if err == nil {
+		c.mu.Lock()
+		defer c.mu.Unlock()
+		for k, v := range items {
+			ov, found := c.items[k]
+			if !found || ov.Expired() {
+				c.items[k] = v
+			}
+		}
+	}
+	return err
+}
+
+// Load and add cache items from the given filename, excluding any items with
+// keys that already exist in the current cache.
+//
+// NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the
+// documentation for NewFrom().)
+func (c *cache) LoadFile(fname string) error {
+	fp, err := os.Open(fname)
+	if err != nil {
+		return err
+	}
+	err = c.Load(fp)
+	if err != nil {
+		fp.Close()
+		return err
+	}
+	return fp.Close()
+}
+
+// Copies all unexpired items in the cache into a new map and returns it.
+func (c *cache) Items() map[string]Item {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	m := make(map[string]Item, len(c.items))
+	now := time.Now().UnixNano()
+	for k, v := range c.items {
+		// "Inlining" of Expired
+		if v.Expiration > 0 {
+			if now > v.Expiration {
+				continue
+			}
+		}
+		m[k] = v
+	}
+	return m
+}
+
+// Returns the number of items in the cache. This may include items that have
+// expired, but have not yet been cleaned up.
+func (c *cache) ItemCount() int {
+	c.mu.RLock()
+	n := len(c.items)
+	c.mu.RUnlock()
+	return n
+}
+
+// Delete all items from the cache.
+func (c *cache) Flush() {
+	c.mu.Lock()
+	c.items = map[string]Item{}
+	c.mu.Unlock()
+}
+
+type janitor struct {
+	Interval time.Duration
+	stop     chan bool
+}
+
+func (j *janitor) Run(c *cache) {
+	j.stop = make(chan bool)
+	ticker := time.NewTicker(j.Interval)
+	for {
+		select {
+		case <-ticker.C:
+			c.DeleteExpired()
+		case <-j.stop:
+			ticker.Stop()
+			return
+		}
+	}
+}
+
+func stopJanitor(c *Cache) {
+	c.janitor.stop <- true
+}
+
+func runJanitor(c *cache, ci time.Duration) {
+	j := &janitor{
+		Interval: ci,
+	}
+	c.janitor = j
+	go j.Run(c)
+}
+
+func newCache(de time.Duration, m map[string]Item) *cache {
+	if de == 0 {
+		de = -1
+	}
+	c := &cache{
+		defaultExpiration: de,
+		items:             m,
+	}
+	return c
+}
+
+func newCacheWithJanitor(de time.Duration, ci time.Duration, m map[string]Item) *Cache {
+	c := newCache(de, m)
+	// This trick ensures that the janitor goroutine (which--granted it
+	// was enabled--is running DeleteExpired on c forever) does not keep
+	// the returned C object from being garbage collected. When it is
+	// garbage collected, the finalizer stops the janitor goroutine, after
+	// which c can be collected.
+	C := &Cache{c}
+	if ci > 0 {
+		runJanitor(c, ci)
+		runtime.SetFinalizer(C, stopJanitor)
+	}
+	return C
+}
+
+// Return a new cache with a given default expiration duration and cleanup
+// interval. If the expiration duration is less than one (or NoExpiration),
+// the items in the cache never expire (by default), and must be deleted
+// manually. If the cleanup interval is less than one, expired items are not
+// deleted from the cache before calling c.DeleteExpired().
+func New(defaultExpiration, cleanupInterval time.Duration) *Cache {
+	items := make(map[string]Item)
+	return newCacheWithJanitor(defaultExpiration, cleanupInterval, items)
+}
+
+// Return a new cache with a given default expiration duration and cleanup
+// interval. If the expiration duration is less than one (or NoExpiration),
+// the items in the cache never expire (by default), and must be deleted
+// manually. If the cleanup interval is less than one, expired items are not
+// deleted from the cache before calling c.DeleteExpired().
+//
+// NewFrom() also accepts an items map which will serve as the underlying map
+// for the cache. This is useful for starting from a deserialized cache
+// (serialized using e.g. gob.Encode() on c.Items()), or passing in e.g.
+// make(map[string]Item, 500) to improve startup performance when the cache
+// is expected to reach a certain minimum size.
+//
+// Only the cache's methods synchronize access to this map, so it is not
+// recommended to keep any references to the map around after creating a cache.
+// If need be, the map can be accessed at a later point using c.Items() (subject
+// to the same caveat.)
+//
+// Note regarding serialization: When using e.g. gob, make sure to
+// gob.Register() the individual types stored in the cache before encoding a
+// map retrieved with c.Items(), and to register those same types before
+// decoding a blob containing an items map.
+func NewFrom(defaultExpiration, cleanupInterval time.Duration, items map[string]Item) *Cache {
+	return newCacheWithJanitor(defaultExpiration, cleanupInterval, items)
+}

+ 192 - 0
vendor/github.com/patrickmn/go-cache/sharded.go

@@ -0,0 +1,192 @@
+package cache
+
+import (
+	"crypto/rand"
+	"math"
+	"math/big"
+	insecurerand "math/rand"
+	"os"
+	"runtime"
+	"time"
+)
+
+// This is an experimental and unexported (for now) attempt at making a cache
+// with better algorithmic complexity than the standard one, namely by
+// preventing write locks of the entire cache when an item is added. As of the
+// time of writing, the overhead of selecting buckets results in cache
+// operations being about twice as slow as for the standard cache with small
+// total cache sizes, and faster for larger ones.
+//
+// See cache_test.go for a few benchmarks.
+
+type unexportedShardedCache struct {
+	*shardedCache
+}
+
+type shardedCache struct {
+	seed    uint32
+	m       uint32
+	cs      []*cache
+	janitor *shardedJanitor
+}
+
+// djb2 with better shuffling. 5x faster than FNV with the hash.Hash overhead.
+func djb33(seed uint32, k string) uint32 {
+	var (
+		l = uint32(len(k))
+		d = 5381 + seed + l
+		i = uint32(0)
+	)
+	// Why is all this 5x faster than a for loop?
+	if l >= 4 {
+		for i < l-4 {
+			d = (d * 33) ^ uint32(k[i])
+			d = (d * 33) ^ uint32(k[i+1])
+			d = (d * 33) ^ uint32(k[i+2])
+			d = (d * 33) ^ uint32(k[i+3])
+			i += 4
+		}
+	}
+	switch l - i {
+	case 1:
+	case 2:
+		d = (d * 33) ^ uint32(k[i])
+	case 3:
+		d = (d * 33) ^ uint32(k[i])
+		d = (d * 33) ^ uint32(k[i+1])
+	case 4:
+		d = (d * 33) ^ uint32(k[i])
+		d = (d * 33) ^ uint32(k[i+1])
+		d = (d * 33) ^ uint32(k[i+2])
+	}
+	return d ^ (d >> 16)
+}
+
+func (sc *shardedCache) bucket(k string) *cache {
+	return sc.cs[djb33(sc.seed, k)%sc.m]
+}
+
+func (sc *shardedCache) Set(k string, x interface{}, d time.Duration) {
+	sc.bucket(k).Set(k, x, d)
+}
+
+func (sc *shardedCache) Add(k string, x interface{}, d time.Duration) error {
+	return sc.bucket(k).Add(k, x, d)
+}
+
+func (sc *shardedCache) Replace(k string, x interface{}, d time.Duration) error {
+	return sc.bucket(k).Replace(k, x, d)
+}
+
+func (sc *shardedCache) Get(k string) (interface{}, bool) {
+	return sc.bucket(k).Get(k)
+}
+
+func (sc *shardedCache) Increment(k string, n int64) error {
+	return sc.bucket(k).Increment(k, n)
+}
+
+func (sc *shardedCache) IncrementFloat(k string, n float64) error {
+	return sc.bucket(k).IncrementFloat(k, n)
+}
+
+func (sc *shardedCache) Decrement(k string, n int64) error {
+	return sc.bucket(k).Decrement(k, n)
+}
+
+func (sc *shardedCache) Delete(k string) {
+	sc.bucket(k).Delete(k)
+}
+
+func (sc *shardedCache) DeleteExpired() {
+	for _, v := range sc.cs {
+		v.DeleteExpired()
+	}
+}
+
+// Returns the items in the cache. This may include items that have expired,
+// but have not yet been cleaned up. If this is significant, the Expiration
+// fields of the items should be checked. Note that explicit synchronization
+// is needed to use a cache and its corresponding Items() return values at
+// the same time, as the maps are shared.
+func (sc *shardedCache) Items() []map[string]Item {
+	res := make([]map[string]Item, len(sc.cs))
+	for i, v := range sc.cs {
+		res[i] = v.Items()
+	}
+	return res
+}
+
+func (sc *shardedCache) Flush() {
+	for _, v := range sc.cs {
+		v.Flush()
+	}
+}
+
+type shardedJanitor struct {
+	Interval time.Duration
+	stop     chan bool
+}
+
+func (j *shardedJanitor) Run(sc *shardedCache) {
+	j.stop = make(chan bool)
+	tick := time.Tick(j.Interval)
+	for {
+		select {
+		case <-tick:
+			sc.DeleteExpired()
+		case <-j.stop:
+			return
+		}
+	}
+}
+
+func stopShardedJanitor(sc *unexportedShardedCache) {
+	sc.janitor.stop <- true
+}
+
+func runShardedJanitor(sc *shardedCache, ci time.Duration) {
+	j := &shardedJanitor{
+		Interval: ci,
+	}
+	sc.janitor = j
+	go j.Run(sc)
+}
+
+func newShardedCache(n int, de time.Duration) *shardedCache {
+	max := big.NewInt(0).SetUint64(uint64(math.MaxUint32))
+	rnd, err := rand.Int(rand.Reader, max)
+	var seed uint32
+	if err != nil {
+		os.Stderr.Write([]byte("WARNING: go-cache's newShardedCache failed to read from the system CSPRNG (/dev/urandom or equivalent.) Your system's security may be compromised. Continuing with an insecure seed.\n"))
+		seed = insecurerand.Uint32()
+	} else {
+		seed = uint32(rnd.Uint64())
+	}
+	sc := &shardedCache{
+		seed: seed,
+		m:    uint32(n),
+		cs:   make([]*cache, n),
+	}
+	for i := 0; i < n; i++ {
+		c := &cache{
+			defaultExpiration: de,
+			items:             map[string]Item{},
+		}
+		sc.cs[i] = c
+	}
+	return sc
+}
+
+func unexportedNewSharded(defaultExpiration, cleanupInterval time.Duration, shards int) *unexportedShardedCache {
+	if defaultExpiration == 0 {
+		defaultExpiration = -1
+	}
+	sc := newShardedCache(shards, defaultExpiration)
+	SC := &unexportedShardedCache{sc}
+	if cleanupInterval > 0 {
+		runShardedJanitor(sc, cleanupInterval)
+		runtime.SetFinalizer(SC, stopShardedJanitor)
+	}
+	return SC
+}

+ 6 - 0
vendor/vendor.json

@@ -314,6 +314,12 @@
 			"version": "v1.21.1",
 			"version": "v1.21.1",
 			"versionExact": "v1.21.1"
 			"versionExact": "v1.21.1"
 		},
 		},
+		{
+			"checksumSHA1": "8z32QKTSDusa4QQyunKE4kyYXZ8=",
+			"path": "github.com/patrickmn/go-cache",
+			"revision": "e7a9def80f35fe1b170b7b8b68871d59dea117e1",
+			"revisionTime": "2016-11-25T23:48:19Z"
+		},
 		{
 		{
 			"checksumSHA1": "SMUvX2B8eoFd9wnPofwBKlN6btE=",
 			"checksumSHA1": "SMUvX2B8eoFd9wnPofwBKlN6btE=",
 			"path": "github.com/prometheus/client_golang/api/prometheus",
 			"path": "github.com/prometheus/client_golang/api/prometheus",