
Merge branch 'metrics_reporting'

Torkel Ödegaard 9 years ago
parent
commit
064e474b0a
45 changed files with 5456 additions and 204 deletions
  1. +16 -0    Godeps/Godeps.json
  2. +20 -0    Godeps/_workspace/src/github.com/influxdata/influxdb/LICENSE
  3. +27 -0    Godeps/_workspace/src/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md
  4. +267 -0   Godeps/_workspace/src/github.com/influxdata/influxdb/client/README.md
  5. +789 -0   Godeps/_workspace/src/github.com/influxdata/influxdb/client/influxdb.go
  6. +46 -0    Godeps/_workspace/src/github.com/influxdata/influxdb/models/consistency.go
  7. +1576 -0  Godeps/_workspace/src/github.com/influxdata/influxdb/models/points.go
  8. +60 -0    Godeps/_workspace/src/github.com/influxdata/influxdb/models/rows.go
  9. +51 -0    Godeps/_workspace/src/github.com/influxdata/influxdb/models/time.go
  10. +53 -0   Godeps/_workspace/src/github.com/influxdata/influxdb/pkg/escape/bytes.go
  11. +34 -0   Godeps/_workspace/src/github.com/influxdata/influxdb/pkg/escape/strings.go
  12. +14 -0   conf/defaults.ini
  13. +3 -0    conf/sample.ini
  14. +4 -1    pkg/api/api.go
  15. +1 -1    pkg/api/app_routes.go
  16. +7 -3    pkg/api/common.go
  17. +2 -4    pkg/api/dashboard.go
  18. +4 -0    pkg/api/dataproxy.go
  19. +50 -2   pkg/api/metrics.go
  20. +2 -0    pkg/api/search.go
  21. +2 -4    pkg/cmd/grafana-server/main.go
  22. +1 -1    pkg/cmd/grafana-server/web.go
  23. +122 -0  pkg/metrics/EMWA.go
  24. +46 -0   pkg/metrics/combos.go
  25. +61 -0   pkg/metrics/common.go
  26. +18 -28  pkg/metrics/counter.go
  27. +11 -0   pkg/metrics/delta.go
  28. +82 -0   pkg/metrics/gauge.go
  29. +91 -0   pkg/metrics/graphite.go
  30. +189 -0  pkg/metrics/histogram.go
  31. +90 -0   pkg/metrics/histogram_test.go
  32. +221 -0  pkg/metrics/meter.go
  33. +0 -39   pkg/metrics/metric_ref.go
  34. +66 -26  pkg/metrics/metrics.go
  35. +27 -13  pkg/metrics/publish.go
  36. +13 -78  pkg/metrics/registry.go
  37. +607 -0  pkg/metrics/sample.go
  38. +367 -0  pkg/metrics/sample_test.go
  39. +46 -0   pkg/metrics/settings.go
  40. +309 -0  pkg/metrics/timer.go
  41. +9 -1    pkg/middleware/logger.go
  42. +4 -0    pkg/middleware/middleware.go
  43. +12 -0   pkg/middleware/perf.go
  44. +27 -3   pkg/setting/setting.go
  45. +9 -0    pkg/setting/setting_test.go

+ 16 - 0
Godeps/Godeps.json

@@ -1,6 +1,7 @@
 {
 	"ImportPath": "github.com/grafana/grafana",
 	"GoVersion": "go1.5.1",
+	"GodepVersion": "v60",
 	"Packages": [
 		"./pkg/..."
 	],
@@ -226,6 +227,21 @@
 			"ImportPath": "github.com/hashicorp/go-version",
 			"Rev": "7e3c02b30806fa5779d3bdfc152ce4c6f40e7b38"
 		},
+		{
+			"ImportPath": "github.com/influxdata/influxdb/client",
+			"Comment": "v0.13.0-74-g2c9d0fc",
+			"Rev": "2c9d0fcc04eba3ffc88f2aafe8466874e384d80d"
+		},
+		{
+			"ImportPath": "github.com/influxdata/influxdb/models",
+			"Comment": "v0.13.0-74-g2c9d0fc",
+			"Rev": "2c9d0fcc04eba3ffc88f2aafe8466874e384d80d"
+		},
+		{
+			"ImportPath": "github.com/influxdata/influxdb/pkg/escape",
+			"Comment": "v0.13.0-74-g2c9d0fc",
+			"Rev": "2c9d0fcc04eba3ffc88f2aafe8466874e384d80d"
+		},
 		{
 			"ImportPath": "github.com/jmespath/go-jmespath",
 			"Comment": "0.2.2",

+ 20 - 0
Godeps/_workspace/src/github.com/influxdata/influxdb/LICENSE

@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013-2016 Errplane Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 27 - 0
Godeps/_workspace/src/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md

@@ -0,0 +1,27 @@
+# List
+- bootstrap 3.3.5 [MIT LICENSE](https://github.com/twbs/bootstrap/blob/master/LICENSE)
+- collectd.org [ISC LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE)
+- github.com/armon/go-metrics [MIT LICENSE](https://github.com/armon/go-metrics/blob/master/LICENSE)
+- github.com/BurntSushi/toml [WTFPL LICENSE](https://github.com/BurntSushi/toml/blob/master/COPYING)
+- github.com/bmizerany/pat [MIT LICENSE](https://github.com/bmizerany/pat#license)
+- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE)
+- github.com/dgryski/go-bits [MIT LICENSE](https://github.com/dgryski/go-bits/blob/master/LICENSE)
+- github.com/dgryski/go-bitstream [MIT LICENSE](https://github.com/dgryski/go-bitstream/blob/master/LICENSE)
+- github.com/gogo/protobuf/proto [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE)
+- github.com/davecgh/go-spew/spew [ISC LICENSE](https://github.com/davecgh/go-spew/blob/master/LICENSE)
+- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE)
+- github.com/hashicorp/go-msgpack [BSD LICENSE](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE)
+- github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE)
+- github.com/hashicorp/raft-boltdb [MOZILLA PUBLIC LICENSE](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE)
+- github.com/influxdata/usage-client [MIT LICENSE](https://github.com/influxdata/usage-client/blob/master/LICENSE.txt)
+- github.com/jwilder/encoding [MIT LICENSE](https://github.com/jwilder/encoding/blob/master/LICENSE)
+- github.com/kimor79/gollectd [BSD LICENSE](https://github.com/kimor79/gollectd/blob/master/LICENSE)
+- github.com/paulbellamy/ratecounter [MIT LICENSE](https://github.com/paulbellamy/ratecounter/blob/master/LICENSE)
+- github.com/peterh/liner [MIT LICENSE](https://github.com/peterh/liner/blob/master/COPYING)
+- github.com/rakyll/statik [APACHE LICENSE](https://github.com/rakyll/statik/blob/master/LICENSE)
+- glyphicons [LICENSE](http://glyphicons.com/license/)
+- golang.org/x/crypto [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)
+- golang.org/x/tools [BSD LICENSE](https://github.com/golang/tools/blob/master/LICENSE)
+- gopkg.in/fatih/pool.v2 [MIT LICENSE](https://github.com/fatih/pool/blob/v2.0.0/LICENSE)
+- jquery 2.1.4 [MIT LICENSE](https://github.com/jquery/jquery/blob/master/LICENSE.txt)
+- react 0.13.3 [BSD LICENSE](https://github.com/facebook/react/blob/master/LICENSE)

+ 267 - 0
Godeps/_workspace/src/github.com/influxdata/influxdb/client/README.md

@@ -0,0 +1,267 @@
+# InfluxDB Client
+
+[![GoDoc](https://godoc.org/github.com/influxdata/influxdb?status.svg)](http://godoc.org/github.com/influxdata/influxdb/client/v2)
+
+## Description
+
+**NOTE:** The Go client library now has a "v2" version, with the old version
+being deprecated. The new version can be imported at
+`import "github.com/influxdata/influxdb/client/v2"`. It is not backwards-compatible.
+
+A Go client library written and maintained by the **InfluxDB** team.
+This package provides convenience functions to read and write time series data.
+It uses the HTTP protocol to communicate with your **InfluxDB** cluster.
+
+
+## Getting Started
+
+### Connecting To Your Database
+
+Connecting to an **InfluxDB** database is straightforward. You will need a host
+name, a port and the cluster user credentials if applicable. The default port is
+8086. You can customize these settings to your specific installation via the
+**InfluxDB** configuration file.
+
+Though not necessary for experimentation, you may want to create a new user
+and authenticate the connection to your database.
+
+For more information please check out the
+[Admin Docs](https://docs.influxdata.com/influxdb/latest/administration/).
+
+For the impatient, you can create a new admin user _bubba_ by firing off the
+[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go).
+
+```shell
+influx
+> create user bubba with password 'bumblebeetuna'
+> grant all privileges to bubba
+```
+
+And now, for good measure, set the credentials in your shell environment.
+In the example below we will use $INFLUX_USER and $INFLUX_PWD.
+
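+For example, using the _bubba_ credentials created above:
+
+```shell
+export INFLUX_USER=bubba
+export INFLUX_PWD=bumblebeetuna
+```
+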
+Now with the administrivia out of the way, let's connect to our database.
+
+NOTE: If you've opted out of creating a user, you can omit Username and Password in
+the configuration below.
+
+```go
+package main
+
+import (
+	"log"
+	"time"
+
+	"github.com/influxdata/influxdb/client/v2"
+)
+
+const (
+	MyDB = "square_holes"
+	username = "bubba"
+	password = "bumblebeetuna"
+)
+
+func main() {
+	// Make client
+	c, err := client.NewHTTPClient(client.HTTPConfig{
+		Addr: "http://localhost:8086",
+		Username: username,
+		Password: password,
+	})
+
+	if err != nil {
+	    log.Fatalln("Error: ", err)
+	}
+
+	// Create a new point batch
+	bp, err := client.NewBatchPoints(client.BatchPointsConfig{
+		Database:  MyDB,
+		Precision: "s",
+	})
+
+	if err != nil {
+	    log.Fatalln("Error: ", err)
+	}
+
+	// Create a point and add to batch
+	tags := map[string]string{"cpu": "cpu-total"}
+	fields := map[string]interface{}{
+		"idle":   10.1,
+		"system": 53.3,
+		"user":   46.6,
+	}
+	pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
+
+	if err != nil {
+	    log.Fatalln("Error: ", err)
+	}
+
+	bp.AddPoint(pt)
+
+	// Write the batch
+	if err := c.Write(bp); err != nil {
+		log.Fatal(err)
+	}
+}
+
+```
+
+### Inserting Data
+
+Time series data, aka *points*, are written to the database using batch inserts.
+The mechanism is to create one or more points, group them into a batch (aka
+*batch points*), and write the batch to a given database and series. A series is
+a combination of a measurement (time/values) and a set of tags.
+
+In this sample we will create a batch of 1,000 points. Each point has a time and
+two field values (idle and busy) as well as 3 tags identifying the cpu, host and
+region. We write these points to a database called _systemstats_ using a
+measurement named _cpu_usage_.
+
+NOTE: You can specify a RetentionPolicy as part of the batch points. If not
+provided, InfluxDB will use the database's _default_ retention policy.
+
+```go
+func writePoints(clnt client.Client) {
+	sampleSize := 1000
+	rand.Seed(42)
+
+	bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
+		Database:  "systemstats",
+		Precision: "us",
+	})
+
+	for i := 0; i < sampleSize; i++ {
+		regions := []string{"us-west1", "us-west2", "us-west3", "us-east1"}
+		tags := map[string]string{
+			"cpu":    "cpu-total",
+			"host":   fmt.Sprintf("host%d", rand.Intn(1000)),
+			"region": regions[rand.Intn(len(regions))],
+		}
+
+		idle := rand.Float64() * 100.0
+		fields := map[string]interface{}{
+			"idle": idle,
+			"busy": 100.0 - idle,
+		}
+
+		pt, err := client.NewPoint(
+			"cpu_usage",
+			tags,
+			fields,
+			time.Now(),
+		)
+		if err != nil {
+			log.Fatal(err)
+		}
+		bp.AddPoint(pt)
+	}
+
+	err := clnt.Write(bp)
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+```
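+
+As noted above, you can bind the whole batch to a specific retention policy
+instead of the default. A minimal sketch (the policy name `two_weeks` is
+hypothetical and must already exist on the server):
+
+```go
+bp, err := client.NewBatchPoints(client.BatchPointsConfig{
+	Database:        "systemstats",
+	RetentionPolicy: "two_weeks",
+	Precision:       "us",
+})
+```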
+
+
+### Querying Data
+
+One nice advantage of using **InfluxDB** is the ability to query your data using
+familiar SQL constructs. In this example we can create a convenience function to
+query the database as follows:
+
+```go
+// queryDB convenience function to query the database
+func queryDB(clnt client.Client, cmd string) (res []client.Result, err error) {
+	q := client.Query{
+		Command:  cmd,
+		Database: MyDB,
+	}
+	if response, err := clnt.Query(q); err == nil {
+		if response.Error() != nil {
+			return res, response.Error()
+		}
+		res = response.Results
+	} else {
+		return res, err
+	}
+	return res, nil
+}
+```
+
+#### Creating a Database
+
+```go
+_, err := queryDB(clnt, fmt.Sprintf("CREATE DATABASE %s", MyDB))
+if err != nil {
+	log.Fatal(err)
+}
+```
+
+#### Count Records
+
+```go
+q := fmt.Sprintf("SELECT count(%s) FROM %s", "value", MyMeasurement)
+res, err := queryDB(clnt, q)
+if err != nil {
+	log.Fatal(err)
+}
+count := res[0].Series[0].Values[0][1]
+log.Printf("Found a total of %v records\n", count)
+```
+
+#### Find the last 10 _shapes_ records
+
+```go
+q := fmt.Sprintf("SELECT * FROM %s LIMIT %d", MyMeasurement, 20)
+res, err = queryDB(clnt, q)
+if err != nil {
+	log.Fatal(err)
+}
+
+for i, row := range res[0].Series[0].Values {
+	t, err := time.Parse(time.RFC3339, row[0].(string))
+	if err != nil {
+		log.Fatal(err)
+	}
+	val := row[1].(string)
+	log.Printf("[%2d] %s: %s\n", i, t.Format(time.Stamp), val)
+}
+```
+
+### Using the UDP Client
+
+The **InfluxDB** client also supports writing over UDP.
+
+```go
+func WriteUDP() {
+	// Make client
+	c, err := client.NewUDPClient(client.UDPConfig{Addr: "localhost:8089"})
+	if err != nil {
+		panic(err.Error())
+	}
+
+	// Create a new point batch
+	bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
+		Precision: "s",
+	})
+
+	// Create a point and add to batch
+	tags := map[string]string{"cpu": "cpu-total"}
+	fields := map[string]interface{}{
+		"idle":   10.1,
+		"system": 53.3,
+		"user":   46.6,
+	}
+	pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
+	if err != nil {
+		panic(err.Error())
+	}
+	bp.AddPoint(pt)
+
+	// Write the batch
+	c.Write(bp)
+}
+```
+
+## Go Docs
+
+Please refer to
+[http://godoc.org/github.com/influxdata/influxdb/client/v2](http://godoc.org/github.com/influxdata/influxdb/client/v2)
+for documentation.
+
+## See Also
+
+You can also examine how the client library is used by the
+[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go).

+ 789 - 0
Godeps/_workspace/src/github.com/influxdata/influxdb/client/influxdb.go

@@ -0,0 +1,789 @@
+package client
+
+import (
+	"bytes"
+	"crypto/tls"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/influxdata/influxdb/models"
+)
+
+const (
+	// DefaultHost is the default host used to connect to an InfluxDB instance
+	DefaultHost = "localhost"
+
+	// DefaultPort is the default port used to connect to an InfluxDB instance
+	DefaultPort = 8086
+
+	// DefaultTimeout is the default connection timeout used to connect to an InfluxDB instance
+	DefaultTimeout = 0
+)
+
+// Query is used to send a command to the server. Both Command and Database are required.
+type Query struct {
+	Command  string
+	Database string
+
+	// Chunked tells the server to send back chunked responses. This places
+	// less load on the server by sending back chunks of the response rather
+	// than waiting for the entire response all at once.
+	Chunked bool
+
+	// ChunkSize sets the maximum number of rows that will be returned per
+	// chunk. Chunks are either divided based on their series or if they hit
+	// the chunk size limit.
+	//
+	// Chunked must be set to true for this option to be used.
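+	//
+	// For example (names illustrative):
+	//   q := Query{Command: "SELECT * FROM cpu", Database: "mydb",
+	//              Chunked: true, ChunkSize: 100}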
+	ChunkSize int
+}
+
+// ParseConnectionString will parse a string to create a valid connection URL
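+//
+// For example, ParseConnectionString("localhost:8086", false) returns the URL
+// http://localhost:8086; an empty path falls back to DefaultHost and
+// DefaultPort, yielding the same URL.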
+func ParseConnectionString(path string, ssl bool) (url.URL, error) {
+	var host string
+	var port int
+
+	h, p, err := net.SplitHostPort(path)
+	if err != nil {
+		if path == "" {
+			host = DefaultHost
+		} else {
+			host = path
+		}
+		// If they didn't specify a port, always use the default port
+		port = DefaultPort
+	} else {
+		host = h
+		port, err = strconv.Atoi(p)
+		if err != nil {
+			return url.URL{}, fmt.Errorf("invalid port number %q: %s\n", path, err)
+		}
+	}
+
+	u := url.URL{
+		Scheme: "http",
+	}
+	if ssl {
+		u.Scheme = "https"
+	}
+
+	u.Host = net.JoinHostPort(host, strconv.Itoa(port))
+
+	return u, nil
+}
+
+// Config is used to specify what server to connect to.
+// URL: The URL of the server to connect to.
+// Username/Password are optional. They will be passed via basic auth if provided.
+// UserAgent: If not provided, will default to "InfluxDBClient".
+// Timeout: If not provided, will default to 0 (no timeout).
+type Config struct {
+	URL       url.URL
+	Username  string
+	Password  string
+	UserAgent string
+	Timeout   time.Duration
+	Precision string
+	UnsafeSsl bool
+}
+
+// NewConfig will create a config to be used in connecting to the client
+func NewConfig() Config {
+	return Config{
+		Timeout: DefaultTimeout,
+	}
+}
+
+// Client is used to make calls to the server.
+type Client struct {
+	url        url.URL
+	username   string
+	password   string
+	httpClient *http.Client
+	userAgent  string
+	precision  string
+}
+
+const (
+	// ConsistencyOne requires at least one data node acknowledged a write.
+	ConsistencyOne = "one"
+
+	// ConsistencyAll requires all data nodes to acknowledge a write.
+	ConsistencyAll = "all"
+
+	// ConsistencyQuorum requires a quorum of data nodes to acknowledge a write.
+	ConsistencyQuorum = "quorum"
+
+	// ConsistencyAny allows for hinted handoff, potentially no write happened yet.
+	ConsistencyAny = "any"
+)
+
+// NewClient will instantiate and return a connected client to issue commands to the server.
+func NewClient(c Config) (*Client, error) {
+	tlsConfig := &tls.Config{
+		InsecureSkipVerify: c.UnsafeSsl,
+	}
+
+	tr := &http.Transport{
+		TLSClientConfig: tlsConfig,
+	}
+
+	client := Client{
+		url:        c.URL,
+		username:   c.Username,
+		password:   c.Password,
+		httpClient: &http.Client{Timeout: c.Timeout, Transport: tr},
+		userAgent:  c.UserAgent,
+		precision:  c.Precision,
+	}
+	if client.userAgent == "" {
+		client.userAgent = "InfluxDBClient"
+	}
+	return &client, nil
+}
+
+// SetAuth will update the username and password
+func (c *Client) SetAuth(u, p string) {
+	c.username = u
+	c.password = p
+}
+
+// SetPrecision will update the precision
+func (c *Client) SetPrecision(precision string) {
+	c.precision = precision
+}
+
+// Query sends a command to the server and returns the Response
+func (c *Client) Query(q Query) (*Response, error) {
+	u := c.url
+
+	u.Path = "query"
+	values := u.Query()
+	values.Set("q", q.Command)
+	values.Set("db", q.Database)
+	if q.Chunked {
+		values.Set("chunked", "true")
+		if q.ChunkSize > 0 {
+			values.Set("chunk_size", strconv.Itoa(q.ChunkSize))
+		}
+	}
+	if c.precision != "" {
+		values.Set("epoch", c.precision)
+	}
+	u.RawQuery = values.Encode()
+
+	req, err := http.NewRequest("POST", u.String(), nil)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("User-Agent", c.userAgent)
+	if c.username != "" {
+		req.SetBasicAuth(c.username, c.password)
+	}
+
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var response Response
+	if q.Chunked {
+		cr := NewChunkedResponse(resp.Body)
+		for {
+			r, err := cr.NextResponse()
+			if err != nil {
+				// If we got an error while decoding the response, send that back.
+				return nil, err
+			}
+
+			if r == nil {
+				break
+			}
+
+			response.Results = append(response.Results, r.Results...)
+			if r.Err != nil {
+				response.Err = r.Err
+				break
+			}
+		}
+	} else {
+		dec := json.NewDecoder(resp.Body)
+		dec.UseNumber()
+		if err := dec.Decode(&response); err != nil {
+			// Ignore EOF errors if we got an invalid status code.
+			if !(err == io.EOF && resp.StatusCode != http.StatusOK) {
+				return nil, err
+			}
+		}
+	}
+
+	// If we don't have an error in our json response, and didn't get StatusOK,
+	// then send back an error.
+	if resp.StatusCode != http.StatusOK && response.Error() == nil {
+		return &response, fmt.Errorf("received status code %d from server", resp.StatusCode)
+	}
+	return &response, nil
+}
+
+// Write takes BatchPoints and allows for writing of multiple points with defaults
+// If successful, error is nil and Response is nil
+// If an error occurs, Response may contain additional information if populated.
+func (c *Client) Write(bp BatchPoints) (*Response, error) {
+	u := c.url
+	u.Path = "write"
+
+	var b bytes.Buffer
+	for _, p := range bp.Points {
+		err := checkPointTypes(p)
+		if err != nil {
+			return nil, err
+		}
+		if p.Raw != "" {
+			if _, err := b.WriteString(p.Raw); err != nil {
+				return nil, err
+			}
+		} else {
+			for k, v := range bp.Tags {
+				if p.Tags == nil {
+					p.Tags = make(map[string]string, len(bp.Tags))
+				}
+				p.Tags[k] = v
+			}
+
+			if _, err := b.WriteString(p.MarshalString()); err != nil {
+				return nil, err
+			}
+		}
+
+		if err := b.WriteByte('\n'); err != nil {
+			return nil, err
+		}
+	}
+
+	req, err := http.NewRequest("POST", u.String(), &b)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Content-Type", "")
+	req.Header.Set("User-Agent", c.userAgent)
+	if c.username != "" {
+		req.SetBasicAuth(c.username, c.password)
+	}
+
+	precision := bp.Precision
+	if precision == "" {
+		precision = c.precision
+	}
+
+	params := req.URL.Query()
+	params.Set("db", bp.Database)
+	params.Set("rp", bp.RetentionPolicy)
+	params.Set("precision", precision)
+	params.Set("consistency", bp.WriteConsistency)
+	req.URL.RawQuery = params.Encode()
+
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var response Response
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
+		var err = fmt.Errorf(string(body))
+		response.Err = err
+		return &response, err
+	}
+
+	return nil, nil
+}
+
+// WriteLineProtocol takes a string of line protocol data, with newlines delimiting each write
+// If successful, error is nil and Response is nil
+// If an error occurs, Response may contain additional information if populated.
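+//
+// For example (the database name "mydb" is illustrative):
+//   resp, err := c.WriteLineProtocol("cpu,host=server01 value=0.64", "mydb", "", "s", "")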
+func (c *Client) WriteLineProtocol(data, database, retentionPolicy, precision, writeConsistency string) (*Response, error) {
+	u := c.url
+	u.Path = "write"
+
+	r := strings.NewReader(data)
+
+	req, err := http.NewRequest("POST", u.String(), r)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Content-Type", "")
+	req.Header.Set("User-Agent", c.userAgent)
+	if c.username != "" {
+		req.SetBasicAuth(c.username, c.password)
+	}
+	params := req.URL.Query()
+	params.Set("db", database)
+	params.Set("rp", retentionPolicy)
+	params.Set("precision", precision)
+	params.Set("consistency", writeConsistency)
+	req.URL.RawQuery = params.Encode()
+
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var response Response
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
+		err := fmt.Errorf(string(body))
+		response.Err = err
+		return &response, err
+	}
+
+	return nil, nil
+}
+
+// Ping will check to see if the server is up
+// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred.
+func (c *Client) Ping() (time.Duration, string, error) {
+	now := time.Now()
+	u := c.url
+	u.Path = "ping"
+
+	req, err := http.NewRequest("GET", u.String(), nil)
+	if err != nil {
+		return 0, "", err
+	}
+	req.Header.Set("User-Agent", c.userAgent)
+	if c.username != "" {
+		req.SetBasicAuth(c.username, c.password)
+	}
+
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		return 0, "", err
+	}
+	defer resp.Body.Close()
+
+	version := resp.Header.Get("X-Influxdb-Version")
+	return time.Since(now), version, nil
+}
+
+// Structs
+
+// Message represents a user message.
+type Message struct {
+	Level string `json:"level,omitempty"`
+	Text  string `json:"text,omitempty"`
+}
+
+// Result represents a resultset returned from a single statement.
+type Result struct {
+	Series   []models.Row
+	Messages []*Message
+	Err      error
+}
+
+// MarshalJSON encodes the result into JSON.
+func (r *Result) MarshalJSON() ([]byte, error) {
+	// Define a struct that outputs "error" as a string.
+	var o struct {
+		Series   []models.Row `json:"series,omitempty"`
+		Messages []*Message   `json:"messages,omitempty"`
+		Err      string       `json:"error,omitempty"`
+	}
+
+	// Copy fields to output struct.
+	o.Series = r.Series
+	o.Messages = r.Messages
+	if r.Err != nil {
+		o.Err = r.Err.Error()
+	}
+
+	return json.Marshal(&o)
+}
+
+// UnmarshalJSON decodes the data into the Result struct
+func (r *Result) UnmarshalJSON(b []byte) error {
+	var o struct {
+		Series   []models.Row `json:"series,omitempty"`
+		Messages []*Message   `json:"messages,omitempty"`
+		Err      string       `json:"error,omitempty"`
+	}
+
+	dec := json.NewDecoder(bytes.NewBuffer(b))
+	dec.UseNumber()
+	err := dec.Decode(&o)
+	if err != nil {
+		return err
+	}
+	r.Series = o.Series
+	r.Messages = o.Messages
+	if o.Err != "" {
+		r.Err = errors.New(o.Err)
+	}
+	return nil
+}
+
+// Response represents a list of statement results.
+type Response struct {
+	Results []Result
+	Err     error
+}
+
+// MarshalJSON encodes the response into JSON.
+func (r *Response) MarshalJSON() ([]byte, error) {
+	// Define a struct that outputs "error" as a string.
+	var o struct {
+		Results []Result `json:"results,omitempty"`
+		Err     string   `json:"error,omitempty"`
+	}
+
+	// Copy fields to output struct.
+	o.Results = r.Results
+	if r.Err != nil {
+		o.Err = r.Err.Error()
+	}
+
+	return json.Marshal(&o)
+}
+
+// UnmarshalJSON decodes the data into the Response struct
+func (r *Response) UnmarshalJSON(b []byte) error {
+	var o struct {
+		Results []Result `json:"results,omitempty"`
+		Err     string   `json:"error,omitempty"`
+	}
+
+	dec := json.NewDecoder(bytes.NewBuffer(b))
+	dec.UseNumber()
+	err := dec.Decode(&o)
+	if err != nil {
+		return err
+	}
+	r.Results = o.Results
+	if o.Err != "" {
+		r.Err = errors.New(o.Err)
+	}
+	return nil
+}
+
+// Error returns the first error from any statement.
+// Returns nil if no errors occurred on any statements.
+func (r *Response) Error() error {
+	if r.Err != nil {
+		return r.Err
+	}
+	for _, result := range r.Results {
+		if result.Err != nil {
+			return result.Err
+		}
+	}
+	return nil
+}
+
+// ChunkedResponse represents a response from the server that
+// uses chunking to stream the output.
+type ChunkedResponse struct {
+	dec *json.Decoder
+}
+
+// NewChunkedResponse reads a stream and produces responses from the stream.
+func NewChunkedResponse(r io.Reader) *ChunkedResponse {
+	dec := json.NewDecoder(r)
+	dec.UseNumber()
+	return &ChunkedResponse{dec: dec}
+}
+
+// NextResponse reads the next line of the stream and returns a response.
+func (r *ChunkedResponse) NextResponse() (*Response, error) {
+	var response Response
+	if err := r.dec.Decode(&response); err != nil {
+		if err == io.EOF {
+			return nil, nil
+		}
+		return nil, err
+	}
+	return &response, nil
+}
+
+// Point defines the fields that will be written to the database
+// Measurement, Time, and Fields are required
+// Precision can be specified if the time is in epoch format (integer).
+// Valid values for Precision are n, u, ms, s, m, and h
+type Point struct {
+	Measurement string
+	Tags        map[string]string
+	Time        time.Time
+	Fields      map[string]interface{}
+	Precision   string
+	Raw         string
+}
+
+// MarshalJSON will format the time in RFC3339Nano
+// Precision is also ignored as it is only used for writing, not reading
+// In other words, we always send times back in nanosecond precision
+func (p *Point) MarshalJSON() ([]byte, error) {
+	point := struct {
+		Measurement string                 `json:"measurement,omitempty"`
+		Tags        map[string]string      `json:"tags,omitempty"`
+		Time        string                 `json:"time,omitempty"`
+		Fields      map[string]interface{} `json:"fields,omitempty"`
+		Precision   string                 `json:"precision,omitempty"`
+	}{
+		Measurement: p.Measurement,
+		Tags:        p.Tags,
+		Fields:      p.Fields,
+		Precision:   p.Precision,
+	}
+	// Let it omit empty if it's really zero
+	if !p.Time.IsZero() {
+		point.Time = p.Time.UTC().Format(time.RFC3339Nano)
+	}
+	return json.Marshal(&point)
+}
+
+// MarshalString renders a string representation of a Point with the specified
+// precision. The default precision is nanoseconds.
+func (p *Point) MarshalString() string {
+	pt, err := models.NewPoint(p.Measurement, p.Tags, p.Fields, p.Time)
+	if err != nil {
+		return "# ERROR: " + err.Error() + " " + p.Measurement
+	}
+	if p.Precision == "" || p.Precision == "ns" || p.Precision == "n" {
+		return pt.String()
+	}
+	return pt.PrecisionString(p.Precision)
+}
+
+// UnmarshalJSON decodes the data into the Point struct
+func (p *Point) UnmarshalJSON(b []byte) error {
+	var normal struct {
+		Measurement string                 `json:"measurement"`
+		Tags        map[string]string      `json:"tags"`
+		Time        time.Time              `json:"time"`
+		Precision   string                 `json:"precision"`
+		Fields      map[string]interface{} `json:"fields"`
+	}
+	var epoch struct {
+		Measurement string                 `json:"measurement"`
+		Tags        map[string]string      `json:"tags"`
+		Time        *int64                 `json:"time"`
+		Precision   string                 `json:"precision"`
+		Fields      map[string]interface{} `json:"fields"`
+	}
+
+	if err := func() error {
+		var err error
+		dec := json.NewDecoder(bytes.NewBuffer(b))
+		dec.UseNumber()
+		if err = dec.Decode(&epoch); err != nil {
+			return err
+		}
+		// Convert from epoch to time.Time, but only if Time
+		// was actually set.
+		var ts time.Time
+		if epoch.Time != nil {
+			ts, err = EpochToTime(*epoch.Time, epoch.Precision)
+			if err != nil {
+				return err
+			}
+		}
+		p.Measurement = epoch.Measurement
+		p.Tags = epoch.Tags
+		p.Time = ts
+		p.Precision = epoch.Precision
+		p.Fields = normalizeFields(epoch.Fields)
+		return nil
+	}(); err == nil {
+		return nil
+	}
+
+	dec := json.NewDecoder(bytes.NewBuffer(b))
+	dec.UseNumber()
+	if err := dec.Decode(&normal); err != nil {
+		return err
+	}
+	normal.Time = SetPrecision(normal.Time, normal.Precision)
+	p.Measurement = normal.Measurement
+	p.Tags = normal.Tags
+	p.Time = normal.Time
+	p.Precision = normal.Precision
+	p.Fields = normalizeFields(normal.Fields)
+
+	return nil
+}
+
+// Remove any notion of json.Number
+func normalizeFields(fields map[string]interface{}) map[string]interface{} {
+	newFields := map[string]interface{}{}
+
+	for k, v := range fields {
+		switch v := v.(type) {
+		case json.Number:
+			jv, e := v.Float64()
+			if e != nil {
+				panic(fmt.Sprintf("unable to convert json.Number to float64: %s", e))
+			}
+			newFields[k] = jv
+		default:
+			newFields[k] = v
+		}
+	}
+	return newFields
+}
+
+// BatchPoints is used to send batched data in a single write.
+// Database and Points are required
+// If no retention policy is specified, it will use the database's default retention policy.
+// If tags are specified, they will be "merged" with all points. If a point already has that tag, it will be ignored.
+// If time is specified, it will be applied to any point with an empty time.
+// Precision can be specified if the time is in epoch format (integer).
+// Valid values for Precision are n, u, ms, s, m, and h
+type BatchPoints struct {
+	Points           []Point           `json:"points,omitempty"`
+	Database         string            `json:"database,omitempty"`
+	RetentionPolicy  string            `json:"retentionPolicy,omitempty"`
+	Tags             map[string]string `json:"tags,omitempty"`
+	Time             time.Time         `json:"time,omitempty"`
+	Precision        string            `json:"precision,omitempty"`
+	WriteConsistency string            `json:"-"`
+}
+
+// UnmarshalJSON decodes the data into the BatchPoints struct
+func (bp *BatchPoints) UnmarshalJSON(b []byte) error {
+	var normal struct {
+		Points          []Point           `json:"points"`
+		Database        string            `json:"database"`
+		RetentionPolicy string            `json:"retentionPolicy"`
+		Tags            map[string]string `json:"tags"`
+		Time            time.Time         `json:"time"`
+		Precision       string            `json:"precision"`
+	}
+	var epoch struct {
+		Points          []Point           `json:"points"`
+		Database        string            `json:"database"`
+		RetentionPolicy string            `json:"retentionPolicy"`
+		Tags            map[string]string `json:"tags"`
+		Time            *int64            `json:"time"`
+		Precision       string            `json:"precision"`
+	}
+
+	if err := func() error {
+		var err error
+		if err = json.Unmarshal(b, &epoch); err != nil {
+			return err
+		}
+		// Convert from epoch to time.Time
+		var ts time.Time
+		if epoch.Time != nil {
+			ts, err = EpochToTime(*epoch.Time, epoch.Precision)
+			if err != nil {
+				return err
+			}
+		}
+		bp.Points = epoch.Points
+		bp.Database = epoch.Database
+		bp.RetentionPolicy = epoch.RetentionPolicy
+		bp.Tags = epoch.Tags
+		bp.Time = ts
+		bp.Precision = epoch.Precision
+		return nil
+	}(); err == nil {
+		return nil
+	}
+
+	if err := json.Unmarshal(b, &normal); err != nil {
+		return err
+	}
+	normal.Time = SetPrecision(normal.Time, normal.Precision)
+	bp.Points = normal.Points
+	bp.Database = normal.Database
+	bp.RetentionPolicy = normal.RetentionPolicy
+	bp.Tags = normal.Tags
+	bp.Time = normal.Time
+	bp.Precision = normal.Precision
+
+	return nil
+}
+
+// utility functions
+
+// Addr provides the current url as a string of the server the client is connected to.
+func (c *Client) Addr() string {
+	return c.url.String()
+}
+
+// checkPointTypes ensures no unsupported types are submitted to influxdb, returning error if they are found.
+func checkPointTypes(p Point) error {
+	for _, v := range p.Fields {
+		switch v.(type) {
+		case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, float32, float64, bool, string, nil:
+			// supported type; keep checking the remaining fields
+		default:
+			return fmt.Errorf("unsupported point type: %T", v)
+		}
+	}
+	return nil
+}
+
+// helper functions
+
+// EpochToTime takes a unix epoch time and uses precision to return back a time.Time
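+//
+// For example, EpochToTime(1434055562, "s") and EpochToTime(1434055562000, "ms")
+// refer to the same instant.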
+func EpochToTime(epoch int64, precision string) (time.Time, error) {
+	if precision == "" {
+		precision = "s"
+	}
+	var t time.Time
+	switch precision {
+	case "h":
+		t = time.Unix(0, epoch*int64(time.Hour))
+	case "m":
+		t = time.Unix(0, epoch*int64(time.Minute))
+	case "s":
+		t = time.Unix(0, epoch*int64(time.Second))
+	case "ms":
+		t = time.Unix(0, epoch*int64(time.Millisecond))
+	case "u":
+		t = time.Unix(0, epoch*int64(time.Microsecond))
+	case "n":
+		t = time.Unix(0, epoch)
+	default:
+		return time.Time{}, fmt.Errorf("Unknown precision %q", precision)
+	}
+	return t, nil
+}
+
+// SetPrecision will round a time to the specified precision
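+//
+// For example, SetPrecision(t, "m") rounds t to the nearest minute, while
+// precision "n" returns t unchanged.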
+func SetPrecision(t time.Time, precision string) time.Time {
+	switch precision {
+	case "n":
+	case "u":
+		return t.Round(time.Microsecond)
+	case "ms":
+		return t.Round(time.Millisecond)
+	case "s":
+		return t.Round(time.Second)
+	case "m":
+		return t.Round(time.Minute)
+	case "h":
+		return t.Round(time.Hour)
+	}
+	return t
+}

+ 46 - 0
Godeps/_workspace/src/github.com/influxdata/influxdb/models/consistency.go

@@ -0,0 +1,46 @@
+package models
+
+import (
+	"errors"
+	"strings"
+)
+
+// ConsistencyLevel represents a required replication criterion before a write can
+// be returned as successful
+type ConsistencyLevel int
+
+const (
+	// ConsistencyLevelAny allows for hinted handoff, potentially no write happened yet
+	ConsistencyLevelAny ConsistencyLevel = iota
+
+	// ConsistencyLevelOne requires at least one data node acknowledged a write
+	ConsistencyLevelOne
+
+	// ConsistencyLevelQuorum requires a quorum of data nodes to acknowledge a write
+	ConsistencyLevelQuorum
+
+	// ConsistencyLevelAll requires all data nodes to acknowledge a write
+	ConsistencyLevelAll
+)
+
+var (
+	// ErrInvalidConsistencyLevel is returned when parsing the string version
+	// of a consistency level.
+	ErrInvalidConsistencyLevel = errors.New("invalid consistency level")
+)
+
+// ParseConsistencyLevel converts a consistency level string to the corresponding ConsistencyLevel const
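+//
+// For example, ParseConsistencyLevel("quorum") returns ConsistencyLevelQuorum,
+// and an unrecognized string returns ErrInvalidConsistencyLevel.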
+func ParseConsistencyLevel(level string) (ConsistencyLevel, error) {
+	switch strings.ToLower(level) {
+	case "any":
+		return ConsistencyLevelAny, nil
+	case "one":
+		return ConsistencyLevelOne, nil
+	case "quorum":
+		return ConsistencyLevelQuorum, nil
+	case "all":
+		return ConsistencyLevelAll, nil
+	default:
+		return 0, ErrInvalidConsistencyLevel
+	}
+}

+ 1576 - 0
Godeps/_workspace/src/github.com/influxdata/influxdb/models/points.go

@@ -0,0 +1,1576 @@
+package models
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"hash/fnv"
+	"math"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/influxdata/influxdb/pkg/escape"
+)
+
+var (
+	measurementEscapeCodes = map[byte][]byte{
+		',': []byte(`\,`),
+		' ': []byte(`\ `),
+	}
+
+	tagEscapeCodes = map[byte][]byte{
+		',': []byte(`\,`),
+		' ': []byte(`\ `),
+		'=': []byte(`\=`),
+	}
+
+	ErrPointMustHaveAField  = errors.New("point without fields is unsupported")
+	ErrInvalidNumber        = errors.New("invalid number")
+	ErrMaxKeyLengthExceeded = errors.New("max key length exceeded")
+)
+
+const (
+	MaxKeyLength = 65535
+)
+
+// Point defines the values that will be written to the database
+type Point interface {
+	Name() string
+	SetName(string)
+
+	Tags() Tags
+	AddTag(key, value string)
+	SetTags(tags Tags)
+
+	Fields() Fields
+
+	Time() time.Time
+	SetTime(t time.Time)
+	UnixNano() int64
+
+	HashID() uint64
+	Key() []byte
+
+	Data() []byte
+	SetData(buf []byte)
+
+	// String returns a string representation of the point, if there is a
+	// timestamp associated with the point then it will be specified with the default
+	// precision of nanoseconds
+	String() string
+
+	// Bytes returns a []byte representation of the point similar to string.
+	MarshalBinary() ([]byte, error)
+
+	// PrecisionString returns a string representation of the point, if there
+	// is a timestamp associated with the point then it will be specified in the
+	// given unit
+	PrecisionString(precision string) string
+
+	// RoundedString returns a string representation of the point, if there
+	// is a timestamp associated with the point, then it will be rounded to the
+	// given duration
+	RoundedString(d time.Duration) string
+}
+
+// Points represents a sortable list of points by timestamp.
+type Points []Point
+
+func (a Points) Len() int           { return len(a) }
+func (a Points) Less(i, j int) bool { return a[i].Time().Before(a[j].Time()) }
+func (a Points) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+
+// point is the default implementation of Point.
+type point struct {
+	time time.Time
+
+	// text encoding of measurement and tags
+	// key must always be stored sorted by tags, if the original line was not sorted,
+	// we need to resort it
+	key []byte
+
+	// text encoding of field data
+	fields []byte
+
+	// text encoding of timestamp
+	ts []byte
+
+	// binary encoded field data
+	data []byte
+
+	// cached version of parsed fields from data
+	cachedFields map[string]interface{}
+
+	// cached version of parsed name from key
+	cachedName string
+}
+
+const (
+	// the number of characters for the largest possible int64 (9223372036854775807)
+	maxInt64Digits = 19
+
+	// the number of characters for the smallest possible int64 (-9223372036854775808)
+	minInt64Digits = 20
+
+	// the number of characters required for the largest float64 before a range check
+	// would occur during parsing
+	maxFloat64Digits = 25
+
+	// the number of characters required for the smallest float64 before a range check
+	// would occur during parsing
+	minFloat64Digits = 27
+)
+
+// ParsePoints returns a slice of Points from a text representation of a point
+// with each point separated by newlines.  If any points fail to parse, a non-nil error
+// will be returned in addition to the points that parsed successfully.
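+//
+// For example, the single line
+//   cpu,host=serverA,region=us-west idle=0.64 1434055562000000000
+// parses to one point with measurement "cpu", two tags, one float field and a
+// nanosecond-precision timestamp.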
+func ParsePoints(buf []byte) ([]Point, error) {
+	return ParsePointsWithPrecision(buf, time.Now().UTC(), "n")
+}
+
+// ParsePointsString is identical to ParsePoints but accepts a string
+// buffer.
+func ParsePointsString(buf string) ([]Point, error) {
+	return ParsePoints([]byte(buf))
+}
+
+// ParseKey returns the measurement name and tags from a point.
+func ParseKey(buf string) (string, Tags, error) {
+	// Ignore the error because scanMeasurement returns "missing fields" which we ignore
+	// when just parsing a key
+	state, i, _ := scanMeasurement([]byte(buf), 0)
+
+	var tags Tags
+	if state == tagKeyState {
+		tags = parseTags([]byte(buf))
+		// scanMeasurement returns the location of the comma if there are tags, strip that off
+		return string(buf[:i-1]), tags, nil
+	}
+	return string(buf[:i]), tags, nil
+}
+
+// ParsePointsWithPrecision is similar to ParsePoints, but allows the
+// caller to provide a precision for time.
+func ParsePointsWithPrecision(buf []byte, defaultTime time.Time, precision string) ([]Point, error) {
+	points := []Point{}
+	var (
+		pos    int
+		block  []byte
+		failed []string
+	)
+	for {
+		pos, block = scanLine(buf, pos)
+		pos++
+
+		if len(block) == 0 {
+			break
+		}
+
+		// lines which start with '#' are comments
+		start := skipWhitespace(block, 0)
+
+		// If line is all whitespace, just skip it
+		if start >= len(block) {
+			continue
+		}
+
+		if block[start] == '#' {
+			continue
+		}
+
+		// strip the newline if one is present
+		if block[len(block)-1] == '\n' {
+			block = block[:len(block)-1]
+		}
+
+		pt, err := parsePoint(block[start:len(block)], defaultTime, precision)
+		if err != nil {
+			failed = append(failed, fmt.Sprintf("unable to parse '%s': %v", string(block[start:len(block)]), err))
+		} else {
+			points = append(points, pt)
+		}
+
+		if pos >= len(buf) {
+			break
+		}
+
+	}
+	if len(failed) > 0 {
+		return points, fmt.Errorf("%s", strings.Join(failed, "\n"))
+	}
+	return points, nil
+
+}
+
+func parsePoint(buf []byte, defaultTime time.Time, precision string) (Point, error) {
+	// scan the first block which is measurement[,tag1=value1,tag2=value2...]
+	pos, key, err := scanKey(buf, 0)
+	if err != nil {
+		return nil, err
+	}
+
+	// measurement name is required
+	if len(key) == 0 {
+		return nil, fmt.Errorf("missing measurement")
+	}
+
+	if len(key) > MaxKeyLength {
+		return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength)
+	}
+
+	// scan the second block, which is field1=value1[,field2=value2,...]
+	pos, fields, err := scanFields(buf, pos)
+	if err != nil {
+		return nil, err
+	}
+
+	// at least one field is required
+	if len(fields) == 0 {
+		return nil, fmt.Errorf("missing fields")
+	}
+
+	// scan the last block which is an optional integer timestamp
+	pos, ts, err := scanTime(buf, pos)
+
+	if err != nil {
+		return nil, err
+	}
+
+	pt := &point{
+		key:    key,
+		fields: fields,
+		ts:     ts,
+	}
+
+	if len(ts) == 0 {
+		pt.time = defaultTime
+		pt.SetPrecision(precision)
+	} else {
+		ts, err := strconv.ParseInt(string(ts), 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		pt.time, err = SafeCalcTime(ts, precision)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return pt, nil
+}
+
+// GetPrecisionMultiplier will return a multiplier for the precision specified
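+//
+// For example, GetPrecisionMultiplier("ms") returns int64(time.Millisecond),
+// i.e. 1e6 nanoseconds.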
+func GetPrecisionMultiplier(precision string) int64 {
+	d := time.Nanosecond
+	switch precision {
+	case "u":
+		d = time.Microsecond
+	case "ms":
+		d = time.Millisecond
+	case "s":
+		d = time.Second
+	case "m":
+		d = time.Minute
+	case "h":
+		d = time.Hour
+	}
+	return int64(d)
+}
+
+// scanKey scans buf starting at i for the measurement and tag portion of the point.
+// It returns the ending position and the byte slice of key within buf.  If there
+// are tags, they will be sorted if they are not already.
+func scanKey(buf []byte, i int) (int, []byte, error) {
+	start := skipWhitespace(buf, i)
+
+	i = start
+
+	// Determines whether the tags are sorted; assume they are
+	sorted := true
+
+	// indices holds the indexes within buf of the start of each tag.  For example,
+	// a buf of 'cpu,host=a,region=b,zone=c' would have an indices slice of [4,11,20],
+	// which indicates that the first tag starts at buf[4], the second at buf[11], and
+	// the last at buf[20]
+	indices := make([]int, 100)
+
+	// tracks how many commas we've seen so we know how many entries in
+	// indices are in use. Since indices is an arbitrarily large slice,
+	// we need to know how many of its values are valid.
+	commas := 0
+
+	// First scan the Point's measurement.
+	state, i, err := scanMeasurement(buf, i)
+	if err != nil {
+		return i, buf[start:i], err
+	}
+
+	// Optionally scan tags if needed.
+	if state == tagKeyState {
+		i, commas, indices, err = scanTags(buf, i, indices)
+		if err != nil {
+			return i, buf[start:i], err
+		}
+	}
+
+	// Now we know where the key region is within buf, and the locations of tags, we
+	// need to determine if duplicate tags exist and if the tags are sorted.  This iterates
+	// 1/2 of the list comparing each end with each other, walking towards the center from
+	// both sides.
+	for j := 0; j < commas/2; j++ {
+		// get the left and right tags
+		_, left := scanTo(buf[indices[j]:indices[j+1]-1], 0, '=')
+		_, right := scanTo(buf[indices[commas-j-1]:indices[commas-j]-1], 0, '=')
+
+		// If the tags are equal, then there are duplicate tags, and we should abort
+		if bytes.Equal(left, right) {
+			return i, buf[start:i], fmt.Errorf("duplicate tags")
+		}
+
+		// If left is greater than right, the tags are not sorted.  We must continue
+		// since there could still be duplicate tags.
+		if bytes.Compare(left, right) > 0 {
+			sorted = false
+		}
+	}
+
+	// If the tags are not sorted, then sort them.  This sort is inline and
+	// uses the tag indices we created earlier.  The actual buffer is not sorted; the
+	// indices are, using the buffer for value comparison.  After the indices are
+	// sorted, the buffer is reconstructed from the sorted indices.
+	if !sorted && commas > 0 {
+		// Get the measurement name for later
+		measurement := buf[start : indices[0]-1]
+
+		// Sort the indices
+		indices := indices[:commas]
+		insertionSort(0, commas, buf, indices)
+
+		// Create a new key using the measurement and sorted indices
+		b := make([]byte, len(buf[start:i]))
+		pos := copy(b, measurement)
+		for _, i := range indices {
+			b[pos] = ','
+			pos++
+			_, v := scanToSpaceOr(buf, i, ',')
+			pos += copy(b[pos:], v)
+		}
+
+		return i, b, nil
+	}
+
+	return i, buf[start:i], nil
+}
+
+// The following constants allow us to specify which state to move to
+// next, when scanning sections of a Point.
+const (
+	tagKeyState = iota
+	tagValueState
+	fieldsState
+)
+
+// scanMeasurement examines the measurement part of a Point, returning
+// the next state to move to, and the current location in the buffer.
+func scanMeasurement(buf []byte, i int) (int, int, error) {
+	// Check first byte of measurement, anything except a comma is fine.
+	// It can't be a space, since whitespace is stripped prior to this
+	// function call.
+	if buf[i] == ',' {
+		return -1, i, fmt.Errorf("missing measurement")
+	}
+
+	for {
+		i++
+		if i >= len(buf) {
+			// cpu
+			return -1, i, fmt.Errorf("missing fields")
+		}
+
+		if buf[i-1] == '\\' {
+			// Skip character (it's escaped).
+			continue
+		}
+
+		// Unescaped comma; move onto scanning the tags.
+		if buf[i] == ',' {
+			return tagKeyState, i + 1, nil
+		}
+
+		// Unescaped space; move onto scanning the fields.
+		if buf[i] == ' ' {
+			// cpu value=1.0
+			return fieldsState, i, nil
+		}
+	}
+}
+
+// scanTags examines all the tags in a Point, keeping track of and
+// returning the updated indices slice, number of commas and location
+// in buf where to start examining the Point fields.
+func scanTags(buf []byte, i int, indices []int) (int, int, []int, error) {
+	var (
+		err    error
+		commas int
+		state  = tagKeyState
+	)
+
+	for {
+		switch state {
+		case tagKeyState:
+			// Grow our indices slice if we have too many tags.
+			if commas >= len(indices) {
+				newIndices := make([]int, cap(indices)*2)
+				copy(newIndices, indices)
+				indices = newIndices
+			}
+			indices[commas] = i
+			commas++
+
+			i, err = scanTagsKey(buf, i)
+			state = tagValueState // tag value always follows a tag key
+		case tagValueState:
+			state, i, err = scanTagsValue(buf, i)
+		case fieldsState:
+			indices[commas] = i + 1
+			return i, commas, indices, nil
+		}
+
+		if err != nil {
+			return i, commas, indices, err
+		}
+	}
+}
+
+// scanTagsKey scans each character in a tag key.
+func scanTagsKey(buf []byte, i int) (int, error) {
+	// First character of the key.
+	if i >= len(buf) || buf[i] == ' ' || buf[i] == ',' || buf[i] == '=' {
+		// cpu,{'', ' ', ',', '='}
+		return i, fmt.Errorf("missing tag key")
+	}
+
+	// Examine each character in the tag key until we hit an unescaped
+	// equals (the tag value), or we hit an error (i.e., unescaped
+	// space or comma).
+	for {
+		i++
+
+		// Either we reached the end of the buffer or we hit an
+		// unescaped comma or space.
+		if i >= len(buf) ||
+			((buf[i] == ' ' || buf[i] == ',') && buf[i-1] != '\\') {
+			// cpu,tag{'', ' ', ','}
+			return i, fmt.Errorf("missing tag value")
+		}
+
+		if buf[i] == '=' && buf[i-1] != '\\' {
+			// cpu,tag=
+			return i + 1, nil
+		}
+	}
+}
+
+// scanTagsValue scans each character in a tag value.
+func scanTagsValue(buf []byte, i int) (int, int, error) {
+	// Tag value cannot be empty.
+	if i >= len(buf) || buf[i] == ',' || buf[i] == ' ' {
+		// cpu,tag={',', ' '}
+		return -1, i, fmt.Errorf("missing tag value")
+	}
+
+	// Examine each character in the tag value until we hit an unescaped
+	// comma (move onto next tag key), an unescaped space (move onto
+	// fields), or we error out.
+	for {
+		i++
+		if i >= len(buf) {
+			// cpu,tag=value
+			return -1, i, fmt.Errorf("missing fields")
+		}
+
+		// An unescaped equals sign is an invalid tag value.
+		if buf[i] == '=' && buf[i-1] != '\\' {
+			// cpu,tag={'=', 'fo=o'}
+			return -1, i, fmt.Errorf("invalid tag format")
+		}
+
+		if buf[i] == ',' && buf[i-1] != '\\' {
+			// cpu,tag=foo,
+			return tagKeyState, i + 1, nil
+		}
+
+		// cpu,tag=foo value=1.0
+		// cpu, tag=foo\= value=1.0
+		if buf[i] == ' ' && buf[i-1] != '\\' {
+			return fieldsState, i, nil
+		}
+	}
+}
+
+func insertionSort(l, r int, buf []byte, indices []int) {
+	for i := l + 1; i < r; i++ {
+		for j := i; j > l && less(buf, indices, j, j-1); j-- {
+			indices[j], indices[j-1] = indices[j-1], indices[j]
+		}
+	}
+}
+
+func less(buf []byte, indices []int, i, j int) bool {
+	// This grabs the tag names for i & j, it ignores the values
+	_, a := scanTo(buf, indices[i], '=')
+	_, b := scanTo(buf, indices[j], '=')
+	return bytes.Compare(a, b) < 0
+}
+
+func isFieldEscapeChar(b byte) bool {
+	for c := range escape.Codes {
+		if c == b {
+			return true
+		}
+	}
+	return false
+}
+
+// scanFields scans buf, starting at i for the fields section of a point.  It returns
+// the ending position and the byte slice of the fields within buf
+func scanFields(buf []byte, i int) (int, []byte, error) {
+	start := skipWhitespace(buf, i)
+	i = start
+	quoted := false
+
+	// tracks how many '=' we've seen
+	equals := 0
+
+	// tracks how many commas we've seen
+	commas := 0
+
+	for {
+		// reached the end of buf?
+		if i >= len(buf) {
+			break
+		}
+
+		// escaped characters?
+		if buf[i] == '\\' && i+1 < len(buf) {
+			i += 2
+			continue
+		}
+
+		// If the value is quoted, scan until we get to the end quote.
+		// Quotes are only significant in the field value, not the field key,
+		// so only toggle quoting once we are past an '=' (equals > commas)
+		if buf[i] == '"' && equals > commas {
+			quoted = !quoted
+			i++
+			continue
+		}
+
+		// If we see an =, ensure that there is at least one char before and after it
+		if buf[i] == '=' && !quoted {
+			equals++
+
+			// check for "... =123" but allow "a\ =123"
+			if buf[i-1] == ' ' && buf[i-2] != '\\' {
+				return i, buf[start:i], fmt.Errorf("missing field key")
+			}
+
+			// check for "...a=123,=456" but allow "a=123,a\,=456"
+			if buf[i-1] == ',' && buf[i-2] != '\\' {
+				return i, buf[start:i], fmt.Errorf("missing field key")
+			}
+
+			// check for "... value="
+			if i+1 >= len(buf) {
+				return i, buf[start:i], fmt.Errorf("missing field value")
+			}
+
+			// check for "... value=,value2=..."
+			if buf[i+1] == ',' || buf[i+1] == ' ' {
+				return i, buf[start:i], fmt.Errorf("missing field value")
+			}
+
+			if isNumeric(buf[i+1]) || buf[i+1] == '-' || buf[i+1] == 'N' || buf[i+1] == 'n' {
+				var err error
+				i, err = scanNumber(buf, i+1)
+				if err != nil {
+					return i, buf[start:i], err
+				}
+				continue
+			}
+			// If next byte is not a double-quote, the value must be a boolean
+			if buf[i+1] != '"' {
+				var err error
+				i, _, err = scanBoolean(buf, i+1)
+				if err != nil {
+					return i, buf[start:i], err
+				}
+				continue
+			}
+		}
+
+		if buf[i] == ',' && !quoted {
+			commas++
+		}
+
+		// reached end of block?
+		if buf[i] == ' ' && !quoted {
+			break
+		}
+		i++
+	}
+
+	if quoted {
+		return i, buf[start:i], fmt.Errorf("unbalanced quotes")
+	}
+
+	// check that all field sections had key and values (e.g. prevent "a=1,b")
+	if equals == 0 || commas != equals-1 {
+		return i, buf[start:i], fmt.Errorf("invalid field format")
+	}
+
+	return i, buf[start:i], nil
+}
+
+// scanTime scans buf, starting at i, for the time section of a point.  It returns
+// the ending position and the byte slice of the timestamp within buf, and an error
+// if the timestamp is not in the correct numeric format
+func scanTime(buf []byte, i int) (int, []byte, error) {
+	start := skipWhitespace(buf, i)
+	i = start
+	for {
+		// reached the end of buf?
+		if i >= len(buf) {
+			break
+		}
+
+		// Timestamps should be integers; verify that here so we don't need to
+		// actually parse the timestamp until it's needed
+		if buf[i] < '0' || buf[i] > '9' {
+			// Handle negative timestamps
+			if i == start && buf[i] == '-' {
+				i++
+				continue
+			}
+			return i, buf[start:i], fmt.Errorf("bad timestamp")
+		}
+
+		// reached end of block?
+		if buf[i] == '\n' {
+			break
+		}
+		i++
+	}
+	return i, buf[start:i], nil
+}
+
+func isNumeric(b byte) bool {
+	return (b >= '0' && b <= '9') || b == '.'
+}
+
+// scanNumber returns the end position within buf, starting at i, after
+// scanning over buf for an integer or float.  It returns an
+// error if an invalid number is scanned.
+func scanNumber(buf []byte, i int) (int, error) {
+	start := i
+	var isInt bool
+
+	// Is negative number?
+	if i < len(buf) && buf[i] == '-' {
+		i++
+		// There must be more characters now, as just '-' is illegal.
+		if i == len(buf) {
+			return i, ErrInvalidNumber
+		}
+	}
+
+	// tracks whether we've seen a decimal point
+	decimal := false
+
+	// indicates the number is float in scientific notation
+	scientific := false
+
+	for {
+		if i >= len(buf) {
+			break
+		}
+
+		if buf[i] == ',' || buf[i] == ' ' {
+			break
+		}
+
+		if buf[i] == 'i' && i > start && !isInt {
+			isInt = true
+			i++
+			continue
+		}
+
+		if buf[i] == '.' {
+			// Can't have more than 1 decimal (e.g. 1.1.1 should fail)
+			if decimal {
+				return i, ErrInvalidNumber
+			}
+			decimal = true
+		}
+
+		// `e` is valid for floats but not as the first char
+		if i > start && (buf[i] == 'e' || buf[i] == 'E') {
+			scientific = true
+			i++
+			continue
+		}
+
+		// + and - are only valid at this point if they follow an e (scientific notation)
+		if (buf[i] == '+' || buf[i] == '-') && (buf[i-1] == 'e' || buf[i-1] == 'E') {
+			i++
+			continue
+		}
+
+		// NaN is an unsupported value
+		if i+2 < len(buf) && (buf[i] == 'N' || buf[i] == 'n') {
+			return i, ErrInvalidNumber
+		}
+
+		if !isNumeric(buf[i]) {
+			return i, ErrInvalidNumber
+		}
+		i++
+	}
+
+	if isInt && (decimal || scientific) {
+		return i, ErrInvalidNumber
+	}
+
+	numericDigits := i - start
+	if isInt {
+		numericDigits--
+	}
+	if decimal {
+		numericDigits--
+	}
+	if buf[start] == '-' {
+		numericDigits--
+	}
+
+	if numericDigits == 0 {
+		return i, ErrInvalidNumber
+	}
+
+	// It's more common that numbers will be within min/max range for their type but we need to prevent
+	// out of range numbers from being parsed successfully.  This uses some simple heuristics to decide
+	// if we should parse the number to the actual type.  It does not do it all the time because it incurs
+	// extra allocations and we end up converting the type again when writing points to disk.
+	if isInt {
+		// Make sure the last char is an 'i' for integers (e.g. 9i10 is not valid)
+		if buf[i-1] != 'i' {
+			return i, ErrInvalidNumber
+		}
+		// Parse the int to check bounds the number of digits could be larger than the max range
+		// We subtract 1 from the index to remove the `i` from our tests
+		if len(buf[start:i-1]) >= maxInt64Digits || len(buf[start:i-1]) >= minInt64Digits {
+			if _, err := strconv.ParseInt(string(buf[start:i-1]), 10, 64); err != nil {
+				return i, fmt.Errorf("unable to parse integer %s: %s", buf[start:i-1], err)
+			}
+		}
+	} else {
+		// Parse the float to check bounds if it's scientific or the number of digits could be larger than the max range
+		if scientific || len(buf[start:i]) >= maxFloat64Digits || len(buf[start:i]) >= minFloat64Digits {
+		if _, err := strconv.ParseFloat(string(buf[start:i]), 64); err != nil {
+				return i, fmt.Errorf("invalid float")
+			}
+		}
+	}
+
+	return i, nil
+}
+
+// scanBoolean returns the end position within buf, starting at i, after
+// scanning over buf for a boolean. Valid values for a boolean are
+// t, T, true, True, TRUE, f, F, false, False and FALSE.  It returns an
+// error if an invalid boolean is scanned.
+func scanBoolean(buf []byte, i int) (int, []byte, error) {
+	start := i
+
+	if i >= len(buf) || (buf[i] != 't' && buf[i] != 'f' && buf[i] != 'T' && buf[i] != 'F') {
+		return i, buf[start:i], fmt.Errorf("invalid boolean")
+	}
+
+	i++
+	for {
+		if i >= len(buf) {
+			break
+		}
+
+		if buf[i] == ',' || buf[i] == ' ' {
+			break
+		}
+		i++
+	}
+
+	// Single char bool (t, T, f, F) is ok
+	if i-start == 1 {
+		return i, buf[start:i], nil
+	}
+
+	// length must be 4 for true or TRUE
+	if (buf[start] == 't' || buf[start] == 'T') && i-start != 4 {
+		return i, buf[start:i], fmt.Errorf("invalid boolean")
+	}
+
+	// length must be 5 for false or FALSE
+	if (buf[start] == 'f' || buf[start] == 'F') && i-start != 5 {
+		return i, buf[start:i], fmt.Errorf("invalid boolean")
+	}
+
+	// Otherwise, compare against the full words
+	valid := false
+	switch buf[start] {
+	case 't':
+		valid = bytes.Equal(buf[start:i], []byte("true"))
+	case 'f':
+		valid = bytes.Equal(buf[start:i], []byte("false"))
+	case 'T':
+		valid = bytes.Equal(buf[start:i], []byte("TRUE")) || bytes.Equal(buf[start:i], []byte("True"))
+	case 'F':
+		valid = bytes.Equal(buf[start:i], []byte("FALSE")) || bytes.Equal(buf[start:i], []byte("False"))
+	}
+
+	if !valid {
+		return i, buf[start:i], fmt.Errorf("invalid boolean")
+	}
+
+	return i, buf[start:i], nil
+
+}
+
+// skipWhitespace returns the end position within buf, starting at i, after
+// scanning over spaces, tabs and NUL bytes
+func skipWhitespace(buf []byte, i int) int {
+	for i < len(buf) {
+		if buf[i] != ' ' && buf[i] != '\t' && buf[i] != 0 {
+			break
+		}
+		i++
+	}
+	return i
+}
+
+// scanLine returns the end position in buf and the next line found within
+// buf.
+func scanLine(buf []byte, i int) (int, []byte) {
+	start := i
+	quoted := false
+	fields := false
+
+	// tracks how many '=' and commas we've seen
+	// this duplicates some of the functionality in scanFields
+	equals := 0
+	commas := 0
+	for {
+		// reached the end of buf?
+		if i >= len(buf) {
+			break
+		}
+
+		// skip past escaped characters
+		if buf[i] == '\\' {
+			i += 2
+			continue
+		}
+
+		if buf[i] == ' ' {
+			fields = true
+		}
+
+		// If we see a double quote, make sure it is not escaped
+		if fields {
+			if !quoted && buf[i] == '=' {
+				i++
+				equals++
+				continue
+			} else if !quoted && buf[i] == ',' {
+				i++
+				commas++
+				continue
+			} else if buf[i] == '"' && equals > commas {
+				i++
+				quoted = !quoted
+				continue
+			}
+		}
+
+		if buf[i] == '\n' && !quoted {
+			break
+		}
+
+		i++
+	}
+
+	return i, buf[start:i]
+}
+
+// scanTo returns the end position in buf and the next consecutive block
+// of bytes, starting from i and ending with stop byte, where stop byte
+// has not been escaped.
+func scanTo(buf []byte, i int, stop byte) (int, []byte) {
+	start := i
+	for {
+		// reached the end of buf?
+		if i >= len(buf) {
+			break
+		}
+
+		// Reached unescaped stop value?
+		if buf[i] == stop && (i == 0 || buf[i-1] != '\\') {
+			break
+		}
+		i++
+	}
+
+	return i, buf[start:i]
+}
+
+// scanToSpaceOr returns the end position in buf and the next consecutive block
+// of bytes, starting from i and ending with the stop byte or a space.
+func scanToSpaceOr(buf []byte, i int, stop byte) (int, []byte) {
+	start := i
+	if buf[i] == stop || buf[i] == ' ' {
+		return i, buf[start:i]
+	}
+
+	for {
+		i++
+		if buf[i-1] == '\\' {
+			continue
+		}
+
+		// reached the end of buf?
+		if i >= len(buf) {
+			return i, buf[start:i]
+		}
+
+		// reached end of block?
+		if buf[i] == stop || buf[i] == ' ' {
+			return i, buf[start:i]
+		}
+	}
+}
+
+func scanTagValue(buf []byte, i int) (int, []byte) {
+	start := i
+	for {
+		if i >= len(buf) {
+			break
+		}
+
+		if buf[i] == ',' && buf[i-1] != '\\' {
+			break
+		}
+		i++
+	}
+	return i, buf[start:i]
+}
+
+func scanFieldValue(buf []byte, i int) (int, []byte) {
+	start := i
+	quoted := false
+	for {
+		if i >= len(buf) {
+			break
+		}
+
+		// Only escape char for a field value is a double-quote
+		if buf[i] == '\\' && i+1 < len(buf) && buf[i+1] == '"' {
+			i += 2
+			continue
+		}
+
+		// Quoted value? (e.g. string)
+		if buf[i] == '"' {
+			i++
+			quoted = !quoted
+			continue
+		}
+
+		if buf[i] == ',' && !quoted {
+			break
+		}
+		i++
+	}
+	return i, buf[start:i]
+}
+
+func escapeMeasurement(in []byte) []byte {
+	for b, esc := range measurementEscapeCodes {
+		in = bytes.Replace(in, []byte{b}, esc, -1)
+	}
+	return in
+}
+
+func unescapeMeasurement(in []byte) []byte {
+	for b, esc := range measurementEscapeCodes {
+		in = bytes.Replace(in, esc, []byte{b}, -1)
+	}
+	return in
+}
+
+func escapeTag(in []byte) []byte {
+	for b, esc := range tagEscapeCodes {
+		if bytes.IndexByte(in, b) != -1 {
+			in = bytes.Replace(in, []byte{b}, esc, -1)
+		}
+	}
+	return in
+}
+
+func unescapeTag(in []byte) []byte {
+	for b, esc := range tagEscapeCodes {
+		if bytes.IndexByte(in, b) != -1 {
+			in = bytes.Replace(in, esc, []byte{b}, -1)
+		}
+	}
+	return in
+}
+
+// escapeStringField returns a copy of in with any double quotes or
+// backslashes escaped
+func escapeStringField(in string) string {
+	var out []byte
+	i := 0
+	for {
+		if i >= len(in) {
+			break
+		}
+		// escape backslashes
+		if in[i] == '\\' {
+			out = append(out, '\\')
+			out = append(out, '\\')
+			i++
+			continue
+		}
+		// escape double-quotes
+		if in[i] == '"' {
+			out = append(out, '\\')
+			out = append(out, '"')
+			i++
+			continue
+		}
+		out = append(out, in[i])
+		i++
+
+	}
+	return string(out)
+}
+
+// unescapeStringField returns a copy of in with any escaped double-quotes
+// or backslashes unescaped
+func unescapeStringField(in string) string {
+	if strings.IndexByte(in, '\\') == -1 {
+		return in
+	}
+
+	var out []byte
+	i := 0
+	for {
+		if i >= len(in) {
+			break
+		}
+		// unescape backslashes
+		if in[i] == '\\' && i+1 < len(in) && in[i+1] == '\\' {
+			out = append(out, '\\')
+			i += 2
+			continue
+		}
+		// unescape double-quotes
+		if in[i] == '\\' && i+1 < len(in) && in[i+1] == '"' {
+			out = append(out, '"')
+			i += 2
+			continue
+		}
+		out = append(out, in[i])
+		i++
+
+	}
+	return string(out)
+}
+
+// NewPoint returns a new point with the given measurement name, tags, fields and timestamp.  If
+// an unsupported field value (NaN) or out of range time is passed, this function returns an error.
+func NewPoint(name string, tags Tags, fields Fields, time time.Time) (Point, error) {
+	if len(fields) == 0 {
+		return nil, ErrPointMustHaveAField
+	}
+	if !time.IsZero() {
+		if err := CheckTime(time); err != nil {
+			return nil, err
+		}
+	}
+
+	for key, value := range fields {
+		if fv, ok := value.(float64); ok {
+			// NaN is not a supported field value; reject it here
+			if math.IsNaN(fv) {
+				return nil, fmt.Errorf("NaN is an unsupported value for field %s", key)
+			}
+		}
+		if len(key) == 0 {
+			return nil, fmt.Errorf("all fields must have non-empty names")
+		}
+	}
+
+	key := MakeKey([]byte(name), tags)
+	if len(key) > MaxKeyLength {
+		return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength)
+	}
+
+	return &point{
+		key:    key,
+		time:   time,
+		fields: fields.MarshalBinary(),
+	}, nil
+}
+
+// NewPointFromBytes returns a new Point from a marshalled Point.
+func NewPointFromBytes(b []byte) (Point, error) {
+	p := &point{}
+	if err := p.UnmarshalBinary(b); err != nil {
+		return nil, err
+	}
+	if len(p.Fields()) == 0 {
+		return nil, ErrPointMustHaveAField
+	}
+	return p, nil
+}
+
+// MustNewPoint returns a new point with the given measurement name, tags, fields and timestamp.  If
+// an unsupported field value (NaN) is passed, this function panics.
+func MustNewPoint(name string, tags Tags, fields Fields, time time.Time) Point {
+	pt, err := NewPoint(name, tags, fields, time)
+	if err != nil {
+		panic(err.Error())
+	}
+	return pt
+}
+
+func (p *point) Data() []byte {
+	return p.data
+}
+
+func (p *point) SetData(b []byte) {
+	p.data = b
+}
+
+func (p *point) Key() []byte {
+	return p.key
+}
+
+func (p *point) name() []byte {
+	_, name := scanTo(p.key, 0, ',')
+	return name
+}
+
+// Name returns the measurement name for the point
+func (p *point) Name() string {
+	if p.cachedName != "" {
+		return p.cachedName
+	}
+	p.cachedName = string(escape.Unescape(p.name()))
+	return p.cachedName
+}
+
+// SetName updates the measurement name for the point
+func (p *point) SetName(name string) {
+	p.cachedName = ""
+	p.key = MakeKey([]byte(name), p.Tags())
+}
+
+// Time returns the timestamp for the point
+func (p *point) Time() time.Time {
+	return p.time
+}
+
+// SetTime updates the timestamp for the point
+func (p *point) SetTime(t time.Time) {
+	p.time = t
+}
+
+// Tags returns the tag set for the point
+func (p *point) Tags() Tags {
+	return parseTags(p.key)
+}
+
+func parseTags(buf []byte) Tags {
+	tags := map[string]string{}
+
+	if len(buf) != 0 {
+		pos, name := scanTo(buf, 0, ',')
+
+		// it's an empty key, so there are no tags
+		if len(name) == 0 {
+			return tags
+		}
+
+		i := pos + 1
+		var key, value []byte
+		for {
+			if i >= len(buf) {
+				break
+			}
+			i, key = scanTo(buf, i, '=')
+			i, value = scanTagValue(buf, i+1)
+
+			if len(value) == 0 {
+				continue
+			}
+
+			tags[string(unescapeTag(key))] = string(unescapeTag(value))
+
+			i++
+		}
+	}
+	return tags
+}
+
+// MakeKey creates a key for a set of tags.
+func MakeKey(name []byte, tags Tags) []byte {
+	// unescape the name and then re-escape it to avoid double escaping.
+	// The key should always be stored in escaped form.
+	return append(escapeMeasurement(unescapeMeasurement(name)), tags.HashKey()...)
+}
+
+// SetTags replaces the tags for the point
+func (p *point) SetTags(tags Tags) {
+	p.key = MakeKey([]byte(p.Name()), tags)
+}
+
+// AddTag adds or replaces a tag value for a point
+func (p *point) AddTag(key, value string) {
+	tags := p.Tags()
+	tags[key] = value
+	p.key = MakeKey([]byte(p.Name()), tags)
+}
+
+// Fields returns the fields for the point
+func (p *point) Fields() Fields {
+	if p.cachedFields != nil {
+		return p.cachedFields
+	}
+	p.cachedFields = p.unmarshalBinary()
+	return p.cachedFields
+}
+
+// SetPrecision truncates the time of the point to the specified precision
+func (p *point) SetPrecision(precision string) {
+	switch precision {
+	case "n":
+		// nanoseconds are the native precision; nothing to truncate
+	case "u":
+		p.SetTime(p.Time().Truncate(time.Microsecond))
+	case "ms":
+		p.SetTime(p.Time().Truncate(time.Millisecond))
+	case "s":
+		p.SetTime(p.Time().Truncate(time.Second))
+	case "m":
+		p.SetTime(p.Time().Truncate(time.Minute))
+	case "h":
+		p.SetTime(p.Time().Truncate(time.Hour))
+	}
+}
+
+func (p *point) String() string {
+	if p.Time().IsZero() {
+		return string(p.Key()) + " " + string(p.fields)
+	}
+	return string(p.Key()) + " " + string(p.fields) + " " + strconv.FormatInt(p.UnixNano(), 10)
+}
+
+func (p *point) MarshalBinary() ([]byte, error) {
+	tb, err := p.time.MarshalBinary()
+	if err != nil {
+		return nil, err
+	}
+
+	b := make([]byte, 8+len(p.key)+len(p.fields)+len(tb))
+	i := 0
+
+	binary.BigEndian.PutUint32(b[i:], uint32(len(p.key)))
+	i += 4
+
+	i += copy(b[i:], p.key)
+
+	binary.BigEndian.PutUint32(b[i:i+4], uint32(len(p.fields)))
+	i += 4
+
+	i += copy(b[i:], p.fields)
+
+	copy(b[i:], tb)
+	return b, nil
+}
+
+func (p *point) UnmarshalBinary(b []byte) error {
+	var i int
+	keyLen := int(binary.BigEndian.Uint32(b[:4]))
+	i += 4
+
+	p.key = b[i : i+keyLen]
+	i += keyLen
+
+	fieldLen := int(binary.BigEndian.Uint32(b[i : i+4]))
+	i += 4
+
+	p.fields = b[i : i+fieldLen]
+	i += fieldLen
+
+	// the remainder of the buffer is the encoded timestamp; propagate any
+	// decode error instead of silently ignoring it
+	return p.time.UnmarshalBinary(b[i:])
+}
+
+func (p *point) PrecisionString(precision string) string {
+	if p.Time().IsZero() {
+		return fmt.Sprintf("%s %s", p.Key(), string(p.fields))
+	}
+	return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields),
+		p.UnixNano()/GetPrecisionMultiplier(precision))
+}
+
+func (p *point) RoundedString(d time.Duration) string {
+	if p.Time().IsZero() {
+		return fmt.Sprintf("%s %s", p.Key(), string(p.fields))
+	}
+	return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields),
+		p.time.Round(d).UnixNano())
+}
+
+func (p *point) unmarshalBinary() Fields {
+	return newFieldsFromBinary(p.fields)
+}
+
+func (p *point) HashID() uint64 {
+	h := fnv.New64a()
+	h.Write(p.key)
+	sum := h.Sum64()
+	return sum
+}
+
+func (p *point) UnixNano() int64 {
+	return p.Time().UnixNano()
+}
+
+// Tags represents a mapping between a Point's tag names and their
+// values.
+type Tags map[string]string
+
+// HashKey marshals the tag set into a sorted, escaped byte string suitable
+// for appending to a measurement name to form a series key.
+func (t Tags) HashKey() []byte {
+	// Empty maps marshal to empty bytes.
+	if len(t) == 0 {
+		return nil
+	}
+
+	escaped := Tags{}
+	for k, v := range t {
+		ek := escapeTag([]byte(k))
+		ev := escapeTag([]byte(v))
+
+		if len(ev) > 0 {
+			escaped[string(ek)] = string(ev)
+		}
+	}
+
+	// Extract keys and determine final size.
+	sz := len(escaped) + (len(escaped) * 2) // separators
+	keys := make([]string, len(escaped)+1)
+	i := 0
+	for k, v := range escaped {
+		keys[i] = k
+		i++
+		sz += len(k) + len(v)
+	}
+	keys = keys[:i]
+	sort.Strings(keys)
+	// Generate marshaled bytes.
+	b := make([]byte, sz)
+	buf := b
+	idx := 0
+	for _, k := range keys {
+		buf[idx] = ','
+		idx++
+		copy(buf[idx:idx+len(k)], k)
+		idx += len(k)
+		buf[idx] = '='
+		idx++
+		v := escaped[k]
+		copy(buf[idx:idx+len(v)], v)
+		idx += len(v)
+	}
+	return b[:idx]
+}
+
+// Fields represents a mapping between a Point's field names and their
+// values.
+type Fields map[string]interface{}
+
+func parseNumber(val []byte) (interface{}, error) {
+	if val[len(val)-1] == 'i' {
+		val = val[:len(val)-1]
+		return strconv.ParseInt(string(val), 10, 64)
+	}
+	for i := 0; i < len(val); i++ {
+		// If there is a decimal point, an N (NaN), an I (Inf) or an exponent, parse as float
+		if val[i] == '.' || val[i] == 'N' || val[i] == 'n' || val[i] == 'I' || val[i] == 'i' || val[i] == 'e' {
+			return strconv.ParseFloat(string(val), 64)
+		}
+		// a non-numeric character (other than a leading sign) means the value
+		// is not a number, so return it as a string
+		if (val[i] < '0' || val[i] > '9') && val[i] != '-' {
+			return string(val), nil
+		}
+	}
+	return strconv.ParseFloat(string(val), 64)
+}
+
+func newFieldsFromBinary(buf []byte) Fields {
+	fields := make(Fields, 8)
+	var (
+		i              int
+		name, valueBuf []byte
+		value          interface{}
+		err            error
+	)
+	for i < len(buf) {
+
+		i, name = scanTo(buf, i, '=')
+		name = escape.Unescape(name)
+
+		i, valueBuf = scanFieldValue(buf, i+1)
+		if len(name) > 0 {
+			if len(valueBuf) == 0 {
+				fields[string(name)] = nil
+				continue
+			}
+
+			// If the first char is a double-quote, then unmarshal as string
+			if valueBuf[0] == '"' {
+				value = unescapeStringField(string(valueBuf[1 : len(valueBuf)-1]))
+				// Check for numeric characters and special NaN or Inf
+			} else if (valueBuf[0] >= '0' && valueBuf[0] <= '9') || valueBuf[0] == '-' || valueBuf[0] == '.' ||
+				valueBuf[0] == 'N' || valueBuf[0] == 'n' || // NaN
+				valueBuf[0] == 'I' || valueBuf[0] == 'i' { // Inf
+
+				value, err = parseNumber(valueBuf)
+				if err != nil {
+					panic(fmt.Sprintf("unable to parse number value '%v': %v", string(valueBuf), err))
+				}
+
+				// Otherwise parse it as bool
+			} else {
+				value, err = strconv.ParseBool(string(valueBuf))
+				if err != nil {
+					panic(fmt.Sprintf("unable to parse bool value '%v': %v\n", string(valueBuf), err))
+				}
+			}
+			fields[string(name)] = value
+		}
+		i++
+	}
+	return fields
+}
+
+// MarshalBinary encodes all the fields to their proper type and returns the binary
+// representation
+// NOTE: uint64 is specifically not supported due to potential overflow when we decode
+// again later to an int64
+func (p Fields) MarshalBinary() []byte {
+	b := []byte{}
+	keys := make([]string, len(p))
+	i := 0
+	for k := range p {
+		keys[i] = k
+		i++
+	}
+	sort.Strings(keys)
+
+	for _, k := range keys {
+		v := p[k]
+		b = append(b, []byte(escape.String(k))...)
+		b = append(b, '=')
+		switch t := v.(type) {
+		case int:
+			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
+			b = append(b, 'i')
+		case int8:
+			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
+			b = append(b, 'i')
+		case int16:
+			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
+			b = append(b, 'i')
+		case int32:
+			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
+			b = append(b, 'i')
+		case int64:
+			b = append(b, []byte(strconv.FormatInt(t, 10))...)
+			b = append(b, 'i')
+		case uint:
+			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
+			b = append(b, 'i')
+		case uint8:
+			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
+			b = append(b, 'i')
+		case uint16:
+			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
+			b = append(b, 'i')
+		case uint32:
+			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
+			b = append(b, 'i')
+		case float32:
+			val := []byte(strconv.FormatFloat(float64(t), 'f', -1, 32))
+			b = append(b, val...)
+		case float64:
+			val := []byte(strconv.FormatFloat(t, 'f', -1, 64))
+			b = append(b, val...)
+		case bool:
+			b = append(b, []byte(strconv.FormatBool(t))...)
+		case []byte:
+			b = append(b, t...)
+		case string:
+			b = append(b, '"')
+			b = append(b, []byte(escapeStringField(t))...)
+			b = append(b, '"')
+		case nil:
+			// skip
+		default:
+			// Can't determine the type, so convert to string
+			b = append(b, '"')
+			b = append(b, []byte(escapeStringField(fmt.Sprintf("%v", v)))...)
+			b = append(b, '"')
+
+		}
+		b = append(b, ',')
+	}
+	if len(b) > 0 {
+		return b[0 : len(b)-1]
+	}
+	return b
+}
+
+type indexedSlice struct {
+	indices []int
+	b       []byte
+}
+
+func (s *indexedSlice) Less(i, j int) bool {
+	_, a := scanTo(s.b, s.indices[i], '=')
+	_, b := scanTo(s.b, s.indices[j], '=')
+	return bytes.Compare(a, b) < 0
+}
+
+func (s *indexedSlice) Swap(i, j int) {
+	s.indices[i], s.indices[j] = s.indices[j], s.indices[i]
+}
+
+func (s *indexedSlice) Len() int {
+	return len(s.indices)
+}
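
For orientation, a minimal sketch of the points API added above, assuming the vendored import path; NewPoint validates fields and timestamp, and String() renders the line-protocol form with sorted tags and fields:

package main

import (
	"fmt"
	"time"

	"github.com/influxdata/influxdb/models"
)

func main() {
	// Tags and Fields are plain maps; NewPoint rejects NaN field values,
	// empty field names, out-of-range times and over-long keys.
	pt, err := models.NewPoint(
		"cpu",
		models.Tags{"host": "server01"},
		models.Fields{"value": 0.64, "cores": 4},
		time.Unix(0, 1434055562000000000),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(pt.String())
	// cpu,host=server01 cores=4i,value=0.64 1434055562000000000
}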

+ 60 - 0
Godeps/_workspace/src/github.com/influxdata/influxdb/models/rows.go

@@ -0,0 +1,60 @@
+package models
+
+import (
+	"hash/fnv"
+	"sort"
+)
+
+// Row represents a single row returned from the execution of a statement.
+type Row struct {
+	Name    string            `json:"name,omitempty"`
+	Tags    map[string]string `json:"tags,omitempty"`
+	Columns []string          `json:"columns,omitempty"`
+	Values  [][]interface{}   `json:"values,omitempty"`
+	Err     error             `json:"err,omitempty"`
+}
+
+// SameSeries returns true if r contains values for the same series as o.
+func (r *Row) SameSeries(o *Row) bool {
+	return r.tagsHash() == o.tagsHash() && r.Name == o.Name
+}
+
+// tagsHash returns a hash of tag key/value pairs.
+func (r *Row) tagsHash() uint64 {
+	h := fnv.New64a()
+	keys := r.tagsKeys()
+	for _, k := range keys {
+		h.Write([]byte(k))
+		h.Write([]byte(r.Tags[k]))
+	}
+	return h.Sum64()
+}
+
+// tagsKeys returns a sorted list of tag keys.
+func (r *Row) tagsKeys() []string {
+	a := make([]string, 0, len(r.Tags))
+	for k := range r.Tags {
+		a = append(a, k)
+	}
+	sort.Strings(a)
+	return a
+}
+
+// Rows represents a collection of rows. Rows implements sort.Interface.
+type Rows []*Row
+
+func (p Rows) Len() int { return len(p) }
+
+func (p Rows) Less(i, j int) bool {
+	// Sort by name first.
+	if p[i].Name != p[j].Name {
+		return p[i].Name < p[j].Name
+	}
+
+	// Sort by tag set hash. Tags don't have a meaningful sort order so we
+	// just compute a hash and sort by that instead. This allows the tests
+	// to receive rows in a predictable order every time.
+	return p[i].tagsHash() < p[j].tagsHash()
+}
+
+func (p Rows) Swap(i, j int) { p[i], p[j] = p[j], p[i] }

+ 51 - 0
Godeps/_workspace/src/github.com/influxdata/influxdb/models/time.go

@@ -0,0 +1,51 @@
+package models
+
+// Helper time methods since parsing time can easily overflow and we only support a
+// specific time range.
+
+import (
+	"fmt"
+	"math"
+	"time"
+)
+
+var (
+	// MaxNanoTime is the maximum time that can be represented via int64 nanoseconds since the epoch.
+	MaxNanoTime = time.Unix(0, math.MaxInt64).UTC()
+	// MinNanoTime is the minimum time that can be represented via int64 nanoseconds since the epoch.
+	MinNanoTime = time.Unix(0, math.MinInt64).UTC()
+
+	// ErrTimeOutOfRange gets returned when time is out of the representable range using int64 nanoseconds since the epoch.
+	ErrTimeOutOfRange = fmt.Errorf("time outside range %s - %s", MinNanoTime, MaxNanoTime)
+)
+
+// SafeCalcTime safely calculates the time given. It returns an error if the time is outside the
+// supported range.
+func SafeCalcTime(timestamp int64, precision string) (time.Time, error) {
+	mult := GetPrecisionMultiplier(precision)
+	if t, ok := safeSignedMult(timestamp, mult); ok {
+		return time.Unix(0, t).UTC(), nil
+	}
+
+	return time.Time{}, ErrTimeOutOfRange
+}
+
+// CheckTime checks that a time is within the safe range.
+func CheckTime(t time.Time) error {
+	if t.Before(MinNanoTime) || t.After(MaxNanoTime) {
+		return ErrTimeOutOfRange
+	}
+	return nil
+}
+
+// Perform the multiplication and check to make sure it didn't overflow.
+func safeSignedMult(a, b int64) (int64, bool) {
+	if a == 0 || b == 0 || a == 1 || b == 1 {
+		return a * b, true
+	}
+	if a == math.MinInt64 || b == math.MaxInt64 {
+		return 0, false
+	}
+	c := a * b
+	return c, c/b == a
+}
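
A quick sketch of the guarded conversion, assuming "s" maps to a 1e9 multiplier via GetPrecisionMultiplier; an overflow in safeSignedMult surfaces as ErrTimeOutOfRange instead of a silently wrapped value:

package main

import (
	"fmt"
	"math"

	"github.com/influxdata/influxdb/models"
)

func main() {
	// A seconds-precision timestamp is multiplied up to nanoseconds.
	t, err := models.SafeCalcTime(1434055562, "s")
	fmt.Println(t, err) // 2015-06-11 20:46:02 +0000 UTC <nil>

	// A value whose product overflows int64 fails the c/b == a check
	// in safeSignedMult and comes back as ErrTimeOutOfRange.
	_, err = models.SafeCalcTime(math.MaxInt64/2, "s")
	fmt.Println(err)
}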

+ 53 - 0
Godeps/_workspace/src/github.com/influxdata/influxdb/pkg/escape/bytes.go

@@ -0,0 +1,53 @@
+package escape
+
+import "bytes"
+
+func Bytes(in []byte) []byte {
+	for b, esc := range Codes {
+		in = bytes.Replace(in, []byte{b}, esc, -1)
+	}
+	return in
+}
+
+func Unescape(in []byte) []byte {
+	if len(in) == 0 {
+		return nil
+	}
+
+	if bytes.IndexByte(in, '\\') == -1 {
+		return in
+	}
+
+	i := 0
+	inLen := len(in)
+	var out []byte
+
+	for {
+		if i >= inLen {
+			break
+		}
+		if in[i] == '\\' && i+1 < inLen {
+			switch in[i+1] {
+			case ',':
+				out = append(out, ',')
+				i += 2
+				continue
+			case '"':
+				out = append(out, '"')
+				i += 2
+				continue
+			case ' ':
+				out = append(out, ' ')
+				i += 2
+				continue
+			case '=':
+				out = append(out, '=')
+				i += 2
+				continue
+			}
+		}
+		out = append(out, in[i])
+		i++
+	}
+	return out
+}

+ 34 - 0
Godeps/_workspace/src/github.com/influxdata/influxdb/pkg/escape/strings.go

@@ -0,0 +1,34 @@
+package escape
+
+import "strings"
+
+var (
+	Codes = map[byte][]byte{
+		',': []byte(`\,`),
+		'"': []byte(`\"`),
+		' ': []byte(`\ `),
+		'=': []byte(`\=`),
+	}
+
+	codesStr = map[string]string{}
+)
+
+func init() {
+	for k, v := range Codes {
+		codesStr[string(k)] = string(v)
+	}
+}
+
+func UnescapeString(in string) string {
+	for b, esc := range codesStr {
+		in = strings.Replace(in, esc, b, -1)
+	}
+	return in
+}
+
+func String(in string) string {
+	for b, esc := range codesStr {
+		in = strings.Replace(in, b, esc, -1)
+	}
+	return in
+}
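
The escape package round-trips the four line-protocol metacharacters (comma, double quote, space, equals); a small sketch:

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/pkg/escape"
)

func main() {
	// Comma, space and equals in the input are backslash-escaped.
	s := escape.String(`cpu load,host=a`)
	fmt.Println(s)                        // cpu\ load\,host\=a
	fmt.Println(escape.UnescapeString(s)) // cpu load,host=a
}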

+ 14 - 0
conf/defaults.ini

@@ -6,6 +6,9 @@
 # possible values : production, development
 app_mode = production
 
+# instance name, defaults to the HOSTNAME environment variable value, or the system hostname if HOSTNAME is empty
+instance_name = ${HOSTNAME}
+
 #################################### Paths ####################################
 [paths]
 # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
@@ -335,3 +338,14 @@ global_api_key = -1
 
 # global limit on number of logged in users.
 global_session = -1
+
+#################################### Internal Grafana Metrics ##########################
+[metrics]
+enabled           = false
+interval_seconds  = 10
+
+[metrics.graphite]
+address = localhost:2003
+prefix = prod.grafana.%(instance_name)s.
+
+

+ 3 - 0
conf/sample.ini

@@ -6,6 +6,9 @@
 # possible values : production, development
 ; app_mode = production
 
+# instance name, defaults to the HOSTNAME environment variable value, or the system hostname if HOSTNAME is empty
+; instance_name = ${HOSTNAME}
+
 #################################### Paths ####################################
 [paths]
 # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)

+ 4 - 1
pkg/api/api.go

@@ -235,7 +235,10 @@ func Register(r *macaron.Macaron) {
 		r.Get("/search/", Search)
 
 		// metrics
-		r.Get("/metrics/test", GetTestMetrics)
+		r.Get("/metrics/test", wrap(GetTestMetrics))
+
+		// internal metrics
+		r.Get("/metrics", wrap(GetInternalMetrics))
 
 	}, reqSignedIn)
 

+ 1 - 1
pkg/api/app_routes.go

@@ -30,7 +30,7 @@ func InitAppPluginRoutes(r *macaron.Macaron) {
 			}
 			handlers = append(handlers, AppPluginRoute(route, plugin.Id))
 			r.Route(url, route.Method, handlers...)
-			log.Info("Plugins: Adding proxy route %s", url)
+			log.Debug("Plugins: Adding proxy route %s", url)
 		}
 	}
 }

+ 7 - 3
pkg/api/common.go

@@ -12,8 +12,12 @@ import (
 )
 
 var (
-	NotFound    = ApiError(404, "Not found", nil)
-	ServerError = ApiError(500, "Server error", nil)
+	NotFound = func() Response {
+		return ApiError(404, "Not found", nil)
+	}
+	ServerError = func(err error) Response {
+		return ApiError(500, "Server error", err)
+	}
 )
 
 type Response interface {
@@ -34,7 +38,7 @@ func wrap(action interface{}) macaron.Handler {
 		if err == nil && val != nil && len(val) > 0 {
 			res = val[0].Interface().(Response)
 		} else {
-			res = ServerError
+			res = ServerError(err)
 		}
 
 		res.WriteTo(c.Resp)

+ 2 - 4
pkg/api/dashboard.go

@@ -31,8 +31,6 @@ func isDashboardStarredByUser(c *middleware.Context, dashId int64) (bool, error)
 }
 
 func GetDashboard(c *middleware.Context) {
-	metrics.M_Api_Dashboard_Get.Inc(1)
-
 	slug := strings.ToLower(c.Params(":slug"))
 
 	query := m.GetDashboardQuery{Slug: slug, OrgId: c.OrgId}
@@ -76,6 +74,7 @@ func GetDashboard(c *middleware.Context) {
 		},
 	}
 
+	c.TimeRequest(metrics.M_Api_Dashboard_Get)
 	c.JSON(200, dto)
 }
 
@@ -150,8 +149,7 @@ func PostDashboard(c *middleware.Context, cmd m.SaveDashboardCommand) {
 		return
 	}
 
-	metrics.M_Api_Dashboard_Post.Inc(1)
-
+	c.TimeRequest(metrics.M_Api_Dashboard_Save)
 	c.JSON(200, util.DynMap{"status": "success", "slug": cmd.Result.Slug, "version": cmd.Result.Version})
 }
 

+ 4 - 0
pkg/api/dataproxy.go

@@ -10,6 +10,7 @@ import (
 
 	"github.com/grafana/grafana/pkg/api/cloudwatch"
 	"github.com/grafana/grafana/pkg/bus"
+	"github.com/grafana/grafana/pkg/metrics"
 	"github.com/grafana/grafana/pkg/middleware"
 	m "github.com/grafana/grafana/pkg/models"
 	"github.com/grafana/grafana/pkg/setting"
@@ -80,7 +81,10 @@ func getDatasource(id int64, orgId int64) (*m.DataSource, error) {
 }
 
 func ProxyDataSourceRequest(c *middleware.Context) {
+	c.TimeRequest(metrics.M_DataSource_ProxyReq_Timer)
+
 	ds, err := getDatasource(c.ParamsInt64(":id"), c.OrgId)
+
 	if err != nil {
 		c.JsonApiErr(500, "Unable to load datasource meta data", err)
 		return

+ 50 - 2
pkg/api/metrics.go

@@ -1,10 +1,14 @@
 package api
 
 import (
-	"github.com/grafana/grafana/pkg/api/dtos"
-	"github.com/grafana/grafana/pkg/middleware"
+	"encoding/json"
 	"math/rand"
+	"net/http"
 	"strconv"
+
+	"github.com/grafana/grafana/pkg/api/dtos"
+	"github.com/grafana/grafana/pkg/metrics"
+	"github.com/grafana/grafana/pkg/middleware"
 )
 
 func GetTestMetrics(c *middleware.Context) {
@@ -34,3 +38,47 @@ func GetTestMetrics(c *middleware.Context) {
 
 	c.JSON(200, &result)
 }
+
+func GetInternalMetrics(c *middleware.Context) Response {
+	snapshots := metrics.MetricStats.GetSnapshots()
+
+	resp := make(map[string]interface{})
+
+	for _, m := range snapshots {
+		metricName := m.Name() + m.StringifyTags()
+
+		switch metric := m.(type) {
+		case metrics.Counter:
+			resp[metricName] = map[string]interface{}{
+				"count": metric.Count(),
+			}
+		case metrics.Timer:
+			percentiles := metric.Percentiles([]float64{0.25, 0.75, 0.90, 0.99})
+			resp[metricName] = map[string]interface{}{
+				"count": metric.Count(),
+				"min":   metric.Min(),
+				"max":   metric.Max(),
+				"mean":  metric.Mean(),
+				"std":   metric.StdDev(),
+				"p25":   percentiles[0],
+				"p75":   percentiles[1],
+				"p90":   percentiles[2],
+				"p99":   percentiles[3],
+			}
+		}
+	}
+
+	b, err := json.MarshalIndent(resp, "", " ")
+	if err != nil {
+		return ApiError(500, "body json marshal", err)
+	}
+
+	return &NormalResponse{
+		body:   b,
+		status: 200,
+		header: http.Header{
+			"Content-Type": []string{"application/json"},
+		},
+	}
+}

+ 2 - 0
pkg/api/search.go

@@ -4,6 +4,7 @@ import (
 	"strconv"
 
 	"github.com/grafana/grafana/pkg/bus"
+	"github.com/grafana/grafana/pkg/metrics"
 	"github.com/grafana/grafana/pkg/middleware"
 	"github.com/grafana/grafana/pkg/services/search"
 )
@@ -42,5 +43,6 @@ func Search(c *middleware.Context) {
 		return
 	}
 
+	c.TimeRequest(metrics.M_Api_Dashboard_Search)
 	c.JSON(200, searchQuery.Result)
 }

+ 2 - 4
pkg/cmd/grafana-server/main.go

@@ -58,6 +58,7 @@ func main() {
 	flag.Parse()
 	writePIDFile()
 	initRuntime()
+	metrics.Init()
 
 	search.Init()
 	login.Init()
@@ -69,10 +70,6 @@ func main() {
 		log.Fatal(3, "Notification service failed to initialize", err)
 	}
 
-	if setting.ReportingEnabled {
-		go metrics.StartUsageReportLoop()
-	}
-
 	StartServer()
 	exitChan <- 0
 }
@@ -90,6 +87,7 @@ func initRuntime() {
 
 	log.Info("Starting Grafana")
 	log.Info("Version: %v, Commit: %v, Build date: %v", setting.BuildVersion, setting.BuildCommit, time.Unix(setting.BuildStamp, 0))
+
 	setting.LogConfigurationInfo()
 
 	sqlstore.NewEngine()

+ 1 - 1
pkg/cmd/grafana-server/web.go

@@ -31,7 +31,7 @@ func newMacaron() *macaron.Macaron {
 
 	for _, route := range plugins.StaticRoutes {
 		pluginRoute := path.Join("/public/plugins/", route.PluginId)
-		log.Info("Plugins: Adding route %s -> %s", pluginRoute, route.Directory)
+		log.Debug("Plugins: Adding route %s -> %s", pluginRoute, route.Directory)
 		mapStatic(m, route.Directory, "", pluginRoute)
 	}
 

+ 122 - 0
pkg/metrics/EMWA.go

@@ -0,0 +1,122 @@
+// includes code from
+// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
+// Copyright 2012 Richard Crowley. All rights reserved.
+
+package metrics
+
+import (
+	"math"
+	"sync"
+	"sync/atomic"
+)
+
+// EWMAs continuously calculate an exponentially-weighted moving average
+// based on an outside source of clock ticks.
+type EWMA interface {
+	Rate() float64
+	Snapshot() EWMA
+	Tick()
+	Update(int64)
+}
+
+// NewEWMA constructs a new EWMA with the given alpha.
+func NewEWMA(alpha float64) EWMA {
+	if UseNilMetrics {
+		return NilEWMA{}
+	}
+	return &StandardEWMA{alpha: alpha}
+}
+
+// NewEWMA1 constructs a new EWMA for a one-minute moving average.
+func NewEWMA1() EWMA {
+	return NewEWMA(1 - math.Exp(-5.0/60.0/1))
+}
+
+// NewEWMA5 constructs a new EWMA for a five-minute moving average.
+func NewEWMA5() EWMA {
+	return NewEWMA(1 - math.Exp(-5.0/60.0/5))
+}
+
+// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average.
+func NewEWMA15() EWMA {
+	return NewEWMA(1 - math.Exp(-5.0/60.0/15))
+}
+
+// EWMASnapshot is a read-only copy of another EWMA.
+type EWMASnapshot float64
+
+// Rate returns the rate of events per second at the time the snapshot was
+// taken.
+func (a EWMASnapshot) Rate() float64 { return float64(a) }
+
+// Snapshot returns the snapshot.
+func (a EWMASnapshot) Snapshot() EWMA { return a }
+
+// Tick panics.
+func (EWMASnapshot) Tick() {
+	panic("Tick called on an EWMASnapshot")
+}
+
+// Update panics.
+func (EWMASnapshot) Update(int64) {
+	panic("Update called on an EWMASnapshot")
+}
+
+// NilEWMA is a no-op EWMA.
+type NilEWMA struct{}
+
+// Rate is a no-op.
+func (NilEWMA) Rate() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilEWMA) Snapshot() EWMA { return NilEWMA{} }
+
+// Tick is a no-op.
+func (NilEWMA) Tick() {}
+
+// Update is a no-op.
+func (NilEWMA) Update(n int64) {}
+
+// StandardEWMA is the standard implementation of an EWMA and tracks the number
+// of uncounted events and processes them on each tick.  It uses the
+// sync/atomic package to manage uncounted events.
+type StandardEWMA struct {
+	uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment
+	alpha     float64
+	rate      float64
+	init      bool
+	mutex     sync.Mutex
+}
+
+// Rate returns the moving average rate of events per second.
+func (a *StandardEWMA) Rate() float64 {
+	a.mutex.Lock()
+	defer a.mutex.Unlock()
+	return a.rate * float64(1e9)
+}
+
+// Snapshot returns a read-only copy of the EWMA.
+func (a *StandardEWMA) Snapshot() EWMA {
+	return EWMASnapshot(a.Rate())
+}
+
+// Tick ticks the clock to update the moving average.  It assumes it is called
+// every five seconds.
+func (a *StandardEWMA) Tick() {
+	count := atomic.LoadInt64(&a.uncounted)
+	atomic.AddInt64(&a.uncounted, -count)
+	instantRate := float64(count) / float64(5e9)
+	a.mutex.Lock()
+	defer a.mutex.Unlock()
+	if a.init {
+		a.rate += a.alpha * (instantRate - a.rate)
+	} else {
+		a.init = true
+		a.rate = instantRate
+	}
+}
+
+// Update adds n uncounted events.
+func (a *StandardEWMA) Update(n int64) {
+	atomic.AddInt64(&a.uncounted, n)
+}
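
The constructors above bake the 5-second tick interval into their alphas; a short sketch of the arithmetic (alpha = 1 - e^(-tick/window)):

package main

import (
	"fmt"
	"math"
)

func main() {
	// Tick() runs every 5 seconds, so the 1/5/15-minute constructors
	// use windows of 60, 300 and 900 seconds respectively.
	for _, minutes := range []float64{1, 5, 15} {
		alpha := 1 - math.Exp(-5.0/(60.0*minutes))
		fmt.Printf("%2.0f min: alpha=%.6f\n", minutes, alpha)
	}
}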

+ 46 - 0
pkg/metrics/combos.go

@@ -0,0 +1,46 @@
+package metrics
+
+// type comboCounterRef struct {
+// 	*MetricMeta
+// 	usageCounter  Counter
+// 	metricCounter Counter
+// }
+//
+// func RegComboCounter(name string, tagStrings ...string) Counter {
+// 	meta := NewMetricMeta(name, tagStrings)
+// 	cr := &comboCounterRef{
+// 		MetricMeta:    meta,
+// 		usageCounter:  NewCounter(meta),
+// 		metricCounter: NewCounter(meta),
+// 	}
+//
+// 	UsageStats.Register(cr.usageCounter)
+// 	MetricStats.Register(cr.metricCounter)
+//
+// 	return cr
+// }
+//
+// func (c comboCounterRef) Clear() {
+// 	c.usageCounter.Clear()
+// 	c.metricCounter.Clear()
+// }
+//
+// func (c comboCounterRef) Count() int64 {
+// 	panic("Count called on a combocounter ref")
+// }
+//
+// // Dec panics.
+// func (c comboCounterRef) Dec(i int64) {
+// 	c.usageCounter.Dec(i)
+// 	c.metricCounter.Dec(i)
+// }
+//
+// // Inc panics.
+// func (c comboCounterRef) Inc(i int64) {
+// 	c.usageCounter.Inc(i)
+// 	c.metricCounter.Inc(i)
+// }
+//
+// func (c comboCounterRef) Snapshot() Metric {
+// 	return c.metricCounter.Snapshot()
+// }

+ 61 - 0
pkg/metrics/common.go

@@ -0,0 +1,61 @@
+package metrics
+
+import "github.com/grafana/grafana/pkg/log"
+
+type MetricMeta struct {
+	tags map[string]string
+	name string
+}
+
+func NewMetricMeta(name string, tagStrings []string) *MetricMeta {
+	if len(tagStrings)%2 != 0 {
+		log.Fatal(3, "Metrics: tags array is missing value for key, %v", tagStrings)
+	}
+
+	tags := make(map[string]string)
+	for i := 0; i < len(tagStrings); i += 2 {
+		tags[tagStrings[i]] = tagStrings[i+1]
+	}
+
+	return &MetricMeta{
+		tags: tags,
+		name: name,
+	}
+}
+
+func (m *MetricMeta) Name() string {
+	return m.name
+}
+
+func (m *MetricMeta) GetTagsCopy() map[string]string {
+	if len(m.tags) == 0 {
+		return make(map[string]string)
+	}
+
+	tagsCopy := make(map[string]string)
+	for k, v := range m.tags {
+		tagsCopy[k] = v
+	}
+
+	return tagsCopy
+}
+
+func (m *MetricMeta) StringifyTags() string {
+	if len(m.tags) == 0 {
+		return ""
+	}
+
+	str := ""
+	for key, value := range m.tags {
+		str += "." + key + "_" + value
+	}
+
+	return str
+}
+
+type Metric interface {
+	Name() string
+	GetTagsCopy() map[string]string
+	StringifyTags() string
+	Snapshot() Metric
+}

+ 18 - 28
pkg/metrics/counter.go

@@ -4,45 +4,33 @@ import "sync/atomic"
 
 // Counters hold an int64 value that can be incremented and decremented.
 type Counter interface {
+	Metric
+
 	Clear()
 	Count() int64
 	Dec(int64)
 	Inc(int64)
-	Snapshot() Counter
 }
 
 // NewCounter constructs a new StandardCounter.
-func NewCounter() Counter {
-	return &StandardCounter{0}
+func NewCounter(meta *MetricMeta) Counter {
+	return &StandardCounter{
+		MetricMeta: meta,
+		count:      0,
+	}
 }
 
-// CounterSnapshot is a read-only copy of another Counter.
-type CounterSnapshot int64
-
-// Clear panics.
-func (CounterSnapshot) Clear() {
-	panic("Clear called on a CounterSnapshot")
-}
-
-// Count returns the count at the time the snapshot was taken.
-func (c CounterSnapshot) Count() int64 { return int64(c) }
-
-// Dec panics.
-func (CounterSnapshot) Dec(int64) {
-	panic("Dec called on a CounterSnapshot")
+func RegCounter(name string, tagStrings ...string) Counter {
+	cr := NewCounter(NewMetricMeta(name, tagStrings))
+	MetricStats.Register(cr)
+	return cr
 }
 
-// Inc panics.
-func (CounterSnapshot) Inc(int64) {
-	panic("Inc called on a CounterSnapshot")
-}
-
-// Snapshot returns the snapshot.
-func (c CounterSnapshot) Snapshot() Counter { return c }
-
 // StandardCounter is the standard implementation of a Counter and uses the
 // sync/atomic package to manage a single int64 value.
 type StandardCounter struct {
+	*MetricMeta
+
 	count int64
 }
 
@@ -66,7 +54,9 @@ func (c *StandardCounter) Inc(i int64) {
 	atomic.AddInt64(&c.count, i)
 }
 
-// Snapshot returns a read-only copy of the counter.
-func (c *StandardCounter) Snapshot() Counter {
-	return CounterSnapshot(c.Count())
+func (c *StandardCounter) Snapshot() Metric {
+	return &StandardCounter{
+		MetricMeta: c.MetricMeta,
+		count:      c.count,
+	}
 }

+ 11 - 0
pkg/metrics/delta.go

@@ -0,0 +1,11 @@
+package metrics
+
+import "math"
+
+func calculateDelta(oldValue, newValue int64) int64 {
+	if oldValue < newValue {
+		return newValue - oldValue
+	} else {
+		return (math.MaxInt64 - oldValue) + (newValue - math.MinInt64) + 1
+	}
+}
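
The else branch handles int64 counter wraparound. A hypothetical in-package test (calculateDelta is unexported) covering both branches:

package metrics

import (
	"math"
	"testing"
)

func TestCalculateDelta(t *testing.T) {
	// Normal case: the counter simply grew.
	if d := calculateDelta(10, 15); d != 5 {
		t.Errorf("expected 5, got %d", d)
	}
	// Wraparound case: 3 steps to reach MaxInt64, the wrap itself,
	// then 2 more steps to MinInt64+2, six events in total.
	oldV, newV := int64(math.MaxInt64-3), int64(math.MinInt64+2)
	if d := calculateDelta(oldV, newV); d != 6 {
		t.Errorf("expected 6, got %d", d)
	}
}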

+ 82 - 0
pkg/metrics/gauge.go

@@ -0,0 +1,82 @@
+// includes code from
+// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
+// Copyright 2012 Richard Crowley. All rights reserved.
+
+package metrics
+
+import "sync/atomic"
+
+// Gauges hold an int64 value that can be set arbitrarily.
+type Gauge interface {
+	Metric
+
+	Update(int64)
+	Value() int64
+}
+
+func NewGauge(meta *MetricMeta) Gauge {
+	if UseNilMetrics {
+		return NilGauge{}
+	}
+	return &StandardGauge{
+		MetricMeta: meta,
+		value:      0,
+	}
+}
+
+func RegGauge(meta *MetricMeta) Gauge {
+	g := NewGauge(meta)
+	MetricStats.Register(g)
+	return g
+}
+
+// GaugeSnapshot is a read-only copy of another Gauge.
+type GaugeSnapshot struct {
+	*MetricMeta
+	value int64
+}
+
+// Snapshot returns the snapshot.
+func (g GaugeSnapshot) Snapshot() Metric { return g }
+
+// Update panics.
+func (GaugeSnapshot) Update(int64) {
+	panic("Update called on a GaugeSnapshot")
+}
+
+// Value returns the value at the time the snapshot was taken.
+func (g GaugeSnapshot) Value() int64 { return g.value }
+
+// NilGauge is a no-op Gauge.
+type NilGauge struct{ *MetricMeta }
+
+// Snapshot is a no-op.
+func (NilGauge) Snapshot() Metric { return NilGauge{} }
+
+// Update is a no-op.
+func (NilGauge) Update(v int64) {}
+
+// Value is a no-op.
+func (NilGauge) Value() int64 { return 0 }
+
+// StandardGauge is the standard implementation of a Gauge and uses the
+// sync/atomic package to manage a single int64 value.
+type StandardGauge struct {
+	*MetricMeta
+	value int64
+}
+
+// Snapshot returns a read-only copy of the gauge.
+func (g *StandardGauge) Snapshot() Metric {
+	return GaugeSnapshot{MetricMeta: g.MetricMeta, value: g.value}
+}
+
+// Update updates the gauge's value.
+func (g *StandardGauge) Update(v int64) {
+	atomic.StoreInt64(&g.value, v)
+}
+
+// Value returns the gauge's current value.
+func (g *StandardGauge) Value() int64 {
+	return atomic.LoadInt64(&g.value)
+}

+ 91 - 0
pkg/metrics/graphite.go

@@ -0,0 +1,91 @@
+package metrics
+
+import (
+	"bytes"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/grafana/grafana/pkg/log"
+	"github.com/grafana/grafana/pkg/setting"
+)
+
+type GraphitePublisher struct {
+	address    string
+	protocol   string
+	prefix     string
+	prevCounts map[string]int64
+}
+
+func CreateGraphitePublisher() (*GraphitePublisher, error) {
+	graphiteSection, err := setting.Cfg.GetSection("metrics.graphite")
+	if err != nil {
+		// no [metrics.graphite] section configured, so there is nothing to publish to
+		return nil, nil
+	}
+
+	publisher := &GraphitePublisher{}
+	publisher.prevCounts = make(map[string]int64)
+	publisher.protocol = "tcp"
+	publisher.address = graphiteSection.Key("address").MustString("localhost:2003")
+	publisher.prefix = graphiteSection.Key("prefix").MustString("service.grafana.%(instance_name)s")
+
+	return publisher, nil
+}
+
+func (this *GraphitePublisher) Publish(metrics []Metric) {
+	conn, err := net.DialTimeout(this.protocol, this.address, time.Second*5)
+
+	if err != nil {
+		log.Error(3, "Metrics: GraphitePublisher:  Failed to connect to %s!", err)
+		return
+	}
+
+	buf := bytes.NewBufferString("")
+	now := time.Now().Unix()
+
+	for _, m := range metrics {
+		metricName := this.prefix + m.Name() + m.StringifyTags()
+
+		switch metric := m.(type) {
+		case Counter:
+			this.addCount(buf, metricName+".count", metric.Count(), now)
+		case Timer:
+			percentiles := metric.Percentiles([]float64{0.25, 0.75, 0.90, 0.99})
+			this.addCount(buf, metricName+".count", metric.Count(), now)
+			this.addInt(buf, metricName+".max", metric.Max(), now)
+			this.addInt(buf, metricName+".min", metric.Min(), now)
+			this.addFloat(buf, metricName+".mean", metric.Mean(), now)
+			this.addFloat(buf, metricName+".std", metric.StdDev(), now)
+			this.addFloat(buf, metricName+".p25", percentiles[0], now)
+			this.addFloat(buf, metricName+".p75", percentiles[1], now)
+			this.addFloat(buf, metricName+".p90", percentiles[2], now)
+			this.addFloat(buf, metricName+".p99", percentiles[3], now)
+		}
+	}
+
+	log.Trace("Metrics: GraphitePublisher.Publish() \n%s", buf)
+	_, err = conn.Write(buf.Bytes())
+
+	if err != nil {
+		log.Error(3, "Metrics: GraphitePublisher: Failed to send metrics! %s", err)
+	}
+}
+
+func (this *GraphitePublisher) addInt(buf *bytes.Buffer, metric string, value int64, now int64) {
+	buf.WriteString(fmt.Sprintf("%s %d %d\n", metric, value, now))
+}
+
+func (this *GraphitePublisher) addFloat(buf *bytes.Buffer, metric string, value float64, now int64) {
+	buf.WriteString(fmt.Sprintf("%s %f %d\n", metric, value, now))
+}
+
+func (this *GraphitePublisher) addCount(buf *bytes.Buffer, metric string, value int64, now int64) {
+	delta := value
+
+	if last, ok := this.prevCounts[metric]; ok {
+		delta = calculateDelta(last, value)
+	}
+
+	this.prevCounts[metric] = value
+	buf.WriteString(fmt.Sprintf("%s %d %d\n", metric, delta, now))
+}

+ 189 - 0
pkg/metrics/histogram.go

@@ -0,0 +1,189 @@
+// includes code from
+// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
+// Copyright 2012 Richard Crowley. All rights reserved.
+
+package metrics
+
+// Histograms calculate distribution statistics from a series of int64 values.
+type Histogram interface {
+	Metric
+
+	Clear()
+	Count() int64
+	Max() int64
+	Mean() float64
+	Min() int64
+	Percentile(float64) float64
+	Percentiles([]float64) []float64
+	StdDev() float64
+	Sum() int64
+	Update(int64)
+	Variance() float64
+}
+
+func NewHistogram(meta *MetricMeta, s Sample) Histogram {
+	return &StandardHistogram{
+		MetricMeta: meta,
+		sample:     s,
+	}
+}
+
+// HistogramSnapshot is a read-only copy of another Histogram.
+type HistogramSnapshot struct {
+	*MetricMeta
+	sample *SampleSnapshot
+}
+
+// Clear panics.
+func (*HistogramSnapshot) Clear() {
+	panic("Clear called on a HistogramSnapshot")
+}
+
+// Count returns the number of samples recorded at the time the snapshot was
+// taken.
+func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() }
+
+// Max returns the maximum value in the sample at the time the snapshot was
+// taken.
+func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() }
+
+// Mean returns the mean of the values in the sample at the time the snapshot
+// was taken.
+func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() }
+
+// Min returns the minimum value in the sample at the time the snapshot was
+// taken.
+func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() }
+
+// Percentile returns an arbitrary percentile of values in the sample at the
+// time the snapshot was taken.
+func (h *HistogramSnapshot) Percentile(p float64) float64 {
+	return h.sample.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values in the sample
+// at the time the snapshot was taken.
+func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 {
+	return h.sample.Percentiles(ps)
+}
+
+// Sample returns the Sample underlying the histogram.
+func (h *HistogramSnapshot) Sample() Sample { return h.sample }
+
+// Snapshot returns the snapshot.
+func (h *HistogramSnapshot) Snapshot() Metric { return h }
+
+// StdDev returns the standard deviation of the values in the sample at the
+// time the snapshot was taken.
+func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() }
+
+// Sum returns the sum in the sample at the time the snapshot was taken.
+func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() }
+
+// Update panics.
+func (*HistogramSnapshot) Update(int64) {
+	panic("Update called on a HistogramSnapshot")
+}
+
+// Variance returns the variance of inputs at the time the snapshot was taken.
+func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() }
+
+// NilHistogram is a no-op Histogram.
+type NilHistogram struct {
+	*MetricMeta
+}
+
+// Clear is a no-op.
+func (NilHistogram) Clear() {}
+
+// Count is a no-op.
+func (NilHistogram) Count() int64 { return 0 }
+
+// Max is a no-op.
+func (NilHistogram) Max() int64 { return 0 }
+
+// Mean is a no-op.
+func (NilHistogram) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilHistogram) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilHistogram) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilHistogram) Percentiles(ps []float64) []float64 {
+	return make([]float64, len(ps))
+}
+
+// Sample is a no-op.
+func (NilHistogram) Sample() Sample { return NilSample{} }
+
+// Snapshot is a no-op.
+func (n NilHistogram) Snapshot() Metric { return n }
+
+// StdDev is a no-op.
+func (NilHistogram) StdDev() float64 { return 0.0 }
+
+// Sum is a no-op.
+func (NilHistogram) Sum() int64 { return 0 }
+
+// Update is a no-op.
+func (NilHistogram) Update(v int64) {}
+
+// Variance is a no-op.
+func (NilHistogram) Variance() float64 { return 0.0 }
+
+// StandardHistogram is the standard implementation of a Histogram and uses a
+// Sample to bound its memory use.
+type StandardHistogram struct {
+	*MetricMeta
+	sample Sample
+}
+
+// Clear clears the histogram and its sample.
+func (h *StandardHistogram) Clear() { h.sample.Clear() }
+
+// Count returns the number of samples recorded since the histogram was last
+// cleared.
+func (h *StandardHistogram) Count() int64 { return h.sample.Count() }
+
+// Max returns the maximum value in the sample.
+func (h *StandardHistogram) Max() int64 { return h.sample.Max() }
+
+// Mean returns the mean of the values in the sample.
+func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() }
+
+// Min returns the minimum value in the sample.
+func (h *StandardHistogram) Min() int64 { return h.sample.Min() }
+
+// Percentile returns an arbitrary percentile of the values in the sample.
+func (h *StandardHistogram) Percentile(p float64) float64 {
+	return h.sample.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of the values in the
+// sample.
+func (h *StandardHistogram) Percentiles(ps []float64) []float64 {
+	return h.sample.Percentiles(ps)
+}
+
+// Sample returns the Sample underlying the histogram.
+func (h *StandardHistogram) Sample() Sample { return h.sample }
+
+// Snapshot returns a read-only copy of the histogram.
+func (h *StandardHistogram) Snapshot() Metric {
+	return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)}
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() }
+
+// Sum returns the sum in the sample.
+func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() }
+
+// Update samples a new value.
+func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) }
+
+// Variance returns the variance of the values in the sample.
+func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() }

+ 90 - 0
pkg/metrics/histogram_test.go

@@ -0,0 +1,90 @@
+// includes code from
+// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
+// Copyright 2012 Richard Crowley. All rights reserved.
+
+package metrics
+
+import "testing"
+
+func BenchmarkHistogram(b *testing.B) {
+	h := NewHistogram(nil, NewUniformSample(100))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		h.Update(int64(i))
+	}
+}
+
+func TestHistogram10000(t *testing.T) {
+	h := NewHistogram(nil, NewUniformSample(100000))
+	for i := 1; i <= 10000; i++ {
+		h.Update(int64(i))
+	}
+	testHistogram10000(t, h)
+}
+
+func TestHistogramEmpty(t *testing.T) {
+	h := NewHistogram(nil, NewUniformSample(100))
+	if count := h.Count(); 0 != count {
+		t.Errorf("h.Count(): 0 != %v\n", count)
+	}
+	if min := h.Min(); 0 != min {
+		t.Errorf("h.Min(): 0 != %v\n", min)
+	}
+	if max := h.Max(); 0 != max {
+		t.Errorf("h.Max(): 0 != %v\n", max)
+	}
+	if mean := h.Mean(); 0.0 != mean {
+		t.Errorf("h.Mean(): 0.0 != %v\n", mean)
+	}
+	if stdDev := h.StdDev(); 0.0 != stdDev {
+		t.Errorf("h.StdDev(): 0.0 != %v\n", stdDev)
+	}
+	ps := h.Percentiles([]float64{0.5, 0.75, 0.99})
+	if 0.0 != ps[0] {
+		t.Errorf("median: 0.0 != %v\n", ps[0])
+	}
+	if 0.0 != ps[1] {
+		t.Errorf("75th percentile: 0.0 != %v\n", ps[1])
+	}
+	if 0.0 != ps[2] {
+		t.Errorf("99th percentile: 0.0 != %v\n", ps[2])
+	}
+}
+
+func TestHistogramSnapshot(t *testing.T) {
+	h := NewHistogram(nil, NewUniformSample(100000))
+	for i := 1; i <= 10000; i++ {
+		h.Update(int64(i))
+	}
+	snapshot := h.Snapshot().(Histogram)
+	h.Update(0)
+	testHistogram10000(t, snapshot)
+}
+
+func testHistogram10000(t *testing.T, h Histogram) {
+	if count := h.Count(); 10000 != count {
+		t.Errorf("h.Count(): 10000 != %v\n", count)
+	}
+	if min := h.Min(); 1 != min {
+		t.Errorf("h.Min(): 1 != %v\n", min)
+	}
+	if max := h.Max(); 10000 != max {
+		t.Errorf("h.Max(): 10000 != %v\n", max)
+	}
+	if mean := h.Mean(); 5000.5 != mean {
+		t.Errorf("h.Mean(): 5000.5 != %v\n", mean)
+	}
+	if stdDev := h.StdDev(); 2886.751331514372 != stdDev {
+		t.Errorf("h.StdDev(): 2886.751331514372 != %v\n", stdDev)
+	}
+	ps := h.Percentiles([]float64{0.5, 0.75, 0.99})
+	if 5000.5 != ps[0] {
+		t.Errorf("median: 5000.5 != %v\n", ps[0])
+	}
+	if 7500.75 != ps[1] {
+		t.Errorf("75th percentile: 7500.75 != %v\n", ps[1])
+	}
+	if 9900.99 != ps[2] {
+		t.Errorf("99th percentile: 9900.99 != %v\n", ps[2])
+	}
+}

+ 221 - 0
pkg/metrics/meter.go

@@ -0,0 +1,221 @@
+// includes code from
+// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
+// Copyright 2012 Richard Crowley. All rights reserved.
+
+package metrics
+
+import (
+	"sync"
+	"time"
+)
+
+// Meters count events to produce exponentially-weighted moving average rates
+// at one-, five-, and fifteen-minutes and a mean rate.
+type Meter interface {
+	Metric
+
+	Count() int64
+	Mark(int64)
+	Rate1() float64
+	Rate5() float64
+	Rate15() float64
+	RateMean() float64
+}
+
+// NewMeter constructs a new StandardMeter and launches a goroutine.
+func NewMeter(meta *MetricMeta) Meter {
+	if UseNilMetrics {
+		return NilMeter{}
+	}
+
+	m := newStandardMeter(meta)
+	arbiter.Lock()
+	defer arbiter.Unlock()
+	arbiter.meters = append(arbiter.meters, m)
+	if !arbiter.started {
+		arbiter.started = true
+		go arbiter.tick()
+	}
+	return m
+}
+
+type MeterSnapshot struct {
+	*MetricMeta
+	count                          int64
+	rate1, rate5, rate15, rateMean float64
+}
+
+// Count returns the count of events at the time the snapshot was taken.
+func (m *MeterSnapshot) Count() int64 { return m.count }
+
+// Mark panics.
+func (*MeterSnapshot) Mark(n int64) {
+	panic("Mark called on a MeterSnapshot")
+}
+
+// Rate1 returns the one-minute moving average rate of events per second at the
+// time the snapshot was taken.
+func (m *MeterSnapshot) Rate1() float64 { return m.rate1 }
+
+// Rate5 returns the five-minute moving average rate of events per second at
+// the time the snapshot was taken.
+func (m *MeterSnapshot) Rate5() float64 { return m.rate5 }
+
+// Rate15 returns the fifteen-minute moving average rate of events per second
+// at the time the snapshot was taken.
+func (m *MeterSnapshot) Rate15() float64 { return m.rate15 }
+
+// RateMean returns the meter's mean rate of events per second at the time the
+// snapshot was taken.
+func (m *MeterSnapshot) RateMean() float64 { return m.rateMean }
+
+// Snapshot returns the snapshot.
+func (m *MeterSnapshot) Snapshot() Metric { return m }
+
+// NilMeter is a no-op Meter.
+type NilMeter struct{ *MetricMeta }
+
+// Count is a no-op.
+func (NilMeter) Count() int64 { return 0 }
+
+// Mark is a no-op.
+func (NilMeter) Mark(n int64) {}
+
+// Rate1 is a no-op.
+func (NilMeter) Rate1() float64 { return 0.0 }
+
+// Rate5 is a no-op.
+func (NilMeter) Rate5() float64 { return 0.0 }
+
+// Rate15is a no-op.
+func (NilMeter) Rate15() float64 { return 0.0 }
+
+// RateMean is a no-op.
+func (NilMeter) RateMean() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilMeter) Snapshot() Metric { return NilMeter{} }
+
+// StandardMeter is the standard implementation of a Meter.
+type StandardMeter struct {
+	*MetricMeta
+	lock        sync.RWMutex
+	snapshot    *MeterSnapshot
+	a1, a5, a15 EWMA
+	startTime   time.Time
+}
+
+func newStandardMeter(meta *MetricMeta) *StandardMeter {
+	return &StandardMeter{
+		MetricMeta: meta,
+		snapshot:   &MeterSnapshot{MetricMeta: meta},
+		a1:         NewEWMA1(),
+		a5:         NewEWMA5(),
+		a15:        NewEWMA15(),
+		startTime:  time.Now(),
+	}
+}
+
+// Count returns the number of events recorded.
+func (m *StandardMeter) Count() int64 {
+	m.lock.RLock()
+	count := m.snapshot.count
+	m.lock.RUnlock()
+	return count
+}
+
+// Mark records the occurrence of n events.
+func (m *StandardMeter) Mark(n int64) {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+	m.snapshot.count += n
+	m.a1.Update(n)
+	m.a5.Update(n)
+	m.a15.Update(n)
+	m.updateSnapshot()
+}
+
+// Rate1 returns the one-minute moving average rate of events per second.
+func (m *StandardMeter) Rate1() float64 {
+	m.lock.RLock()
+	rate1 := m.snapshot.rate1
+	m.lock.RUnlock()
+	return rate1
+}
+
+// Rate5 returns the five-minute moving average rate of events per second.
+func (m *StandardMeter) Rate5() float64 {
+	m.lock.RLock()
+	rate5 := m.snapshot.rate5
+	m.lock.RUnlock()
+	return rate5
+}
+
+// Rate15 returns the fifteen-minute moving average rate of events per second.
+func (m *StandardMeter) Rate15() float64 {
+	m.lock.RLock()
+	rate15 := m.snapshot.rate15
+	m.lock.RUnlock()
+	return rate15
+}
+
+// RateMean returns the meter's mean rate of events per second.
+func (m *StandardMeter) RateMean() float64 {
+	m.lock.RLock()
+	rateMean := m.snapshot.rateMean
+	m.lock.RUnlock()
+	return rateMean
+}
+
+// Snapshot returns a read-only copy of the meter.
+func (m *StandardMeter) Snapshot() Metric {
+	m.lock.RLock()
+	snapshot := *m.snapshot
+	m.lock.RUnlock()
+	return &snapshot
+}
+
+func (m *StandardMeter) updateSnapshot() {
+	// should run with write lock held on m.lock
+	snapshot := m.snapshot
+	snapshot.rate1 = m.a1.Rate()
+	snapshot.rate5 = m.a5.Rate()
+	snapshot.rate15 = m.a15.Rate()
+	snapshot.rateMean = float64(snapshot.count) / time.Since(m.startTime).Seconds()
+}
+
+func (m *StandardMeter) tick() {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+	m.a1.Tick()
+	m.a5.Tick()
+	m.a15.Tick()
+	m.updateSnapshot()
+}
+
+type meterArbiter struct {
+	sync.RWMutex
+	started bool
+	meters  []*StandardMeter
+	ticker  *time.Ticker
+}
+
+var arbiter = meterArbiter{ticker: time.NewTicker(5 * time.Second)}
+
+// Ticks meters on the scheduled interval
+func (ma *meterArbiter) tick() {
+	for range ma.ticker.C {
+		ma.tickMeters()
+	}
+}
+
+func (ma *meterArbiter) tickMeters() {
+	ma.RLock()
+	defer ma.RUnlock()
+	for _, meter := range ma.meters {
+		meter.tick()
+	}
+}
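
For orientation, a minimal usage sketch of the meter machinery above. It assumes the NewMeter constructor and NewMetricMeta helper whose signatures are implied by NewTimer/RegTimer further down in this diff, and that NewMeter registers the StandardMeter with the arbiter so its EWMAs are ticked every 5 seconds:

```go
package main

import (
	"fmt"

	"github.com/grafana/grafana/pkg/metrics"
)

func main() {
	// Constructor signatures inferred from RegTimer/NewTimer below.
	meter := metrics.NewMeter(metrics.NewMetricMeta("proxy.requests", nil))

	meter.Mark(1) // bumps the count and feeds the 1/5/15-minute EWMAs

	// With metrics disabled (the default), this is a NilMeter and every
	// rate reads as zero; after Init with metrics enabled it is live.
	fmt.Println(meter.Count(), meter.Rate1(), meter.RateMean())
}
```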

+ 0 - 39
pkg/metrics/metric_ref.go

@@ -1,39 +0,0 @@
-package metrics
-
-type comboCounterRef struct {
-	usageCounter  Counter
-	metricCounter Counter
-}
-
-func NewComboCounterRef(name string) Counter {
-	cr := &comboCounterRef{}
-	cr.usageCounter = UsageStats.GetOrRegister(name, NewCounter).(Counter)
-	cr.metricCounter = MetricStats.GetOrRegister(name, NewCounter).(Counter)
-	return cr
-}
-
-func (c comboCounterRef) Clear() {
-	c.usageCounter.Clear()
-	c.metricCounter.Clear()
-}
-
-func (c comboCounterRef) Count() int64 {
-	panic("Count called on a combocounter ref")
-}
-
-// Dec panics.
-func (c comboCounterRef) Dec(i int64) {
-	c.usageCounter.Dec(i)
-	c.metricCounter.Dec(i)
-}
-
-// Inc panics.
-func (c comboCounterRef) Inc(i int64) {
-	c.usageCounter.Inc(i)
-	c.metricCounter.Inc(i)
-}
-
-// Snapshot returns the snapshot.
-func (c comboCounterRef) Snapshot() Counter {
-	panic("snapshot called on a combocounter ref")
-}

+ 66 - 26
pkg/metrics/metrics.go

@@ -1,31 +1,71 @@
 package metrics
 
-var UsageStats = NewRegistry()
-var MetricStats = NewRegistry()
+var MetricStats Registry
+var UseNilMetrics bool
+
+func init() {
+	// init with nil metrics
+	initMetricVars(&MetricSettings{})
+}
 
 var (
-	M_Instance_Start = NewComboCounterRef("instance.start")
-
-	M_Page_Status_200 = NewComboCounterRef("page.status.200")
-	M_Page_Status_500 = NewComboCounterRef("page.status.500")
-	M_Page_Status_404 = NewComboCounterRef("page.status.404")
-
-	M_Api_Status_500 = NewComboCounterRef("api.status.500")
-	M_Api_Status_404 = NewComboCounterRef("api.status.404")
-
-	M_Api_User_SignUpStarted   = NewComboCounterRef("api.user.signup_started")
-	M_Api_User_SignUpCompleted = NewComboCounterRef("api.user.signup_completed")
-	M_Api_User_SignUpInvite    = NewComboCounterRef("api.user.signup_invite")
-	M_Api_Dashboard_Get        = NewComboCounterRef("api.dashboard.get")
-	M_Api_Dashboard_Post       = NewComboCounterRef("api.dashboard.post")
-	M_Api_Admin_User_Create    = NewComboCounterRef("api.admin.user_create")
-	M_Api_Login_Post           = NewComboCounterRef("api.login.post")
-	M_Api_Login_OAuth          = NewComboCounterRef("api.login.oauth")
-	M_Api_Org_Create           = NewComboCounterRef("api.org.create")
-
-	M_Api_Dashboard_Snapshot_Create   = NewComboCounterRef("api.dashboard_snapshot.create")
-	M_Api_Dashboard_Snapshot_External = NewComboCounterRef("api.dashboard_snapshot.external")
-	M_Api_Dashboard_Snapshot_Get      = NewComboCounterRef("api.dashboard_snapshot.get")
-
-	M_Models_Dashboard_Insert = NewComboCounterRef("models.dashboard.insert")
+	M_Instance_Start                  Counter
+	M_Page_Status_200                 Counter
+	M_Page_Status_500                 Counter
+	M_Page_Status_404                 Counter
+	M_Api_Status_500                  Counter
+	M_Api_Status_404                  Counter
+	M_Api_User_SignUpStarted          Counter
+	M_Api_User_SignUpCompleted        Counter
+	M_Api_User_SignUpInvite           Counter
+	M_Api_Dashboard_Save              Timer
+	M_Api_Dashboard_Get               Timer
+	M_Api_Dashboard_Search            Timer
+	M_Api_Admin_User_Create           Counter
+	M_Api_Login_Post                  Counter
+	M_Api_Login_OAuth                 Counter
+	M_Api_Org_Create                  Counter
+	M_Api_Dashboard_Snapshot_Create   Counter
+	M_Api_Dashboard_Snapshot_External Counter
+	M_Api_Dashboard_Snapshot_Get      Counter
+	M_Models_Dashboard_Insert         Counter
+
+	// Timers
+	M_DataSource_ProxyReq_Timer Timer
 )
+
+func initMetricVars(settings *MetricSettings) {
+	UseNilMetrics = !settings.Enabled
+	MetricStats = NewRegistry()
+
+	M_Instance_Start = RegCounter("instance_start")
+
+	M_Page_Status_200 = RegCounter("page.resp_status", "code", "200")
+	M_Page_Status_500 = RegCounter("page.resp_status", "code", "500")
+	M_Page_Status_404 = RegCounter("page.resp_status", "code", "404")
+
+	M_Api_Status_500 = RegCounter("api.resp_status", "code", "500")
+	M_Api_Status_404 = RegCounter("api.resp_status", "code", "404")
+
+	M_Api_User_SignUpStarted = RegCounter("api.user.signup_started")
+	M_Api_User_SignUpCompleted = RegCounter("api.user.signup_completed")
+	M_Api_User_SignUpInvite = RegCounter("api.user.signup_invite")
+
+	M_Api_Dashboard_Save = RegTimer("api.dashboard.save")
+	M_Api_Dashboard_Get = RegTimer("api.dashboard.get")
+	M_Api_Dashboard_Search = RegTimer("api.dashboard.search")
+
+	M_Api_Admin_User_Create = RegCounter("api.admin.user_create")
+	M_Api_Login_Post = RegCounter("api.login.post")
+	M_Api_Login_OAuth = RegCounter("api.login.oauth")
+	M_Api_Org_Create = RegCounter("api.org.create")
+
+	M_Api_Dashboard_Snapshot_Create = RegCounter("api.dashboard_snapshot.create")
+	M_Api_Dashboard_Snapshot_External = RegCounter("api.dashboard_snapshot.external")
+	M_Api_Dashboard_Snapshot_Get = RegCounter("api.dashboard_snapshot.get")
+
+	M_Models_Dashboard_Insert = RegCounter("models.dashboard.insert")
+
+	// Timers
+	M_DataSource_ProxyReq_Timer = RegTimer("api.dataproxy.request.all")
+}
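
A sketch of how these package-level vars are used once Init (defined in publish.go below) has swapped the nil metrics for live ones; the real call sites are in pkg/api per the file list at the top of this page:

```go
package main

import (
	"time"

	"github.com/grafana/grafana/pkg/metrics"
)

func main() {
	metrics.Init() // reads the [metrics] settings section, then initMetricVars

	metrics.M_Api_User_SignUpStarted.Inc(1) // plain counter
	metrics.M_Page_Status_404.Inc(1)        // counter tagged code=404

	start := time.Now()
	// ... handle a dashboard fetch ...
	metrics.M_Api_Dashboard_Get.UpdateSince(start) // timer, records nanoseconds
}
```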

+ 27 - 13
pkg/metrics/report_usage.go → pkg/metrics/publish.go

@@ -14,19 +14,43 @@ import (
 	"github.com/grafana/grafana/pkg/setting"
 )
 
-func StartUsageReportLoop() chan struct{} {
+func Init() {
+	settings := readSettings()
+	initMetricVars(settings)
+	go instrumentationLoop(settings)
+}
+
+func instrumentationLoop(settings *MetricSettings) {
 	M_Instance_Start.Inc(1)
 
-	ticker := time.NewTicker(time.Hour * 24)
+	onceEveryDayTick := time.NewTicker(time.Hour * 24)
+	secondTicker := time.NewTicker(time.Second * time.Duration(settings.IntervalSeconds))
+
 	for {
 		select {
-		case <-ticker.C:
+		case <-onceEveryDayTick.C:
 			sendUsageStats()
+		case <-secondTicker.C:
+			if settings.Enabled {
+				sendMetrics(settings)
+			}
 		}
 	}
 }
 
+func sendMetrics(settings *MetricSettings) {
+	metrics := MetricStats.GetSnapshots()
+
+	for _, publisher := range settings.Publishers {
+		publisher.Publish(metrics)
+	}
+}
+
 func sendUsageStats() {
+	if !setting.ReportingEnabled {
+		return
+	}
+
 	log.Trace("Sending anonymous usage stats to stats.grafana.org")
 
 	version := strings.Replace(setting.BuildVersion, ".", "_", -1)
@@ -37,16 +61,6 @@ func sendUsageStats() {
 		"metrics": metrics,
 	}
 
-	UsageStats.Each(func(name string, i interface{}) {
-		switch metric := i.(type) {
-		case Counter:
-			if metric.Count() > 0 {
-				metrics[name+".count"] = metric.Count()
-				metric.Clear()
-			}
-		}
-	})
-
 	statsQuery := m.GetSystemStatsQuery{}
 	if err := bus.Dispatch(&statsQuery); err != nil {
 		log.Error(3, "Failed to get system stats", err)
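
The small change to pkg/cmd/grafana-server/main.go in the file list presumably reduces to calling Init once configuration is loaded; a sketch of that wiring, assuming nothing beyond the functions shown in this diff:

```go
package main

import (
	"github.com/grafana/grafana/pkg/metrics"
	"github.com/grafana/grafana/pkg/setting"
)

func main() {
	// readSettings() reads setting.Cfg, so configuration must load first.
	if err := setting.NewConfigContext(&setting.CommandLineArgs{HomePath: "."}); err != nil {
		panic(err)
	}

	// Registers the metric vars and starts instrumentationLoop in a goroutine.
	metrics.Init()
}
```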

+ 13 - 78
pkg/metrics/registry.go

@@ -1,102 +1,37 @@
 package metrics
 
-import (
-	"fmt"
-	"reflect"
-	"sync"
-)
-
-// DuplicateMetric is the error returned by Registry.Register when a metric
-// already exists.  If you mean to Register that metric you must first
-// Unregister the existing metric.
-type DuplicateMetric string
-
-func (err DuplicateMetric) Error() string {
-	return fmt.Sprintf("duplicate metric: %s", string(err))
-}
+import "sync"
 
 type Registry interface {
-	// Call the given function for each registered metric.
-	Each(func(string, interface{}))
-
-	// Get the metric by the given name or nil if none is registered.
-	Get(string) interface{}
-
-	// Gets an existing metric or registers the given one.
-	// The interface can be the metric to register if not found in registry,
-	// or a function returning the metric for lazy instantiation.
-	GetOrRegister(string, interface{}) interface{}
-
-	// Register the given metric under the given name.
-	Register(string, interface{}) error
+	GetSnapshots() []Metric
+	Register(metric Metric)
 }
 
-// The standard implementation of a Registry is a mutex-protected map
-// of names to metrics.
+// The standard implementation of a Registry is a mutex-protected slice
+// of metrics.
 type StandardRegistry struct {
-	metrics map[string]interface{}
+	metrics []Metric
 	mutex   sync.Mutex
 }
 
 // Create a new registry.
 func NewRegistry() Registry {
-	return &StandardRegistry{metrics: make(map[string]interface{})}
-}
-
-// Call the given function for each registered metric.
-func (r *StandardRegistry) Each(f func(string, interface{})) {
-	for name, i := range r.registered() {
-		f(name, i)
-	}
-}
-
-// Get the metric by the given name or nil if none is registered.
-func (r *StandardRegistry) Get(name string) interface{} {
-	r.mutex.Lock()
-	defer r.mutex.Unlock()
-	return r.metrics[name]
-}
-
-// Gets an existing metric or creates and registers a new one. Threadsafe
-// alternative to calling Get and Register on failure.
-// The interface can be the metric to register if not found in registry,
-// or a function returning the metric for lazy instantiation.
-func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} {
-	r.mutex.Lock()
-	defer r.mutex.Unlock()
-	if metric, ok := r.metrics[name]; ok {
-		return metric
-	}
-	if v := reflect.ValueOf(i); v.Kind() == reflect.Func {
-		i = v.Call(nil)[0].Interface()
+	return &StandardRegistry{
+		metrics: make([]Metric, 0),
 	}
-	r.register(name, i)
-	return i
 }
 
-// Register the given metric under the given name.  Returns a DuplicateMetric
-// if a metric by the given name is already registered.
-func (r *StandardRegistry) Register(name string, i interface{}) error {
+func (r *StandardRegistry) Register(metric Metric) {
 	r.mutex.Lock()
 	defer r.mutex.Unlock()
-	return r.register(name, i)
+	r.metrics = append(r.metrics, metric)
 }
 
-func (r *StandardRegistry) register(name string, i interface{}) error {
-	if _, ok := r.metrics[name]; ok {
-		return DuplicateMetric(name)
-	}
-
-	r.metrics[name] = i
-	return nil
-}
-
-func (r *StandardRegistry) registered() map[string]interface{} {
-	metrics := make(map[string]interface{}, len(r.metrics))
-	r.mutex.Lock()
-	defer r.mutex.Unlock()
-	for name, i := range r.metrics {
-		metrics[name] = i
+// GetSnapshots returns a read-only snapshot of every registered metric.
+func (r *StandardRegistry) GetSnapshots() []Metric {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	metrics := make([]Metric, len(r.metrics))
+	for i, metric := range r.metrics {
+		metrics[i] = metric.Snapshot()
 	}
 	return metrics
 }
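
The registry API is now just Register plus GetSnapshots, and anything implementing Metric can participate; a sketch using the RegCounter helper from the metrics.go hunk above (its variadic tag signature is inferred from the call sites there):

```go
package main

import (
	"fmt"

	"github.com/grafana/grafana/pkg/metrics"
)

func main() {
	requests := metrics.RegCounter("plugin.requests", "plugin", "example")
	requests.Inc(1)

	// Publishers only ever see immutable snapshots, never live metrics.
	for _, m := range metrics.MetricStats.GetSnapshots() {
		fmt.Printf("%T\n", m)
	}
}
```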

+ 607 - 0
pkg/metrics/sample.go

@@ -0,0 +1,607 @@
+// includes code from
+// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
+// Copyright 2012 Richard Crowley. All rights reserved.
+
+package metrics
+
+import (
+	"math"
+	"math/rand"
+	"sort"
+	"sync"
+	"time"
+)
+
+const rescaleThreshold = time.Hour
+
+// Samples maintain a statistically-significant selection of values from
+// a stream.
+type Sample interface {
+	Clear()
+	Count() int64
+	Max() int64
+	Mean() float64
+	Min() int64
+	Percentile(float64) float64
+	Percentiles([]float64) []float64
+	Size() int
+	Snapshot() Sample
+	StdDev() float64
+	Sum() int64
+	Update(int64)
+	Values() []int64
+	Variance() float64
+}
+
+// ExpDecaySample is an exponentially-decaying sample using a forward-decaying
+// priority reservoir.  See Cormode et al's "Forward Decay: A Practical Time
+// Decay Model for Streaming Systems".
+//
+// <http://www.research.att.com/people/Cormode_Graham/library/publications/CormodeShkapenyukSrivastavaXu09.pdf>
+type ExpDecaySample struct {
+	alpha         float64
+	count         int64
+	mutex         sync.Mutex
+	reservoirSize int
+	t0, t1        time.Time
+	values        *expDecaySampleHeap
+}
+
+// NewExpDecaySample constructs a new exponentially-decaying sample with the
+// given reservoir size and alpha.
+func NewExpDecaySample(reservoirSize int, alpha float64) Sample {
+	s := &ExpDecaySample{
+		alpha:         alpha,
+		reservoirSize: reservoirSize,
+		t0:            time.Now(),
+		values:        newExpDecaySampleHeap(reservoirSize),
+	}
+	s.t1 = s.t0.Add(rescaleThreshold)
+	return s
+}
+
+// Clear clears all samples.
+func (s *ExpDecaySample) Clear() {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	s.count = 0
+	s.t0 = time.Now()
+	s.t1 = s.t0.Add(rescaleThreshold)
+	s.values.Clear()
+}
+
+// Count returns the number of samples recorded, which may exceed the
+// reservoir size.
+func (s *ExpDecaySample) Count() int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return s.count
+}
+
+// Max returns the maximum value in the sample, which may not be the maximum
+// value ever to be part of the sample.
+func (s *ExpDecaySample) Max() int64 {
+	return SampleMax(s.Values())
+}
+
+// Mean returns the mean of the values in the sample.
+func (s *ExpDecaySample) Mean() float64 {
+	return SampleMean(s.Values())
+}
+
+// Min returns the minimum value in the sample, which may not be the minimum
+// value ever to be part of the sample.
+func (s *ExpDecaySample) Min() int64 {
+	return SampleMin(s.Values())
+}
+
+// Percentile returns an arbitrary percentile of values in the sample.
+func (s *ExpDecaySample) Percentile(p float64) float64 {
+	return SamplePercentile(s.Values(), p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values in the
+// sample.
+func (s *ExpDecaySample) Percentiles(ps []float64) []float64 {
+	return SamplePercentiles(s.Values(), ps)
+}
+
+// Size returns the size of the sample, which is at most the reservoir size.
+func (s *ExpDecaySample) Size() int {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return s.values.Size()
+}
+
+// Snapshot returns a read-only copy of the sample.
+func (s *ExpDecaySample) Snapshot() Sample {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	vals := s.values.Values()
+	values := make([]int64, len(vals))
+	for i, v := range vals {
+		values[i] = v.v
+	}
+	return &SampleSnapshot{
+		count:  s.count,
+		values: values,
+	}
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (s *ExpDecaySample) StdDev() float64 {
+	return SampleStdDev(s.Values())
+}
+
+// Sum returns the sum of the values in the sample.
+func (s *ExpDecaySample) Sum() int64 {
+	return SampleSum(s.Values())
+}
+
+// Update samples a new value.
+func (s *ExpDecaySample) Update(v int64) {
+	s.update(time.Now(), v)
+}
+
+// Values returns a copy of the values in the sample.
+func (s *ExpDecaySample) Values() []int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	vals := s.values.Values()
+	values := make([]int64, len(vals))
+	for i, v := range vals {
+		values[i] = v.v
+	}
+	return values
+}
+
+// Variance returns the variance of the values in the sample.
+func (s *ExpDecaySample) Variance() float64 {
+	return SampleVariance(s.Values())
+}
+
+// update samples a new value at a particular timestamp.  This is a method all
+// its own to facilitate testing.
+func (s *ExpDecaySample) update(t time.Time, v int64) {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	s.count++
+	if s.values.Size() == s.reservoirSize {
+		s.values.Pop()
+	}
+	s.values.Push(expDecaySample{
+		k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(),
+		v: v,
+	})
+	if t.After(s.t1) {
+		values := s.values.Values()
+		t0 := s.t0
+		s.values.Clear()
+		s.t0 = t
+		s.t1 = s.t0.Add(rescaleThreshold)
+		for _, v := range values {
+			v.k = v.k * math.Exp(-s.alpha*s.t0.Sub(t0).Seconds())
+			s.values.Push(v)
+		}
+	}
+}
+
+// NilSample is a no-op Sample.
+type NilSample struct{}
+
+// Clear is a no-op.
+func (NilSample) Clear() {}
+
+// Count is a no-op.
+func (NilSample) Count() int64 { return 0 }
+
+// Max is a no-op.
+func (NilSample) Max() int64 { return 0 }
+
+// Mean is a no-op.
+func (NilSample) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilSample) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilSample) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilSample) Percentiles(ps []float64) []float64 {
+	return make([]float64, len(ps))
+}
+
+// Size is a no-op.
+func (NilSample) Size() int { return 0 }
+
+// Snapshot is a no-op.
+func (NilSample) Snapshot() Sample { return NilSample{} }
+
+// StdDev is a no-op.
+func (NilSample) StdDev() float64 { return 0.0 }
+
+// Sum is a no-op.
+func (NilSample) Sum() int64 { return 0 }
+
+// Update is a no-op.
+func (NilSample) Update(v int64) {}
+
+// Values is a no-op.
+func (NilSample) Values() []int64 { return []int64{} }
+
+// Variance is a no-op.
+func (NilSample) Variance() float64 { return 0.0 }
+
+// SampleMax returns the maximum value of the slice of int64.
+func SampleMax(values []int64) int64 {
+	if 0 == len(values) {
+		return 0
+	}
+	var max int64 = math.MinInt64
+	for _, v := range values {
+		if max < v {
+			max = v
+		}
+	}
+	return max
+}
+
+// SampleMean returns the mean value of the slice of int64.
+func SampleMean(values []int64) float64 {
+	if 0 == len(values) {
+		return 0.0
+	}
+	return float64(SampleSum(values)) / float64(len(values))
+}
+
+// SampleMin returns the minimum value of the slice of int64.
+func SampleMin(values []int64) int64 {
+	if 0 == len(values) {
+		return 0
+	}
+	var min int64 = math.MaxInt64
+	for _, v := range values {
+		if min > v {
+			min = v
+		}
+	}
+	return min
+}
+
+// SamplePercentile returns an arbitrary percentile of the slice of int64.
+func SamplePercentile(values int64Slice, p float64) float64 {
+	return SamplePercentiles(values, []float64{p})[0]
+}
+
+// SamplePercentiles returns a slice of arbitrary percentiles of the slice of
+// int64.
+func SamplePercentiles(values int64Slice, ps []float64) []float64 {
+	scores := make([]float64, len(ps))
+	size := len(values)
+	if size > 0 {
+		sort.Sort(values)
+		for i, p := range ps {
+			pos := p * float64(size+1)
+			if pos < 1.0 {
+				scores[i] = float64(values[0])
+			} else if pos >= float64(size) {
+				scores[i] = float64(values[size-1])
+			} else {
+				lower := float64(values[int(pos)-1])
+				upper := float64(values[int(pos)])
+				scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
+			}
+		}
+	}
+	return scores
+}
+
+// SampleSnapshot is a read-only copy of another Sample.
+type SampleSnapshot struct {
+	count  int64
+	values []int64
+}
+
+// Clear panics.
+func (*SampleSnapshot) Clear() {
+	panic("Clear called on a SampleSnapshot")
+}
+
+// Count returns the count of inputs at the time the snapshot was taken.
+func (s *SampleSnapshot) Count() int64 { return s.count }
+
+// Max returns the maximal value at the time the snapshot was taken.
+func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) }
+
+// Mean returns the mean value at the time the snapshot was taken.
+func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) }
+
+// Min returns the minimal value at the time the snapshot was taken.
+func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) }
+
+// Percentile returns an arbitrary percentile of values at the time the
+// snapshot was taken.
+func (s *SampleSnapshot) Percentile(p float64) float64 {
+	return SamplePercentile(s.values, p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values at the time
+// the snapshot was taken.
+func (s *SampleSnapshot) Percentiles(ps []float64) []float64 {
+	return SamplePercentiles(s.values, ps)
+}
+
+// Size returns the size of the sample at the time the snapshot was taken.
+func (s *SampleSnapshot) Size() int { return len(s.values) }
+
+// Snapshot returns the snapshot.
+func (s *SampleSnapshot) Snapshot() Sample { return s }
+
+// StdDev returns the standard deviation of values at the time the snapshot was
+// taken.
+func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) }
+
+// Sum returns the sum of values at the time the snapshot was taken.
+func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) }
+
+// Update panics.
+func (*SampleSnapshot) Update(int64) {
+	panic("Update called on a SampleSnapshot")
+}
+
+// Values returns a copy of the values in the sample.
+func (s *SampleSnapshot) Values() []int64 {
+	values := make([]int64, len(s.values))
+	copy(values, s.values)
+	return values
+}
+
+// Variance returns the variance of values at the time the snapshot was taken.
+func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) }
+
+// SampleStdDev returns the standard deviation of the slice of int64.
+func SampleStdDev(values []int64) float64 {
+	return math.Sqrt(SampleVariance(values))
+}
+
+// SampleSum returns the sum of the slice of int64.
+func SampleSum(values []int64) int64 {
+	var sum int64
+	for _, v := range values {
+		sum += v
+	}
+	return sum
+}
+
+// SampleVariance returns the variance of the slice of int64.
+func SampleVariance(values []int64) float64 {
+	if 0 == len(values) {
+		return 0.0
+	}
+	m := SampleMean(values)
+	var sum float64
+	for _, v := range values {
+		d := float64(v) - m
+		sum += d * d
+	}
+	return sum / float64(len(values))
+}
+
+// A uniform sample using Vitter's Algorithm R.
+//
+// <http://www.cs.umd.edu/~samir/498/vitter.pdf>
+type UniformSample struct {
+	count         int64
+	mutex         sync.Mutex
+	reservoirSize int
+	values        []int64
+}
+
+// NewUniformSample constructs a new uniform sample with the given reservoir
+// size.
+func NewUniformSample(reservoirSize int) Sample {
+	return &UniformSample{
+		reservoirSize: reservoirSize,
+		values:        make([]int64, 0, reservoirSize),
+	}
+}
+
+// Clear clears all samples.
+func (s *UniformSample) Clear() {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	s.count = 0
+	s.values = make([]int64, 0, s.reservoirSize)
+}
+
+// Count returns the number of samples recorded, which may exceed the
+// reservoir size.
+func (s *UniformSample) Count() int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return s.count
+}
+
+// Max returns the maximum value in the sample, which may not be the maximum
+// value ever to be part of the sample.
+func (s *UniformSample) Max() int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleMax(s.values)
+}
+
+// Mean returns the mean of the values in the sample.
+func (s *UniformSample) Mean() float64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleMean(s.values)
+}
+
+// Min returns the minimum value in the sample, which may not be the minimum
+// value ever to be part of the sample.
+func (s *UniformSample) Min() int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleMin(s.values)
+}
+
+// Percentile returns an arbitrary percentile of values in the sample.
+func (s *UniformSample) Percentile(p float64) float64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SamplePercentile(s.values, p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values in the
+// sample.
+func (s *UniformSample) Percentiles(ps []float64) []float64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SamplePercentiles(s.values, ps)
+}
+
+// Size returns the size of the sample, which is at most the reservoir size.
+func (s *UniformSample) Size() int {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return len(s.values)
+}
+
+// Snapshot returns a read-only copy of the sample.
+func (s *UniformSample) Snapshot() Sample {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	values := make([]int64, len(s.values))
+	copy(values, s.values)
+	return &SampleSnapshot{
+		count:  s.count,
+		values: values,
+	}
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (s *UniformSample) StdDev() float64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleStdDev(s.values)
+}
+
+// Sum returns the sum of the values in the sample.
+func (s *UniformSample) Sum() int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleSum(s.values)
+}
+
+// Update samples a new value.
+func (s *UniformSample) Update(v int64) {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	s.count++
+	if len(s.values) < s.reservoirSize {
+		s.values = append(s.values, v)
+	} else {
+		r := rand.Int63n(s.count)
+		if r < int64(len(s.values)) {
+			s.values[int(r)] = v
+		}
+	}
+}
+
+// Values returns a copy of the values in the sample.
+func (s *UniformSample) Values() []int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	values := make([]int64, len(s.values))
+	copy(values, s.values)
+	return values
+}
+
+// Variance returns the variance of the values in the sample.
+func (s *UniformSample) Variance() float64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleVariance(s.values)
+}
+
+// expDecaySample represents an individual sample in a heap.
+type expDecaySample struct {
+	k float64
+	v int64
+}
+
+func newExpDecaySampleHeap(reservoirSize int) *expDecaySampleHeap {
+	return &expDecaySampleHeap{make([]expDecaySample, 0, reservoirSize)}
+}
+
+// expDecaySampleHeap is a min-heap of expDecaySamples.
+// The internal implementation is copied from the standard library's container/heap
+type expDecaySampleHeap struct {
+	s []expDecaySample
+}
+
+func (h *expDecaySampleHeap) Clear() {
+	h.s = h.s[:0]
+}
+
+func (h *expDecaySampleHeap) Push(s expDecaySample) {
+	n := len(h.s)
+	h.s = h.s[0 : n+1]
+	h.s[n] = s
+	h.up(n)
+}
+
+func (h *expDecaySampleHeap) Pop() expDecaySample {
+	n := len(h.s) - 1
+	h.s[0], h.s[n] = h.s[n], h.s[0]
+	h.down(0, n)
+
+	n = len(h.s)
+	s := h.s[n-1]
+	h.s = h.s[0 : n-1]
+	return s
+}
+
+func (h *expDecaySampleHeap) Size() int {
+	return len(h.s)
+}
+
+func (h *expDecaySampleHeap) Values() []expDecaySample {
+	return h.s
+}
+
+func (h *expDecaySampleHeap) up(j int) {
+	for {
+		i := (j - 1) / 2 // parent
+		if i == j || !(h.s[j].k < h.s[i].k) {
+			break
+		}
+		h.s[i], h.s[j] = h.s[j], h.s[i]
+		j = i
+	}
+}
+
+func (h *expDecaySampleHeap) down(i, n int) {
+	for {
+		j1 := 2*i + 1
+		if j1 >= n || j1 < 0 { // j1 < 0 after int overflow
+			break
+		}
+		j := j1 // left child
+		if j2 := j1 + 1; j2 < n && !(h.s[j1].k < h.s[j2].k) {
+			j = j2 // = 2*i + 2  // right child
+		}
+		if !(h.s[j].k < h.s[i].k) {
+			break
+		}
+		h.s[i], h.s[j] = h.s[j], h.s[i]
+		i = j
+	}
+}
+
+type int64Slice []int64
+
+func (p int64Slice) Len() int           { return len(p) }
+func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p int64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
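
A quick worked example of the interpolation in SamplePercentile/SamplePercentiles above: with sorted values [10, 20, 30, 40] and p = 0.5, pos = 0.5 * (4 + 1) = 2.5, which lands halfway between values[1] = 20 and values[2] = 30:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	values := []int64{10, 20, 30, 40} // already sorted
	p := 0.5
	pos := p * float64(len(values)+1) // 2.5

	// Same branch SamplePercentiles takes when 1.0 <= pos < size.
	lower := float64(values[int(pos)-1])                     // values[1] = 20
	upper := float64(values[int(pos)])                       // values[2] = 30
	fmt.Println(lower + (pos-math.Floor(pos))*(upper-lower)) // prints 25
}
```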

+ 367 - 0
pkg/metrics/sample_test.go

@@ -0,0 +1,367 @@
+// includes code from
+// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample_test.go
+// Copyright 2012 Richard Crowley. All rights reserved.
+
+package metrics
+
+import (
+	"math/rand"
+	"runtime"
+	"testing"
+	"time"
+)
+
+// Benchmark{Compute,Copy}{1000,1000000} demonstrate that, even for relatively
+// expensive computations like Variance, the cost of copying the Sample, as
+// approximated by a make and copy, is much greater than the cost of the
+// computation for small samples and only slightly less for large samples.
+func BenchmarkCompute1000(b *testing.B) {
+	s := make([]int64, 1000)
+	for i := 0; i < len(s); i++ {
+		s[i] = int64(i)
+	}
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		SampleVariance(s)
+	}
+}
+func BenchmarkCompute1000000(b *testing.B) {
+	s := make([]int64, 1000000)
+	for i := 0; i < len(s); i++ {
+		s[i] = int64(i)
+	}
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		SampleVariance(s)
+	}
+}
+func BenchmarkCopy1000(b *testing.B) {
+	s := make([]int64, 1000)
+	for i := 0; i < len(s); i++ {
+		s[i] = int64(i)
+	}
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		sCopy := make([]int64, len(s))
+		copy(sCopy, s)
+	}
+}
+func BenchmarkCopy1000000(b *testing.B) {
+	s := make([]int64, 1000000)
+	for i := 0; i < len(s); i++ {
+		s[i] = int64(i)
+	}
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		sCopy := make([]int64, len(s))
+		copy(sCopy, s)
+	}
+}
+
+func BenchmarkExpDecaySample257(b *testing.B) {
+	benchmarkSample(b, NewExpDecaySample(257, 0.015))
+}
+
+func BenchmarkExpDecaySample514(b *testing.B) {
+	benchmarkSample(b, NewExpDecaySample(514, 0.015))
+}
+
+func BenchmarkExpDecaySample1028(b *testing.B) {
+	benchmarkSample(b, NewExpDecaySample(1028, 0.015))
+}
+
+func BenchmarkUniformSample257(b *testing.B) {
+	benchmarkSample(b, NewUniformSample(257))
+}
+
+func BenchmarkUniformSample514(b *testing.B) {
+	benchmarkSample(b, NewUniformSample(514))
+}
+
+func BenchmarkUniformSample1028(b *testing.B) {
+	benchmarkSample(b, NewUniformSample(1028))
+}
+
+func TestExpDecaySample10(t *testing.T) {
+	rand.Seed(1)
+	s := NewExpDecaySample(100, 0.99)
+	for i := 0; i < 10; i++ {
+		s.Update(int64(i))
+	}
+	if size := s.Count(); 10 != size {
+		t.Errorf("s.Count(): 10 != %v\n", size)
+	}
+	if size := s.Size(); 10 != size {
+		t.Errorf("s.Size(): 10 != %v\n", size)
+	}
+	if l := len(s.Values()); 10 != l {
+		t.Errorf("len(s.Values()): 10 != %v\n", l)
+	}
+	for _, v := range s.Values() {
+		if v > 10 || v < 0 {
+			t.Errorf("out of range [0, 10): %v\n", v)
+		}
+	}
+}
+
+func TestExpDecaySample100(t *testing.T) {
+	rand.Seed(1)
+	s := NewExpDecaySample(1000, 0.01)
+	for i := 0; i < 100; i++ {
+		s.Update(int64(i))
+	}
+	if size := s.Count(); 100 != size {
+		t.Errorf("s.Count(): 100 != %v\n", size)
+	}
+	if size := s.Size(); 100 != size {
+		t.Errorf("s.Size(): 100 != %v\n", size)
+	}
+	if l := len(s.Values()); 100 != l {
+		t.Errorf("len(s.Values()): 100 != %v\n", l)
+	}
+	for _, v := range s.Values() {
+		if v > 100 || v < 0 {
+			t.Errorf("out of range [0, 100): %v\n", v)
+		}
+	}
+}
+
+func TestExpDecaySample1000(t *testing.T) {
+	rand.Seed(1)
+	s := NewExpDecaySample(100, 0.99)
+	for i := 0; i < 1000; i++ {
+		s.Update(int64(i))
+	}
+	if size := s.Count(); 1000 != size {
+		t.Errorf("s.Count(): 1000 != %v\n", size)
+	}
+	if size := s.Size(); 100 != size {
+		t.Errorf("s.Size(): 100 != %v\n", size)
+	}
+	if l := len(s.Values()); 100 != l {
+		t.Errorf("len(s.Values()): 100 != %v\n", l)
+	}
+	for _, v := range s.Values() {
+		if v > 1000 || v < 0 {
+			t.Errorf("out of range [0, 1000): %v\n", v)
+		}
+	}
+}
+
+// This test makes sure that the sample's priority is not amplified by using
+// nanosecond duration since start rather than second duration since start.
+// The priority becomes +Inf quickly after starting if this is done,
+// effectively freezing the set of samples until a rescale step happens.
+func TestExpDecaySampleNanosecondRegression(t *testing.T) {
+	rand.Seed(1)
+	s := NewExpDecaySample(100, 0.99)
+	for i := 0; i < 100; i++ {
+		s.Update(10)
+	}
+	time.Sleep(1 * time.Millisecond)
+	for i := 0; i < 100; i++ {
+		s.Update(20)
+	}
+	v := s.Values()
+	avg := float64(0)
+	for i := 0; i < len(v); i++ {
+		avg += float64(v[i])
+	}
+	avg /= float64(len(v))
+	if avg > 16 || avg < 14 {
+		t.Errorf("out of range [14, 16]: %v\n", avg)
+	}
+}
+
+func TestExpDecaySampleRescale(t *testing.T) {
+	s := NewExpDecaySample(2, 0.001).(*ExpDecaySample)
+	s.update(time.Now(), 1)
+	s.update(time.Now().Add(time.Hour+time.Microsecond), 1)
+	for _, v := range s.values.Values() {
+		if v.k == 0.0 {
+			t.Fatal("v.k == 0.0")
+		}
+	}
+}
+
+func TestExpDecaySampleSnapshot(t *testing.T) {
+	now := time.Now()
+	rand.Seed(1)
+	s := NewExpDecaySample(100, 0.99)
+	for i := 1; i <= 10000; i++ {
+		s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
+	}
+	snapshot := s.Snapshot()
+	s.Update(1)
+	testExpDecaySampleStatistics(t, snapshot)
+}
+
+func TestExpDecaySampleStatistics(t *testing.T) {
+	now := time.Now()
+	rand.Seed(1)
+	s := NewExpDecaySample(100, 0.99)
+	for i := 1; i <= 10000; i++ {
+		s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
+	}
+	testExpDecaySampleStatistics(t, s)
+}
+
+func TestUniformSample(t *testing.T) {
+	rand.Seed(1)
+	s := NewUniformSample(100)
+	for i := 0; i < 1000; i++ {
+		s.Update(int64(i))
+	}
+	if size := s.Count(); 1000 != size {
+		t.Errorf("s.Count(): 1000 != %v\n", size)
+	}
+	if size := s.Size(); 100 != size {
+		t.Errorf("s.Size(): 100 != %v\n", size)
+	}
+	if l := len(s.Values()); 100 != l {
+		t.Errorf("len(s.Values()): 100 != %v\n", l)
+	}
+	for _, v := range s.Values() {
+		if v > 1000 || v < 0 {
+			t.Errorf("out of range [0, 100): %v\n", v)
+		}
+	}
+}
+
+func TestUniformSampleIncludesTail(t *testing.T) {
+	rand.Seed(1)
+	s := NewUniformSample(100)
+	max := 100
+	for i := 0; i < max; i++ {
+		s.Update(int64(i))
+	}
+	v := s.Values()
+	sum := 0
+	exp := (max - 1) * max / 2
+	for i := 0; i < len(v); i++ {
+		sum += int(v[i])
+	}
+	if exp != sum {
+		t.Errorf("sum: %v != %v\n", exp, sum)
+	}
+}
+
+func TestUniformSampleSnapshot(t *testing.T) {
+	s := NewUniformSample(100)
+	for i := 1; i <= 10000; i++ {
+		s.Update(int64(i))
+	}
+	snapshot := s.Snapshot()
+	s.Update(1)
+	testUniformSampleStatistics(t, snapshot)
+}
+
+func TestUniformSampleStatistics(t *testing.T) {
+	rand.Seed(1)
+	s := NewUniformSample(100)
+	for i := 1; i <= 10000; i++ {
+		s.Update(int64(i))
+	}
+	testUniformSampleStatistics(t, s)
+}
+
+func benchmarkSample(b *testing.B, s Sample) {
+	var memStats runtime.MemStats
+	runtime.ReadMemStats(&memStats)
+	pauseTotalNs := memStats.PauseTotalNs
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		s.Update(1)
+	}
+	b.StopTimer()
+	runtime.GC()
+	runtime.ReadMemStats(&memStats)
+	b.Logf("GC cost: %d ns/op", int(memStats.PauseTotalNs-pauseTotalNs)/b.N)
+}
+
+func testExpDecaySampleStatistics(t *testing.T, s Sample) {
+	if count := s.Count(); 10000 != count {
+		t.Errorf("s.Count(): 10000 != %v\n", count)
+	}
+	if min := s.Min(); 107 != min {
+		t.Errorf("s.Min(): 107 != %v\n", min)
+	}
+	if max := s.Max(); 10000 != max {
+		t.Errorf("s.Max(): 10000 != %v\n", max)
+	}
+	if mean := s.Mean(); 4965.98 != mean {
+		t.Errorf("s.Mean(): 4965.98 != %v\n", mean)
+	}
+	if stdDev := s.StdDev(); 2959.825156930727 != stdDev {
+		t.Errorf("s.StdDev(): 2959.825156930727 != %v\n", stdDev)
+	}
+	ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
+	if 4615 != ps[0] {
+		t.Errorf("median: 4615 != %v\n", ps[0])
+	}
+	if 7672 != ps[1] {
+		t.Errorf("75th percentile: 7672 != %v\n", ps[1])
+	}
+	if 9998.99 != ps[2] {
+		t.Errorf("99th percentile: 9998.99 != %v\n", ps[2])
+	}
+}
+
+func testUniformSampleStatistics(t *testing.T, s Sample) {
+	if count := s.Count(); 10000 != count {
+		t.Errorf("s.Count(): 10000 != %v\n", count)
+	}
+	if min := s.Min(); 37 != min {
+		t.Errorf("s.Min(): 37 != %v\n", min)
+	}
+	if max := s.Max(); 9989 != max {
+		t.Errorf("s.Max(): 9989 != %v\n", max)
+	}
+	if mean := s.Mean(); 4748.14 != mean {
+		t.Errorf("s.Mean(): 4748.14 != %v\n", mean)
+	}
+	if stdDev := s.StdDev(); 2826.684117548333 != stdDev {
+		t.Errorf("s.StdDev(): 2826.684117548333 != %v\n", stdDev)
+	}
+	ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
+	if 4599 != ps[0] {
+		t.Errorf("median: 4599 != %v\n", ps[0])
+	}
+	if 7380.5 != ps[1] {
+		t.Errorf("75th percentile: 7380.5 != %v\n", ps[1])
+	}
+	if 9986.429999999998 != ps[2] {
+		t.Errorf("99th percentile: 9986.429999999998 != %v\n", ps[2])
+	}
+}
+
+// TestUniformSampleConcurrentUpdateCount would expose data race problems with
+// concurrent Update and Count calls on Sample when the test is run with the
+// -race flag.
+func TestUniformSampleConcurrentUpdateCount(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping in short mode")
+	}
+	s := NewUniformSample(100)
+	for i := 0; i < 100; i++ {
+		s.Update(int64(i))
+	}
+	quit := make(chan struct{})
+	go func() {
+		t := time.NewTicker(10 * time.Millisecond)
+		for {
+			select {
+			case <-t.C:
+				s.Update(rand.Int63())
+			case <-quit:
+				t.Stop()
+				return
+			}
+		}
+	}()
+	for i := 0; i < 1000; i++ {
+		s.Count()
+		time.Sleep(5 * time.Millisecond)
+	}
+	quit <- struct{}{}
+}

+ 46 - 0
pkg/metrics/settings.go

@@ -0,0 +1,46 @@
+package metrics
+
+import (
+	"github.com/grafana/grafana/pkg/log"
+	"github.com/grafana/grafana/pkg/setting"
+)
+
+type MetricPublisher interface {
+	Publish(metrics []Metric)
+}
+
+type MetricSettings struct {
+	Enabled         bool
+	IntervalSeconds int64
+
+	Publishers []MetricPublisher
+}
+
+func readSettings() *MetricSettings {
+	var settings = &MetricSettings{
+		Enabled:    false,
+		Publishers: make([]MetricPublisher, 0),
+	}
+
+	var section, err = setting.Cfg.GetSection("metrics")
+	if err != nil {
+		log.Fatal(3, "Unable to find metrics config section")
+		return nil
+	}
+
+	settings.Enabled = section.Key("enabled").MustBool(false)
+	settings.IntervalSeconds = section.Key("interval_seconds").MustInt64(10)
+
+	if !settings.Enabled {
+		return settings
+	}
+
+	if graphitePublisher, err := CreateGraphitePublisher(); err != nil {
+		log.Error(3, "Metrics: Failed to init Graphite metric publisher", err)
+	} else if graphitePublisher != nil {
+		log.Info("Metrics: Internal metrics publisher Graphite initialized")
+		settings.Publishers = append(settings.Publishers, graphitePublisher)
+	}
+
+	return settings
+}
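
Besides the built-in Graphite publisher, anything satisfying MetricPublisher fits the Publishers slice; a hypothetical log-based publisher as a sketch (note that readSettings does not yet expose a hook for registering external publishers):

```go
package main

import (
	"log"

	"github.com/grafana/grafana/pkg/metrics"
)

// logPublisher is hypothetical; it only depends on the interface above.
type logPublisher struct{}

func (logPublisher) Publish(ms []metrics.Metric) {
	log.Printf("would publish %d metric snapshots", len(ms))
}

func main() {
	var p metrics.MetricPublisher = logPublisher{}
	p.Publish(metrics.MetricStats.GetSnapshots())
}
```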

+ 309 - 0
pkg/metrics/timer.go

@@ -0,0 +1,309 @@
+// includes code from
+// https://raw.githubusercontent.com/rcrowley/go-metrics/master/timer.go
+// Copyright 2012 Richard Crowley. All rights reserved.
+
+package metrics
+
+import (
+	"sync"
+	"time"
+)
+
+// Timers capture the duration and rate of events.
+type Timer interface {
+	Metric
+
+	Count() int64
+	Max() int64
+	Mean() float64
+	Min() int64
+	Percentile(float64) float64
+	Percentiles([]float64) []float64
+	Rate1() float64
+	Rate5() float64
+	Rate15() float64
+	RateMean() float64
+	StdDev() float64
+	Sum() int64
+	Time(func())
+	Update(time.Duration)
+	UpdateSince(time.Time)
+	Variance() float64
+}
+
+// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter.
+func NewCustomTimer(meta *MetricMeta, h Histogram, m Meter) Timer {
+	if UseNilMetrics {
+		return NilTimer{}
+	}
+	return &StandardTimer{
+		MetricMeta: meta,
+		histogram:  h,
+		meter:      m,
+	}
+}
+
+// NewTimer constructs a new StandardTimer using an exponentially-decaying
+// sample with the same reservoir size and alpha as UNIX load averages.
+func NewTimer(meta *MetricMeta) Timer {
+	if UseNilMetrics {
+		return NilTimer{}
+	}
+	return &StandardTimer{
+		MetricMeta: meta,
+		histogram:  NewHistogram(meta, NewExpDecaySample(1028, 0.015)),
+		meter:      NewMeter(meta),
+	}
+}
+
+func RegTimer(name string, tagStrings ...string) Timer {
+	tr := NewTimer(NewMetricMeta(name, tagStrings))
+	MetricStats.Register(tr)
+	return tr
+}
+
+// NilTimer is a no-op Timer.
+type NilTimer struct {
+	*MetricMeta
+	h Histogram
+	m Meter
+}
+
+// Count is a no-op.
+func (NilTimer) Count() int64 { return 0 }
+
+// Max is a no-op.
+func (NilTimer) Max() int64 { return 0 }
+
+// Mean is a no-op.
+func (NilTimer) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilTimer) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilTimer) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilTimer) Percentiles(ps []float64) []float64 {
+	return make([]float64, len(ps))
+}
+
+// Rate1 is a no-op.
+func (NilTimer) Rate1() float64 { return 0.0 }
+
+// Rate5 is a no-op.
+func (NilTimer) Rate5() float64 { return 0.0 }
+
+// Rate15 is a no-op.
+func (NilTimer) Rate15() float64 { return 0.0 }
+
+// RateMean is a no-op.
+func (NilTimer) RateMean() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (n NilTimer) Snapshot() Metric { return n }
+
+// StdDev is a no-op.
+func (NilTimer) StdDev() float64 { return 0.0 }
+
+// Sum is a no-op.
+func (NilTimer) Sum() int64 { return 0 }
+
+// Time is a no-op.
+func (NilTimer) Time(func()) {}
+
+// Update is a no-op.
+func (NilTimer) Update(time.Duration) {}
+
+// UpdateSince is a no-op.
+func (NilTimer) UpdateSince(time.Time) {}
+
+// Variance is a no-op.
+func (NilTimer) Variance() float64 { return 0.0 }
+
+// StandardTimer is the standard implementation of a Timer and uses a Histogram
+// and Meter.
+type StandardTimer struct {
+	*MetricMeta
+	histogram Histogram
+	meter     Meter
+	mutex     sync.Mutex
+}
+
+// Count returns the number of events recorded.
+func (t *StandardTimer) Count() int64 {
+	return t.histogram.Count()
+}
+
+// Max returns the maximum value in the sample.
+func (t *StandardTimer) Max() int64 {
+	return t.histogram.Max()
+}
+
+// Mean returns the mean of the values in the sample.
+func (t *StandardTimer) Mean() float64 {
+	return t.histogram.Mean()
+}
+
+// Min returns the minimum value in the sample.
+func (t *StandardTimer) Min() int64 {
+	return t.histogram.Min()
+}
+
+// Percentile returns an arbitrary percentile of the values in the sample.
+func (t *StandardTimer) Percentile(p float64) float64 {
+	return t.histogram.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of the values in the
+// sample.
+func (t *StandardTimer) Percentiles(ps []float64) []float64 {
+	return t.histogram.Percentiles(ps)
+}
+
+// Rate1 returns the one-minute moving average rate of events per second.
+func (t *StandardTimer) Rate1() float64 {
+	return t.meter.Rate1()
+}
+
+// Rate5 returns the five-minute moving average rate of events per second.
+func (t *StandardTimer) Rate5() float64 {
+	return t.meter.Rate5()
+}
+
+// Rate15 returns the fifteen-minute moving average rate of events per second.
+func (t *StandardTimer) Rate15() float64 {
+	return t.meter.Rate15()
+}
+
+// RateMean returns the meter's mean rate of events per second.
+func (t *StandardTimer) RateMean() float64 {
+	return t.meter.RateMean()
+}
+
+// Snapshot returns a read-only copy of the timer.
+func (t *StandardTimer) Snapshot() Metric {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	return &TimerSnapshot{
+		MetricMeta: t.MetricMeta,
+		histogram:  t.histogram.Snapshot().(*HistogramSnapshot),
+		meter:      t.meter.Snapshot().(*MeterSnapshot),
+	}
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (t *StandardTimer) StdDev() float64 {
+	return t.histogram.StdDev()
+}
+
+// Sum returns the sum in the sample.
+func (t *StandardTimer) Sum() int64 {
+	return t.histogram.Sum()
+}
+
+// Time records the duration of the execution of the given function.
+func (t *StandardTimer) Time(f func()) {
+	ts := time.Now()
+	f()
+	t.Update(time.Since(ts))
+}
+
+// Update records the duration of an event.
+func (t *StandardTimer) Update(d time.Duration) {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	t.histogram.Update(int64(d))
+	t.meter.Mark(1)
+}
+
+// UpdateSince records the duration of an event that started at the given
+// time and ends now.
+func (t *StandardTimer) UpdateSince(ts time.Time) {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	t.histogram.Update(int64(time.Since(ts)))
+	t.meter.Mark(1)
+}
+
+// Variance returns the variance of the values in the sample.
+func (t *StandardTimer) Variance() float64 {
+	return t.histogram.Variance()
+}
+
+// TimerSnapshot is a read-only copy of another Timer.
+type TimerSnapshot struct {
+	*MetricMeta
+	histogram *HistogramSnapshot
+	meter     *MeterSnapshot
+}
+
+// Count returns the number of events recorded at the time the snapshot was
+// taken.
+func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() }
+
+// Max returns the maximum value at the time the snapshot was taken.
+func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() }
+
+// Mean returns the mean value at the time the snapshot was taken.
+func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() }
+
+// Min returns the minimum value at the time the snapshot was taken.
+func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() }
+
+// Percentile returns an arbitrary percentile of sampled values at the time the
+// snapshot was taken.
+func (t *TimerSnapshot) Percentile(p float64) float64 {
+	return t.histogram.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of sampled values at
+// the time the snapshot was taken.
+func (t *TimerSnapshot) Percentiles(ps []float64) []float64 {
+	return t.histogram.Percentiles(ps)
+}
+
+// Rate1 returns the one-minute moving average rate of events per second at the
+// time the snapshot was taken.
+func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() }
+
+// Rate5 returns the five-minute moving average rate of events per second at
+// the time the snapshot was taken.
+func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() }
+
+// Rate15 returns the fifteen-minute moving average rate of events per second
+// at the time the snapshot was taken.
+func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() }
+
+// RateMean returns the meter's mean rate of events per second at the time the
+// snapshot was taken.
+func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() }
+
+// Snapshot returns the snapshot.
+func (t *TimerSnapshot) Snapshot() Metric { return t }
+
+// StdDev returns the standard deviation of the values at the time the snapshot
+// was taken.
+func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() }
+
+// Sum returns the sum at the time the snapshot was taken.
+func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() }
+
+// Time panics.
+func (*TimerSnapshot) Time(func()) {
+	panic("Time called on a TimerSnapshot")
+}
+
+// Update panics.
+func (*TimerSnapshot) Update(time.Duration) {
+	panic("Update called on a TimerSnapshot")
+}
+
+// UpdateSince panics.
+func (*TimerSnapshot) UpdateSince(time.Time) {
+	panic("UpdateSince called on a TimerSnapshot")
+}
+
+// Variance returns the variance of the values at the time the snapshot was
+// taken.
+func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() }
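
Putting the Timer API together: the three recording styles (Time, Update, UpdateSince) all feed the same histogram and meter; a sketch:

```go
package main

import (
	"time"

	"github.com/grafana/grafana/pkg/metrics"
)

func main() {
	saveTimer := metrics.RegTimer("api.dashboard.save")

	// Style 1: wrap a function.
	saveTimer.Time(func() {
		time.Sleep(10 * time.Millisecond)
	})

	// Style 2: record an explicit duration.
	saveTimer.Update(25 * time.Millisecond)

	// Style 3: record the time elapsed since a start point.
	start := time.Now()
	saveTimer.UpdateSince(start)
}
```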

+ 9 - 1
pkg/middleware/logger.go

@@ -21,6 +21,7 @@ import (
 	"time"
 
 	"github.com/grafana/grafana/pkg/log"
+	"github.com/grafana/grafana/pkg/metrics"
 	"github.com/grafana/grafana/pkg/setting"
 	"gopkg.in/macaron.v1"
 )
@@ -28,6 +29,7 @@ import (
 func Logger() macaron.Handler {
 	return func(res http.ResponseWriter, req *http.Request, c *macaron.Context) {
 		start := time.Now()
+		c.Data["perfmon.start"] = start
 
 		uname := c.GetCookie(setting.CookieUserName)
 		if len(uname) == 0 {
@@ -37,7 +39,13 @@ func Logger() macaron.Handler {
 		rw := res.(macaron.ResponseWriter)
 		c.Next()
 
-		content := fmt.Sprintf("Completed %s %s \"%s %s %s\" %v %s %d bytes in %dus", c.RemoteAddr(), uname, req.Method, req.URL.Path, req.Proto, rw.Status(), http.StatusText(rw.Status()), rw.Size(), time.Since(start)/time.Microsecond)
+		timeTaken := time.Since(start)
+		content := fmt.Sprintf("Completed %s %s \"%s %s %s\" %v %s %d bytes in %dms", c.RemoteAddr(), uname, req.Method, req.URL.Path, req.Proto, rw.Status(), http.StatusText(rw.Status()), rw.Size(), timeTaken/time.Millisecond)
+
+		if timer, ok := c.Data["perfmon.timer"]; ok {
+			timerTyped := timer.(metrics.Timer)
+			// Update expects a time.Duration; pass the raw duration rather
+			// than the millisecond count so the timer's units stay consistent.
+			timerTyped.Update(timeTaken)
+		}
 
 		switch rw.Status() {
 		case 200, 304:

+ 4 - 0
pkg/middleware/middleware.go

@@ -257,3 +257,7 @@ func (ctx *Context) JsonApiErr(status int, message string, err error) {
 func (ctx *Context) HasUserRole(role m.RoleType) bool {
 	return ctx.OrgRole.Includes(role)
 }
+
+func (ctx *Context) TimeRequest(timer metrics.Timer) {
+	ctx.Data["perfmon.timer"] = timer
+}
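
Handlers opt in by attaching a Timer to the request context, and the Logger middleware records the elapsed time when the request completes; judging by the file list, pkg/api/dashboard.go is instrumented exactly this way. A hypothetical handler sketch:

```go
package api

import (
	"github.com/grafana/grafana/pkg/metrics"
	"github.com/grafana/grafana/pkg/middleware"
)

// GetDashboard is a sketch: TimeRequest stores the timer under
// "perfmon.timer", which Logger() picks up after c.Next() returns.
func GetDashboard(c *middleware.Context) {
	c.TimeRequest(metrics.M_Api_Dashboard_Get)
	// ... load and render the dashboard ...
}
```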

+ 12 - 0
pkg/middleware/perf.go

@@ -0,0 +1,12 @@
+package middleware
+
+import (
+	"net/http"
+
+	"gopkg.in/macaron.v1"
+)
+
+// MeasureRequestTime is currently an empty placeholder handler; request
+// timing is instead driven by the "perfmon.timer" context value that
+// handlers set via Context.TimeRequest and the Logger middleware consumes.
+func MeasureRequestTime() macaron.Handler {
+	return func(res http.ResponseWriter, req *http.Request, c *Context) {
+	}
+}

+ 27 - 3
pkg/setting/setting.go

@@ -37,9 +37,10 @@ const (
 
 var (
 	// App settings.
-	Env       string = DEV
-	AppUrl    string
-	AppSubUrl string
+	Env          string = DEV
+	AppUrl       string
+	AppSubUrl    string
+	InstanceName string
 
 	// build
 	BuildVersion string
@@ -259,6 +260,12 @@ func evalEnvVarExpression(value string) string {
 		envVar = strings.TrimPrefix(envVar, "${")
 		envVar = strings.TrimSuffix(envVar, "}")
 		envValue := os.Getenv(envVar)
+
+		// if the env variable is HOSTNAME and it is empty, fall back to os.Hostname
+		if envVar == "HOSTNAME" && envValue == "" {
+			envValue, _ = os.Hostname()
+		}
+
 		return envValue
 	})
 }
@@ -395,11 +402,28 @@ func validateStaticRootPath() error {
 	return fmt.Errorf("Failed to detect generated css or javascript files in static root (%s), have you executed default grunt task?", StaticRootPath)
 }
 
+// func readInstanceName() string {
+// 	hostname, _ := os.Hostname()
+// 	if hostname == "" {
+// 		hostname = "hostname_unknown"
+// 	}
+//
+// 	instanceName := Cfg.Section("").Key("instance_name").MustString("")
+// 	if instanceName = "" {
+// 		// set value as it might be used in other places
+// 		Cfg.Section("").Key("instance_name").SetValue(hostname)
+// 		instanceName = hostname
+// 	}
+//
+// 	return instanceName
+// }
+
 func NewConfigContext(args *CommandLineArgs) error {
 	setHomePath(args)
 	loadConfiguration(args)
 
 	Env = Cfg.Section("").Key("app_mode").MustString("development")
+	InstanceName = Cfg.Section("").Key("instance_name").MustString("unknown_instance_name")
 	PluginsPath = Cfg.Section("paths").Key("plugins").String()
 
 	server := Cfg.Section("server")

+ 9 - 0
pkg/setting/setting_test.go

@@ -89,5 +89,14 @@ func TestLoadingSettings(t *testing.T) {
 			So(DataPath, ShouldEqual, "/tmp/env_override")
 		})
 
+		Convey("instance_name default to hostname even if hostname env is emtpy", func() {
+			NewConfigContext(&CommandLineArgs{
+				HomePath: "../../",
+			})
+
+			hostname, _ := os.Hostname()
+			So(InstanceName, ShouldEqual, hostname)
+		})
+
 	})
 }