feat(instrumentation): added influxdb client

Torkel Ödegaard, 9 years ago
commit 86f0007768

+ 16 - 0
Godeps/Godeps.json

@@ -1,6 +1,7 @@
 {
 	"ImportPath": "github.com/grafana/grafana",
 	"GoVersion": "go1.5.1",
+	"GodepVersion": "v60",
 	"Packages": [
 		"./pkg/..."
 	],
@@ -226,6 +227,21 @@
 			"ImportPath": "github.com/hashicorp/go-version",
 			"Rev": "7e3c02b30806fa5779d3bdfc152ce4c6f40e7b38"
 		},
+		{
+			"ImportPath": "github.com/influxdata/influxdb/client",
+			"Comment": "v0.13.0-74-g2c9d0fc",
+			"Rev": "2c9d0fcc04eba3ffc88f2aafe8466874e384d80d"
+		},
+		{
+			"ImportPath": "github.com/influxdata/influxdb/models",
+			"Comment": "v0.13.0-74-g2c9d0fc",
+			"Rev": "2c9d0fcc04eba3ffc88f2aafe8466874e384d80d"
+		},
+		{
+			"ImportPath": "github.com/influxdata/influxdb/pkg/escape",
+			"Comment": "v0.13.0-74-g2c9d0fc",
+			"Rev": "2c9d0fcc04eba3ffc88f2aafe8466874e384d80d"
+		},
 		{
 			"ImportPath": "github.com/jmespath/go-jmespath",
 			"Comment": "0.2.2",

+ 20 - 0
Godeps/_workspace/src/github.com/influxdata/influxdb/LICENSE

@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013-2016 Errplane Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 27 - 0
Godeps/_workspace/src/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md

@@ -0,0 +1,27 @@
+# List
+- bootstrap 3.3.5 [MIT LICENSE](https://github.com/twbs/bootstrap/blob/master/LICENSE)
+- collectd.org [ISC LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE)
+- github.com/armon/go-metrics [MIT LICENSE](https://github.com/armon/go-metrics/blob/master/LICENSE)
+- github.com/BurntSushi/toml [WTFPL LICENSE](https://github.com/BurntSushi/toml/blob/master/COPYING)
+- github.com/bmizerany/pat [MIT LICENSE](https://github.com/bmizerany/pat#license)
+- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE)
+- github.com/dgryski/go-bits [MIT LICENSE](https://github.com/dgryski/go-bits/blob/master/LICENSE)
+- github.com/dgryski/go-bitstream [MIT LICENSE](https://github.com/dgryski/go-bitstream/blob/master/LICENSE)
+- github.com/gogo/protobuf/proto [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE)
+- github.com/davecgh/go-spew/spew [ISC LICENSE](https://github.com/davecgh/go-spew/blob/master/LICENSE)
+- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE)
+- github.com/hashicorp/go-msgpack [BSD LICENSE](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE)
+- github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE)
+- github.com/hashicorp/raft-boltdb [MOZILLA PUBLIC LICENSE](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE)
+- github.com/influxdata/usage-client [MIT LICENSE](https://github.com/influxdata/usage-client/blob/master/LICENSE.txt)
+- github.com/jwilder/encoding [MIT LICENSE](https://github.com/jwilder/encoding/blob/master/LICENSE)
+- github.com/kimor79/gollectd [BSD LICENSE](https://github.com/kimor79/gollectd/blob/master/LICENSE)
+- github.com/paulbellamy/ratecounter [MIT LICENSE](https://github.com/paulbellamy/ratecounter/blob/master/LICENSE)
+- github.com/peterh/liner [MIT LICENSE](https://github.com/peterh/liner/blob/master/COPYING)
+- github.com/rakyll/statik [APACHE LICENSE](https://github.com/rakyll/statik/blob/master/LICENSE)
+- glyphicons [LICENSE](http://glyphicons.com/license/)
+- golang.org/x/crypto [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)
+- golang.org/x/tools [BSD LICENSE](https://github.com/golang/tools/blob/master/LICENSE)
+- gopkg.in/fatih/pool.v2 [MIT LICENSE](https://github.com/fatih/pool/blob/v2.0.0/LICENSE)
+- jquery 2.1.4 [MIT LICENSE](https://github.com/jquery/jquery/blob/master/LICENSE.txt)
+- react 0.13.3 [BSD LICENSE](https://github.com/facebook/react/blob/master/LICENSE)

+ 267 - 0
Godeps/_workspace/src/github.com/influxdata/influxdb/client/README.md

@@ -0,0 +1,267 @@
+# InfluxDB Client
+
+[![GoDoc](https://godoc.org/github.com/influxdata/influxdb?status.svg)](http://godoc.org/github.com/influxdata/influxdb/client/v2)
+
+## Description
+
+**NOTE:** The Go client library now has a "v2" version, with the old version
+being deprecated. The new version can be imported at
+`import "github.com/influxdata/influxdb/client/v2"`. It is not backwards-compatible.
+
+A Go client library written and maintained by the **InfluxDB** team.
+This package provides convenience functions to read and write time series data.
+It uses the HTTP protocol to communicate with your **InfluxDB** cluster.
+
+
+## Getting Started
+
+### Connecting To Your Database
+
+Connecting to an **InfluxDB** database is straightforward. You will need a host
+name, a port, and the cluster user credentials if applicable. The default port is
+8086. You can customize these settings to your specific installation via the
+**InfluxDB** configuration file.
+
+Though not necessary for experimentation, you may want to create a new user
+and authenticate the connection to your database.
+
+For more information please check out the
+[Admin Docs](https://docs.influxdata.com/influxdb/latest/administration/).
+
+For the impatient, you can create a new admin user _bubba_ by firing off the
+[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go).
+
+```shell
+influx
+> create user bubba with password 'bumblebeetuna'
+> grant all privileges to bubba
+```
+
+And now, for good measure, set the credentials in your shell environment.
+In the example below we will use `$INFLUX_USER` and `$INFLUX_PWD`.
+
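+For reference, a minimal Go sketch of reading those variables back
+(`os.Getenv` simply returns an empty string if a variable is unset):
+
+```go
+// Read the credentials set in the shell environment above.
+username := os.Getenv("INFLUX_USER")
+password := os.Getenv("INFLUX_PWD")
+```
+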
+Now with the administrivia out of the way, let's connect to our database.
+
+NOTE: If you've opted out of creating a user, you can omit Username and Password in
+the configuration below.
+
+```go
+package main
+
+import (
+	"log"
+	"time"
+
+	"github.com/influxdata/influxdb/client/v2"
+)
+
+const (
+	MyDB = "square_holes"
+	username = "bubba"
+	password = "bumblebeetuna"
+)
+
+func main() {
+	// Make client
+	c, err := client.NewHTTPClient(client.HTTPConfig{
+		Addr: "http://localhost:8086",
+		Username: username,
+		Password: password,
+	})
+
+	if err != nil {
+		log.Fatalln("Error: ", err)
+	}
+
+	// Create a new point batch
+	bp, err := client.NewBatchPoints(client.BatchPointsConfig{
+		Database:  MyDB,
+		Precision: "s",
+	})
+
+	if err != nil {
+		log.Fatalln("Error: ", err)
+	}
+
+	// Create a point and add to batch
+	tags := map[string]string{"cpu": "cpu-total"}
+	fields := map[string]interface{}{
+		"idle":   10.1,
+		"system": 53.3,
+		"user":   46.6,
+	}
+	pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
+
+	if err != nil {
+		log.Fatalln("Error: ", err)
+	}
+
+	bp.AddPoint(pt)
+
+	// Write the batch
+	if err := c.Write(bp); err != nil {
+		log.Fatalln("Error: ", err)
+	}
+}
+
+```
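+
+Once connected, you can verify the server is reachable with `Ping`. A minimal
+sketch (the timeout value here is arbitrary):
+
+```go
+// Ping reports the round-trip time and the server version.
+rtt, version, err := c.Ping(5 * time.Second)
+if err != nil {
+	log.Fatalln("Error: ", err)
+}
+log.Printf("reached InfluxDB v%s in %v", version, rtt)
+```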
+
+### Inserting Data
+
+Time series data, aka *points*, are written to the database using batch inserts.
+The mechanism is to create one or more points and then create a batch, aka
+*batch points*, and write these to a given database and series. A series is a
+combination of a measurement (time/values) and a set of tags.
+
+In this sample we will create a batch of 1,000 points. Each point has a time and
+two field values, as well as tags identifying the host and region. We write these
+points to a database called _systemstats_ using a measurement named _cpu_usage_.
+
+NOTE: You can specify a RetentionPolicy as part of the batch points. If one is
+not provided, InfluxDB will use the database's _default_ retention policy.
+
+```go
+func writePoints(clnt client.Client) {
+	sampleSize := 1000
+	rand.Seed(42)
+
+	bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
+		Database:  "systemstats",
+		Precision: "us",
+	})
+
+	for i := 0; i < sampleSize; i++ {
+		regions := []string{"us-west1", "us-west2", "us-west3", "us-east1"}
+		tags := map[string]string{
+			"cpu":    "cpu-total",
+			"host":   fmt.Sprintf("host%d", rand.Intn(1000)),
+			"region": regions[rand.Intn(len(regions))],
+		}
+
+		idle := rand.Float64() * 100.0
+		fields := map[string]interface{}{
+			"idle": idle,
+			"busy": 100.0 - idle,
+		}
+
+		pt, err := client.NewPoint(
+			"cpu_usage",
+			tags,
+			fields,
+			time.Now(),
+		)
+		if err != nil {
+			log.Fatal(err)
+		}
+		bp.AddPoint(pt)
+	}
+
+	err := clnt.Write(bp)
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+```
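+
+If you do want to target a specific retention policy, set it on the batch
+configuration. A sketch, assuming a retention policy named _one_week_ already
+exists on the database:
+
+```go
+bp, err := client.NewBatchPoints(client.BatchPointsConfig{
+	Database:        "systemstats",
+	RetentionPolicy: "one_week", // hypothetical retention policy
+	Precision:       "us",
+})
+if err != nil {
+	log.Fatal(err)
+}
+```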
+
+
+### Querying Data
+
+One nice advantage of using **InfluxDB** is the ability to query your data using
+familiar SQL constructs. In this example we create a convenience function to query
+the database as follows:
+
+```go
+// queryDB is a convenience function to query the database
+func queryDB(clnt client.Client, cmd string) (res []client.Result, err error) {
+	q := client.Query{
+		Command:  cmd,
+		Database: MyDB,
+	}
+	if response, err := clnt.Query(q); err == nil {
+		if response.Error() != nil {
+			return res, response.Error()
+		}
+		res = response.Results
+	} else {
+		return res, err
+	}
+	return res, nil
+}
+```
+
+#### Creating a Database
+
+```go
+_, err := queryDB(clnt, fmt.Sprintf("CREATE DATABASE %s", MyDB))
+if err != nil {
+	log.Fatal(err)
+}
+```
+
+#### Count Records
+
+```go
+q := fmt.Sprintf("SELECT count(%s) FROM %s", "value", MyMeasurement)
+res, err := queryDB(clnt, q)
+if err != nil {
+	log.Fatal(err)
+}
+count := res[0].Series[0].Values[0][1]
+log.Printf("Found a total of %v records\n", count)
+```
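+
+Note that the client decodes responses with `json.Number`, so a numeric value
+pulled out of `Values` is a `json.Number` rather than a `float64`. A sketch of
+converting the count above:
+
+```go
+// count is an interface{} holding a json.Number.
+if n, ok := count.(json.Number); ok {
+	total, err := n.Int64()
+	if err != nil {
+		log.Fatal(err)
+	}
+	log.Printf("count as int64: %d", total)
+}
+```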
+
+#### Find the last 10 _shapes_ records
+
+```go
+q := fmt.Sprintf("SELECT * FROM %s LIMIT %d", MyMeasurement, 20)
+res, err = queryDB(clnt, q)
+if err != nil {
+	log.Fatal(err)
+}
+
+for i, row := range res[0].Series[0].Values {
+	t, err := time.Parse(time.RFC3339, row[0].(string))
+	if err != nil {
+		log.Fatal(err)
+	}
+	val := row[1].(string)
+	log.Printf("[%2d] %s: %s\n", i, t.Format(time.Stamp), val)
+}
+```
+
+### Using the UDP Client
+
+The **InfluxDB** client also supports writing over UDP.
+
+```go
+func WriteUDP() {
+	// Make client
+	c, err := client.NewUDPClient(client.UDPConfig{
+		Addr: "localhost:8089",
+	})
+	if err != nil {
+		panic(err.Error())
+	}
+
+	// Create a new point batch
+	bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
+		Precision: "s",
+	})
+
+	// Create a point and add to batch
+	tags := map[string]string{"cpu": "cpu-total"}
+	fields := map[string]interface{}{
+		"idle":   10.1,
+		"system": 53.3,
+		"user":   46.6,
+	}
+	pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
+	if err != nil {
+		panic(err.Error())
+	}
+	bp.AddPoint(pt)
+
+	// Write the batch
+	if err := c.Write(bp); err != nil {
+		panic(err.Error())
+	}
+}
+```
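+
+Both the HTTP and UDP clients implement `Close() error`, so in long-running
+programs it is good practice to close the client when you are done with it,
+e.g. `defer c.Close()` right after creating it.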
+
+## Go Docs
+
+Please refer to
+[http://godoc.org/github.com/influxdata/influxdb/client/v2](http://godoc.org/github.com/influxdata/influxdb/client/v2)
+for documentation.
+
+## See Also
+
+You can also examine how the client library is used by the
+[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go).

+ 789 - 0
Godeps/_workspace/src/github.com/influxdata/influxdb/client/influxdb.go

@@ -0,0 +1,789 @@
+package client
+
+import (
+	"bytes"
+	"crypto/tls"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/influxdata/influxdb/models"
+)
+
+const (
+	// DefaultHost is the default host used to connect to an InfluxDB instance
+	DefaultHost = "localhost"
+
+	// DefaultPort is the default port used to connect to an InfluxDB instance
+	DefaultPort = 8086
+
+	// DefaultTimeout is the default connection timeout used to connect to an InfluxDB instance
+	DefaultTimeout = 0
+)
+
+// Query is used to send a command to the server. Both Command and Database are required.
+type Query struct {
+	Command  string
+	Database string
+
+	// Chunked tells the server to send back chunked responses. This places
+	// less load on the server by sending back chunks of the response rather
+	// than waiting for the entire response all at once.
+	Chunked bool
+
+	// ChunkSize sets the maximum number of rows that will be returned per
+	// chunk. Chunks are either divided based on their series or if they hit
+	// the chunk size limit.
+	//
+	// Chunked must be set to true for this option to be used.
+	ChunkSize int
+}
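+
+// For example, a chunked query might be constructed as follows (a sketch;
+// the command and sizes are illustrative):
+//
+//	q := Query{
+//		Command:   "SELECT * FROM cpu_usage",
+//		Database:  "systemstats",
+//		Chunked:   true,
+//		ChunkSize: 10000,
+//	}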
+
+// ParseConnectionString will parse a string to create a valid connection URL
+func ParseConnectionString(path string, ssl bool) (url.URL, error) {
+	var host string
+	var port int
+
+	h, p, err := net.SplitHostPort(path)
+	if err != nil {
+		if path == "" {
+			host = DefaultHost
+		} else {
+			host = path
+		}
+		// If they didn't specify a port, always use the default port
+		port = DefaultPort
+	} else {
+		host = h
+		port, err = strconv.Atoi(p)
+		if err != nil {
+			return url.URL{}, fmt.Errorf("invalid port number %q: %s\n", path, err)
+		}
+	}
+
+	u := url.URL{
+		Scheme: "http",
+	}
+	if ssl {
+		u.Scheme = "https"
+	}
+
+	u.Host = net.JoinHostPort(host, strconv.Itoa(port))
+
+	return u, nil
+}
+
+// Config is used to specify what server to connect to.
+// URL: The URL of the server to connect to.
+// Username/Password are optional. They will be passed via basic auth if provided.
+// UserAgent: If not provided, will default to "InfluxDBClient".
+// Timeout: If not provided, will default to 0 (no timeout).
+type Config struct {
+	URL       url.URL
+	Username  string
+	Password  string
+	UserAgent string
+	Timeout   time.Duration
+	Precision string
+	UnsafeSsl bool
+}
+
+// NewConfig will create a config to be used in connecting to the client
+func NewConfig() Config {
+	return Config{
+		Timeout: DefaultTimeout,
+	}
+}
+
+// Client is used to make calls to the server.
+type Client struct {
+	url        url.URL
+	username   string
+	password   string
+	httpClient *http.Client
+	userAgent  string
+	precision  string
+}
+
+const (
+	// ConsistencyOne requires at least one data node acknowledged a write.
+	ConsistencyOne = "one"
+
+	// ConsistencyAll requires all data nodes to acknowledge a write.
+	ConsistencyAll = "all"
+
+	// ConsistencyQuorum requires a quorum of data nodes to acknowledge a write.
+	ConsistencyQuorum = "quorum"
+
+	// ConsistencyAny allows for hinted handoff; potentially no write has happened yet.
+	ConsistencyAny = "any"
+)
+
+// NewClient will instantiate and return a connected client to issue commands to the server.
+func NewClient(c Config) (*Client, error) {
+	tlsConfig := &tls.Config{
+		InsecureSkipVerify: c.UnsafeSsl,
+	}
+
+	tr := &http.Transport{
+		TLSClientConfig: tlsConfig,
+	}
+
+	client := Client{
+		url:        c.URL,
+		username:   c.Username,
+		password:   c.Password,
+		httpClient: &http.Client{Timeout: c.Timeout, Transport: tr},
+		userAgent:  c.UserAgent,
+		precision:  c.Precision,
+	}
+	if client.userAgent == "" {
+		client.userAgent = "InfluxDBClient"
+	}
+	return &client, nil
+}
+
+// SetAuth will update the username and password
+func (c *Client) SetAuth(u, p string) {
+	c.username = u
+	c.password = p
+}
+
+// SetPrecision will update the precision
+func (c *Client) SetPrecision(precision string) {
+	c.precision = precision
+}
+
+// Query sends a command to the server and returns the Response
+func (c *Client) Query(q Query) (*Response, error) {
+	u := c.url
+
+	u.Path = "query"
+	values := u.Query()
+	values.Set("q", q.Command)
+	values.Set("db", q.Database)
+	if q.Chunked {
+		values.Set("chunked", "true")
+		if q.ChunkSize > 0 {
+			values.Set("chunk_size", strconv.Itoa(q.ChunkSize))
+		}
+	}
+	if c.precision != "" {
+		values.Set("epoch", c.precision)
+	}
+	u.RawQuery = values.Encode()
+
+	req, err := http.NewRequest("POST", u.String(), nil)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("User-Agent", c.userAgent)
+	if c.username != "" {
+		req.SetBasicAuth(c.username, c.password)
+	}
+
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var response Response
+	if q.Chunked {
+		cr := NewChunkedResponse(resp.Body)
+		for {
+			r, err := cr.NextResponse()
+			if err != nil {
+				// If we got an error while decoding the response, send that back.
+				return nil, err
+			}
+
+			if r == nil {
+				break
+			}
+
+			response.Results = append(response.Results, r.Results...)
+			if r.Err != nil {
+				response.Err = r.Err
+				break
+			}
+		}
+	} else {
+		dec := json.NewDecoder(resp.Body)
+		dec.UseNumber()
+		if err := dec.Decode(&response); err != nil {
+			// Ignore EOF errors if we got an invalid status code.
+			if !(err == io.EOF && resp.StatusCode != http.StatusOK) {
+				return nil, err
+			}
+		}
+	}
+
+	// If we don't have an error in our json response, and didn't get StatusOK,
+	// then send back an error.
+	if resp.StatusCode != http.StatusOK && response.Error() == nil {
+		return &response, fmt.Errorf("received status code %d from server", resp.StatusCode)
+	}
+	return &response, nil
+}
+
+// Write takes BatchPoints and allows for writing of multiple points with defaults
+// If successful, error is nil and Response is nil
+// If an error occurs, Response may contain additional information if populated.
+func (c *Client) Write(bp BatchPoints) (*Response, error) {
+	u := c.url
+	u.Path = "write"
+
+	var b bytes.Buffer
+	for _, p := range bp.Points {
+		err := checkPointTypes(p)
+		if err != nil {
+			return nil, err
+		}
+		if p.Raw != "" {
+			if _, err := b.WriteString(p.Raw); err != nil {
+				return nil, err
+			}
+		} else {
+			for k, v := range bp.Tags {
+				if p.Tags == nil {
+					p.Tags = make(map[string]string, len(bp.Tags))
+				}
+				p.Tags[k] = v
+			}
+
+			if _, err := b.WriteString(p.MarshalString()); err != nil {
+				return nil, err
+			}
+		}
+
+		if err := b.WriteByte('\n'); err != nil {
+			return nil, err
+		}
+	}
+
+	req, err := http.NewRequest("POST", u.String(), &b)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Content-Type", "")
+	req.Header.Set("User-Agent", c.userAgent)
+	if c.username != "" {
+		req.SetBasicAuth(c.username, c.password)
+	}
+
+	precision := bp.Precision
+	if precision == "" {
+		precision = c.precision
+	}
+
+	params := req.URL.Query()
+	params.Set("db", bp.Database)
+	params.Set("rp", bp.RetentionPolicy)
+	params.Set("precision", precision)
+	params.Set("consistency", bp.WriteConsistency)
+	req.URL.RawQuery = params.Encode()
+
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var response Response
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
+		var err = fmt.Errorf(string(body))
+		response.Err = err
+		return &response, err
+	}
+
+	return nil, nil
+}
+
+// WriteLineProtocol takes a string of line-protocol data, with newlines delimiting each point
+// If successful, error is nil and Response is nil
+// If an error occurs, Response may contain additional information if populated.
+func (c *Client) WriteLineProtocol(data, database, retentionPolicy, precision, writeConsistency string) (*Response, error) {
+	u := c.url
+	u.Path = "write"
+
+	r := strings.NewReader(data)
+
+	req, err := http.NewRequest("POST", u.String(), r)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Content-Type", "")
+	req.Header.Set("User-Agent", c.userAgent)
+	if c.username != "" {
+		req.SetBasicAuth(c.username, c.password)
+	}
+	params := req.URL.Query()
+	params.Set("db", database)
+	params.Set("rp", retentionPolicy)
+	params.Set("precision", precision)
+	params.Set("consistency", writeConsistency)
+	req.URL.RawQuery = params.Encode()
+
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var response Response
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
+		err := fmt.Errorf(string(body))
+		response.Err = err
+		return &response, err
+	}
+
+	return nil, nil
+}
+
+// Ping will check to see if the server is up
+// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred.
+func (c *Client) Ping() (time.Duration, string, error) {
+	now := time.Now()
+	u := c.url
+	u.Path = "ping"
+
+	req, err := http.NewRequest("GET", u.String(), nil)
+	if err != nil {
+		return 0, "", err
+	}
+	req.Header.Set("User-Agent", c.userAgent)
+	if c.username != "" {
+		req.SetBasicAuth(c.username, c.password)
+	}
+
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		return 0, "", err
+	}
+	defer resp.Body.Close()
+
+	version := resp.Header.Get("X-Influxdb-Version")
+	return time.Since(now), version, nil
+}
+
+// Structs
+
+// Message represents a user message.
+type Message struct {
+	Level string `json:"level,omitempty"`
+	Text  string `json:"text,omitempty"`
+}
+
+// Result represents a resultset returned from a single statement.
+type Result struct {
+	Series   []models.Row
+	Messages []*Message
+	Err      error
+}
+
+// MarshalJSON encodes the result into JSON.
+func (r *Result) MarshalJSON() ([]byte, error) {
+	// Define a struct that outputs "error" as a string.
+	var o struct {
+		Series   []models.Row `json:"series,omitempty"`
+		Messages []*Message   `json:"messages,omitempty"`
+		Err      string       `json:"error,omitempty"`
+	}
+
+	// Copy fields to output struct.
+	o.Series = r.Series
+	o.Messages = r.Messages
+	if r.Err != nil {
+		o.Err = r.Err.Error()
+	}
+
+	return json.Marshal(&o)
+}
+
+// UnmarshalJSON decodes the data into the Result struct
+func (r *Result) UnmarshalJSON(b []byte) error {
+	var o struct {
+		Series   []models.Row `json:"series,omitempty"`
+		Messages []*Message   `json:"messages,omitempty"`
+		Err      string       `json:"error,omitempty"`
+	}
+
+	dec := json.NewDecoder(bytes.NewBuffer(b))
+	dec.UseNumber()
+	err := dec.Decode(&o)
+	if err != nil {
+		return err
+	}
+	r.Series = o.Series
+	r.Messages = o.Messages
+	if o.Err != "" {
+		r.Err = errors.New(o.Err)
+	}
+	return nil
+}
+
+// Response represents a list of statement results.
+type Response struct {
+	Results []Result
+	Err     error
+}
+
+// MarshalJSON encodes the response into JSON.
+func (r *Response) MarshalJSON() ([]byte, error) {
+	// Define a struct that outputs "error" as a string.
+	var o struct {
+		Results []Result `json:"results,omitempty"`
+		Err     string   `json:"error,omitempty"`
+	}
+
+	// Copy fields to output struct.
+	o.Results = r.Results
+	if r.Err != nil {
+		o.Err = r.Err.Error()
+	}
+
+	return json.Marshal(&o)
+}
+
+// UnmarshalJSON decodes the data into the Response struct
+func (r *Response) UnmarshalJSON(b []byte) error {
+	var o struct {
+		Results []Result `json:"results,omitempty"`
+		Err     string   `json:"error,omitempty"`
+	}
+
+	dec := json.NewDecoder(bytes.NewBuffer(b))
+	dec.UseNumber()
+	err := dec.Decode(&o)
+	if err != nil {
+		return err
+	}
+	r.Results = o.Results
+	if o.Err != "" {
+		r.Err = errors.New(o.Err)
+	}
+	return nil
+}
+
+// Error returns the first error from any statement.
+// Returns nil if no errors occurred on any statements.
+func (r *Response) Error() error {
+	if r.Err != nil {
+		return r.Err
+	}
+	for _, result := range r.Results {
+		if result.Err != nil {
+			return result.Err
+		}
+	}
+	return nil
+}
+
+// ChunkedResponse represents a response from the server that
+// uses chunking to stream the output.
+type ChunkedResponse struct {
+	dec *json.Decoder
+}
+
+// NewChunkedResponse reads a stream and produces responses from the stream.
+func NewChunkedResponse(r io.Reader) *ChunkedResponse {
+	dec := json.NewDecoder(r)
+	dec.UseNumber()
+	return &ChunkedResponse{dec: dec}
+}
+
+// NextResponse reads the next line of the stream and returns a response.
+func (r *ChunkedResponse) NextResponse() (*Response, error) {
+	var response Response
+	if err := r.dec.Decode(&response); err != nil {
+		if err == io.EOF {
+			return nil, nil
+		}
+		return nil, err
+	}
+	return &response, nil
+}
+
+// Point defines the fields that will be written to the database
+// Measurement, Time, and Fields are required
+// Precision can be specified if the time is in epoch format (integer).
+// Valid values for Precision are n, u, ms, s, m, and h
+type Point struct {
+	Measurement string
+	Tags        map[string]string
+	Time        time.Time
+	Fields      map[string]interface{}
+	Precision   string
+	Raw         string
+}
+
+// MarshalJSON will format the time in RFC3339Nano.
+// Precision is ignored here, as it is only used for writing, not reading;
+// in other words, we always send times back in nanosecond precision.
+func (p *Point) MarshalJSON() ([]byte, error) {
+	point := struct {
+		Measurement string                 `json:"measurement,omitempty"`
+		Tags        map[string]string      `json:"tags,omitempty"`
+		Time        string                 `json:"time,omitempty"`
+		Fields      map[string]interface{} `json:"fields,omitempty"`
+		Precision   string                 `json:"precision,omitempty"`
+	}{
+		Measurement: p.Measurement,
+		Tags:        p.Tags,
+		Fields:      p.Fields,
+		Precision:   p.Precision,
+	}
+	// Let it omit empty if it's really zero
+	if !p.Time.IsZero() {
+		point.Time = p.Time.UTC().Format(time.RFC3339Nano)
+	}
+	return json.Marshal(&point)
+}
+
+// MarshalString renders a string representation of a Point with the specified
+// precision. The default precision is nanoseconds.
+func (p *Point) MarshalString() string {
+	pt, err := models.NewPoint(p.Measurement, p.Tags, p.Fields, p.Time)
+	if err != nil {
+		return "# ERROR: " + err.Error() + " " + p.Measurement
+	}
+	if p.Precision == "" || p.Precision == "ns" || p.Precision == "n" {
+		return pt.String()
+	}
+	return pt.PrecisionString(p.Precision)
+}
+
+// UnmarshalJSON decodes the data into the Point struct
+func (p *Point) UnmarshalJSON(b []byte) error {
+	var normal struct {
+		Measurement string                 `json:"measurement"`
+		Tags        map[string]string      `json:"tags"`
+		Time        time.Time              `json:"time"`
+		Precision   string                 `json:"precision"`
+		Fields      map[string]interface{} `json:"fields"`
+	}
+	var epoch struct {
+		Measurement string                 `json:"measurement"`
+		Tags        map[string]string      `json:"tags"`
+		Time        *int64                 `json:"time"`
+		Precision   string                 `json:"precision"`
+		Fields      map[string]interface{} `json:"fields"`
+	}
+
+	if err := func() error {
+		var err error
+		dec := json.NewDecoder(bytes.NewBuffer(b))
+		dec.UseNumber()
+		if err = dec.Decode(&epoch); err != nil {
+			return err
+		}
+		// Convert from epoch to time.Time, but only if Time
+		// was actually set.
+		var ts time.Time
+		if epoch.Time != nil {
+			ts, err = EpochToTime(*epoch.Time, epoch.Precision)
+			if err != nil {
+				return err
+			}
+		}
+		p.Measurement = epoch.Measurement
+		p.Tags = epoch.Tags
+		p.Time = ts
+		p.Precision = epoch.Precision
+		p.Fields = normalizeFields(epoch.Fields)
+		return nil
+	}(); err == nil {
+		return nil
+	}
+
+	dec := json.NewDecoder(bytes.NewBuffer(b))
+	dec.UseNumber()
+	if err := dec.Decode(&normal); err != nil {
+		return err
+	}
+	normal.Time = SetPrecision(normal.Time, normal.Precision)
+	p.Measurement = normal.Measurement
+	p.Tags = normal.Tags
+	p.Time = normal.Time
+	p.Precision = normal.Precision
+	p.Fields = normalizeFields(normal.Fields)
+
+	return nil
+}
+
+// Remove any notion of json.Number
+func normalizeFields(fields map[string]interface{}) map[string]interface{} {
+	newFields := map[string]interface{}{}
+
+	for k, v := range fields {
+		switch v := v.(type) {
+		case json.Number:
+			jv, e := v.Float64()
+			if e != nil {
+				panic(fmt.Sprintf("unable to convert json.Number to float64: %s", e))
+			}
+			newFields[k] = jv
+		default:
+			newFields[k] = v
+		}
+	}
+	return newFields
+}
+
+// BatchPoints is used to send batched data in a single write.
+// Database and Points are required
+// If no retention policy is specified, it will use the database's default retention policy.
+// If tags are specified, they will be "merged" with all points. If a point already has that tag, it will be ignored.
+// If time is specified, it will be applied to any point with an empty time.
+// Precision can be specified if the time is in epoch format (integer).
+// Valid values for Precision are n, u, ms, s, m, and h
+type BatchPoints struct {
+	Points           []Point           `json:"points,omitempty"`
+	Database         string            `json:"database,omitempty"`
+	RetentionPolicy  string            `json:"retentionPolicy,omitempty"`
+	Tags             map[string]string `json:"tags,omitempty"`
+	Time             time.Time         `json:"time,omitempty"`
+	Precision        string            `json:"precision,omitempty"`
+	WriteConsistency string            `json:"-"`
+}
+
+// UnmarshalJSON decodes the data into the BatchPoints struct
+func (bp *BatchPoints) UnmarshalJSON(b []byte) error {
+	var normal struct {
+		Points          []Point           `json:"points"`
+		Database        string            `json:"database"`
+		RetentionPolicy string            `json:"retentionPolicy"`
+		Tags            map[string]string `json:"tags"`
+		Time            time.Time         `json:"time"`
+		Precision       string            `json:"precision"`
+	}
+	var epoch struct {
+		Points          []Point           `json:"points"`
+		Database        string            `json:"database"`
+		RetentionPolicy string            `json:"retentionPolicy"`
+		Tags            map[string]string `json:"tags"`
+		Time            *int64            `json:"time"`
+		Precision       string            `json:"precision"`
+	}
+
+	if err := func() error {
+		var err error
+		if err = json.Unmarshal(b, &epoch); err != nil {
+			return err
+		}
+		// Convert from epoch to time.Time
+		var ts time.Time
+		if epoch.Time != nil {
+			ts, err = EpochToTime(*epoch.Time, epoch.Precision)
+			if err != nil {
+				return err
+			}
+		}
+		bp.Points = epoch.Points
+		bp.Database = epoch.Database
+		bp.RetentionPolicy = epoch.RetentionPolicy
+		bp.Tags = epoch.Tags
+		bp.Time = ts
+		bp.Precision = epoch.Precision
+		return nil
+	}(); err == nil {
+		return nil
+	}
+
+	if err := json.Unmarshal(b, &normal); err != nil {
+		return err
+	}
+	normal.Time = SetPrecision(normal.Time, normal.Precision)
+	bp.Points = normal.Points
+	bp.Database = normal.Database
+	bp.RetentionPolicy = normal.RetentionPolicy
+	bp.Tags = normal.Tags
+	bp.Time = normal.Time
+	bp.Precision = normal.Precision
+
+	return nil
+}
+
+// utility functions
+
+// Addr returns the URL of the server the client is connected to, as a string.
+func (c *Client) Addr() string {
+	return c.url.String()
+}
+
+// checkPointTypes ensures no unsupported types are submitted to influxdb, returning error if they are found.
+func checkPointTypes(p Point) error {
+	for _, v := range p.Fields {
+		switch v.(type) {
+		case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, float32, float64, bool, string, nil:
+			// supported type; keep checking the remaining fields
+		default:
+			return fmt.Errorf("unsupported point type: %T", v)
+		}
+	}
+	return nil
+}
+
+// helper functions
+
+// EpochToTime takes a unix epoch time and uses precision to return back a time.Time
+func EpochToTime(epoch int64, precision string) (time.Time, error) {
+	if precision == "" {
+		precision = "s"
+	}
+	var t time.Time
+	switch precision {
+	case "h":
+		t = time.Unix(0, epoch*int64(time.Hour))
+	case "m":
+		t = time.Unix(0, epoch*int64(time.Minute))
+	case "s":
+		t = time.Unix(0, epoch*int64(time.Second))
+	case "ms":
+		t = time.Unix(0, epoch*int64(time.Millisecond))
+	case "u":
+		t = time.Unix(0, epoch*int64(time.Microsecond))
+	case "n":
+		t = time.Unix(0, epoch)
+	default:
+		return time.Time{}, fmt.Errorf("Unknown precision %q", precision)
+	}
+	return t, nil
+}
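+
+// For example, with second precision (a sketch; the epoch value is illustrative):
+//
+//	t, _ := EpochToTime(1257894000, "s")
+//	// t is 2009-11-10 23:00:00 +0000 UTC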
+
+// SetPrecision will round a time to the specified precision
+func SetPrecision(t time.Time, precision string) time.Time {
+	switch precision {
+	case "n":
+	case "u":
+		return t.Round(time.Microsecond)
+	case "ms":
+		return t.Round(time.Millisecond)
+	case "s":
+		return t.Round(time.Second)
+	case "m":
+		return t.Round(time.Minute)
+	case "h":
+		return t.Round(time.Hour)
+	}
+	return t
+}

+ 46 - 0
Godeps/_workspace/src/github.com/influxdata/influxdb/models/consistency.go

@@ -0,0 +1,46 @@
+package models
+
+import (
+	"errors"
+	"strings"
+)
+
+// ConsistencyLevel represents the replication criteria required before a write
+// can be returned as successful
+type ConsistencyLevel int
+
+const (
+	// ConsistencyLevelAny allows for hinted handoff; potentially no write has happened yet
+	ConsistencyLevelAny ConsistencyLevel = iota
+
+	// ConsistencyLevelOne requires at least one data node acknowledged a write
+	ConsistencyLevelOne
+
+	// ConsistencyLevelQuorum requires a quorum of data nodes to acknowledge a write
+	ConsistencyLevelQuorum
+
+	// ConsistencyLevelAll requires all data nodes to acknowledge a write
+	ConsistencyLevelAll
+)
+
+var (
+	// ErrInvalidConsistencyLevel is returned when parsing the string version
+	// of a consistency level.
+	ErrInvalidConsistencyLevel = errors.New("invalid consistency level")
+)
+
+// ParseConsistencyLevel converts a consistency level string to the corresponding ConsistencyLevel const
+func ParseConsistencyLevel(level string) (ConsistencyLevel, error) {
+	switch strings.ToLower(level) {
+	case "any":
+		return ConsistencyLevelAny, nil
+	case "one":
+		return ConsistencyLevelOne, nil
+	case "quorum":
+		return ConsistencyLevelQuorum, nil
+	case "all":
+		return ConsistencyLevelAll, nil
+	default:
+		return 0, ErrInvalidConsistencyLevel
+	}
+}
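+
+// For example (a sketch):
+//
+//	level, err := ParseConsistencyLevel("quorum")
+//	// level == ConsistencyLevelQuorum, err == nil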

+ 1576 - 0
Godeps/_workspace/src/github.com/influxdata/influxdb/models/points.go

@@ -0,0 +1,1576 @@
+package models
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"hash/fnv"
+	"math"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/influxdata/influxdb/pkg/escape"
+)
+
+var (
+	measurementEscapeCodes = map[byte][]byte{
+		',': []byte(`\,`),
+		' ': []byte(`\ `),
+	}
+
+	tagEscapeCodes = map[byte][]byte{
+		',': []byte(`\,`),
+		' ': []byte(`\ `),
+		'=': []byte(`\=`),
+	}
+
+	ErrPointMustHaveAField  = errors.New("point without fields is unsupported")
+	ErrInvalidNumber        = errors.New("invalid number")
+	ErrMaxKeyLengthExceeded = errors.New("max key length exceeded")
+)
+
+const (
+	MaxKeyLength = 65535
+)
+
+// Point defines the values that will be written to the database
+type Point interface {
+	Name() string
+	SetName(string)
+
+	Tags() Tags
+	AddTag(key, value string)
+	SetTags(tags Tags)
+
+	Fields() Fields
+
+	Time() time.Time
+	SetTime(t time.Time)
+	UnixNano() int64
+
+	HashID() uint64
+	Key() []byte
+
+	Data() []byte
+	SetData(buf []byte)
+
+	// String returns a string representation of the point, if there is a
+	// timestamp associated with the point then it will be specified with the default
+	// precision of nanoseconds
+	String() string
+
+	// Bytes returns a []byte representation of the point similar to string.
+	MarshalBinary() ([]byte, error)
+
+	// PrecisionString returns a string representation of the point, if there
+	// is a timestamp associated with the point then it will be specified in the
+	// given unit
+	PrecisionString(precision string) string
+
+	// RoundedString returns a string representation of the point, if there
+	// is a timestamp associated with the point, then it will be rounded to the
+	// given duration
+	RoundedString(d time.Duration) string
+}
+
+// Points represents a sortable list of points by timestamp.
+type Points []Point
+
+func (a Points) Len() int           { return len(a) }
+func (a Points) Less(i, j int) bool { return a[i].Time().Before(a[j].Time()) }
+func (a Points) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+
+// point is the default implementation of Point.
+type point struct {
+	time time.Time
+
+	// text encoding of measurement and tags
+	// key must always be stored sorted by tags, if the original line was not sorted,
+	// we need to resort it
+	key []byte
+
+	// text encoding of field data
+	fields []byte
+
+	// text encoding of timestamp
+	ts []byte
+
+	// binary encoded field data
+	data []byte
+
+	// cached version of parsed fields from data
+	cachedFields map[string]interface{}
+
+	// cached version of parsed name from key
+	cachedName string
+}
+
+const (
+	// the number of characters for the largest possible int64 (9223372036854775807)
+	maxInt64Digits = 19
+
+	// the number of characters for the smallest possible int64 (-9223372036854775808)
+	minInt64Digits = 20
+
+	// the number of characters required for the largest float64 before a range check
+	// would occur during parsing
+	maxFloat64Digits = 25
+
+	// the number of characters required for the smallest float64 before a range
+	// check would occur during parsing
+	minFloat64Digits = 27
+)
+
+// ParsePoints returns a slice of Points from a text representation of a point
+// with each point separated by newlines.  If any points fail to parse, a non-nil error
+// will be returned in addition to the points that parsed successfully.
+func ParsePoints(buf []byte) ([]Point, error) {
+	return ParsePointsWithPrecision(buf, time.Now().UTC(), "n")
+}
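+
+// For example (a sketch; the argument is a single point in line protocol):
+//
+//	pts, err := ParsePoints([]byte("cpu,host=serverA value=1.0 1257894000000000000"))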
+
+// ParsePointsString is identical to ParsePoints but accepts a string
+// buffer.
+func ParsePointsString(buf string) ([]Point, error) {
+	return ParsePoints([]byte(buf))
+}
+
+// ParseKey returns the measurement name and tags from a point.
+func ParseKey(buf string) (string, Tags, error) {
+	// Ignore the error from scanMeasurement: it returns "missing fields",
+	// which is expected when we are only parsing a key
+	state, i, _ := scanMeasurement([]byte(buf), 0)
+
+	var tags Tags
+	if state == tagKeyState {
+		tags = parseTags([]byte(buf))
+		// scanMeasurement returns the location of the comma if there are tags, strip that off
+		return string(buf[:i-1]), tags, nil
+	}
+	return string(buf[:i]), tags, nil
+}
+
+// ParsePointsWithPrecision is similar to ParsePoints, but allows the
+// caller to provide a precision for time.
+func ParsePointsWithPrecision(buf []byte, defaultTime time.Time, precision string) ([]Point, error) {
+	points := []Point{}
+	var (
+		pos    int
+		block  []byte
+		failed []string
+	)
+	for {
+		pos, block = scanLine(buf, pos)
+		pos++
+
+		if len(block) == 0 {
+			break
+		}
+
+		// lines which start with '#' are comments
+		start := skipWhitespace(block, 0)
+
+		// If line is all whitespace, just skip it
+		if start >= len(block) {
+			continue
+		}
+
+		if block[start] == '#' {
+			continue
+		}
+
+		// strip the newline if one is present
+		if block[len(block)-1] == '\n' {
+			block = block[:len(block)-1]
+		}
+
+		pt, err := parsePoint(block[start:len(block)], defaultTime, precision)
+		if err != nil {
+			failed = append(failed, fmt.Sprintf("unable to parse '%s': %v", string(block[start:len(block)]), err))
+		} else {
+			points = append(points, pt)
+		}
+
+		if pos >= len(buf) {
+			break
+		}
+
+	}
+	if len(failed) > 0 {
+		return points, fmt.Errorf("%s", strings.Join(failed, "\n"))
+	}
+	return points, nil
+
+}
+
+func parsePoint(buf []byte, defaultTime time.Time, precision string) (Point, error) {
+	// scan the first block which is measurement[,tag1=value1,tag2=value2...]
+	pos, key, err := scanKey(buf, 0)
+	if err != nil {
+		return nil, err
+	}
+
+	// measurement name is required
+	if len(key) == 0 {
+		return nil, fmt.Errorf("missing measurement")
+	}
+
+	if len(key) > MaxKeyLength {
+		return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength)
+	}
+
+	// scan the second block, which is field1=value1[,field2=value2,...]
+	pos, fields, err := scanFields(buf, pos)
+	if err != nil {
+		return nil, err
+	}
+
+	// at least one field is required
+	if len(fields) == 0 {
+		return nil, fmt.Errorf("missing fields")
+	}
+
+	// scan the last block which is an optional integer timestamp
+	pos, ts, err := scanTime(buf, pos)
+
+	if err != nil {
+		return nil, err
+	}
+
+	pt := &point{
+		key:    key,
+		fields: fields,
+		ts:     ts,
+	}
+
+	if len(ts) == 0 {
+		pt.time = defaultTime
+		pt.SetPrecision(precision)
+	} else {
+		ts, err := strconv.ParseInt(string(ts), 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		pt.time, err = SafeCalcTime(ts, precision)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return pt, nil
+}
+
+// GetPrecisionMultiplier will return a multiplier for the precision specified
+func GetPrecisionMultiplier(precision string) int64 {
+	d := time.Nanosecond
+	switch precision {
+	case "u":
+		d = time.Microsecond
+	case "ms":
+		d = time.Millisecond
+	case "s":
+		d = time.Second
+	case "m":
+		d = time.Minute
+	case "h":
+		d = time.Hour
+	}
+	return int64(d)
+}
+
+// scanKey scans buf starting at i for the measurement and tag portion of the point.
+// It returns the ending position and the byte slice of key within buf.  If there
+// are tags, they will be sorted if they are not already.
+func scanKey(buf []byte, i int) (int, []byte, error) {
+	start := skipWhitespace(buf, i)
+
+	i = start
+
+	// Determines whether the tags are sorted; assume they are
+	sorted := true
+
+	// indices holds the indexes within buf of the start of each tag.  For example,
+	// a buf of 'cpu,host=a,region=b,zone=c' would have an indices slice of [4,11,20],
+	// which indicates that the first tag starts at buf[4], the second at buf[11], and
+	// the last at buf[20]
+	indices := make([]int, 100)
+
+	// tracks how many commas we've seen so we know how many values in indices
+	// are in use.  Since indices is an arbitrarily large slice,
+	// we need to know how many of its entries are actually in use.
+	commas := 0
+
+	// First scan the Point's measurement.
+	state, i, err := scanMeasurement(buf, i)
+	if err != nil {
+		return i, buf[start:i], err
+	}
+
+	// Optionally scan tags if needed.
+	if state == tagKeyState {
+		i, commas, indices, err = scanTags(buf, i, indices)
+		if err != nil {
+			return i, buf[start:i], err
+		}
+	}
+
+	// Now that we know where the key region is within buf, and the locations of the
+	// tags, we need to determine if duplicate tags exist and if the tags are sorted.
+	// This iterates over 1/2 of the list, comparing each end with the other, walking
+	// towards the center from both sides.
+	for j := 0; j < commas/2; j++ {
+		// get the left and right tags
+		_, left := scanTo(buf[indices[j]:indices[j+1]-1], 0, '=')
+		_, right := scanTo(buf[indices[commas-j-1]:indices[commas-j]-1], 0, '=')
+
+		// If the tags are equal, then there are duplicate tags, and we should abort
+		if bytes.Equal(left, right) {
+			return i, buf[start:i], fmt.Errorf("duplicate tags")
+		}
+
+		// If left is greater than right, the tags are not sorted.  We must continue
+		// since there could still be duplicate tags.
+		if bytes.Compare(left, right) > 0 {
+			sorted = false
+		}
+	}
+
+	// If the tags are not sorted, then sort them.  This sort is done in place using
+	// the tag indices we created earlier.  The actual buffer is not sorted; the
+	// indices are, using the buffer for value comparison.  After the indices are
+	// sorted, the buffer is reconstructed from the sorted indices.
+	if !sorted && commas > 0 {
+		// Get the measurement name for later
+		measurement := buf[start : indices[0]-1]
+
+		// Sort the indices
+		indices := indices[:commas]
+		insertionSort(0, commas, buf, indices)
+
+		// Create a new key using the measurement and sorted indices
+		b := make([]byte, len(buf[start:i]))
+		pos := copy(b, measurement)
+		for _, i := range indices {
+			b[pos] = ','
+			pos++
+			_, v := scanToSpaceOr(buf, i, ',')
+			pos += copy(b[pos:], v)
+		}
+
+		return i, b, nil
+	}
+
+	return i, buf[start:i], nil
+}
+
+// The following constants allow us to specify which state to move to
+// next, when scanning sections of a Point.
+const (
+	tagKeyState = iota
+	tagValueState
+	fieldsState
+)
+
+// scanMeasurement examines the measurement part of a Point, returning
+// the next state to move to, and the current location in the buffer.
+func scanMeasurement(buf []byte, i int) (int, int, error) {
+	// Check first byte of measurement, anything except a comma is fine.
+	// It can't be a space, since whitespace is stripped prior to this
+	// function call.
+	if buf[i] == ',' {
+		return -1, i, fmt.Errorf("missing measurement")
+	}
+
+	for {
+		i++
+		if i >= len(buf) {
+			// cpu
+			return -1, i, fmt.Errorf("missing fields")
+		}
+
+		if buf[i-1] == '\\' {
+			// Skip character (it's escaped).
+			continue
+		}
+
+		// Unescaped comma; move onto scanning the tags.
+		if buf[i] == ',' {
+			return tagKeyState, i + 1, nil
+		}
+
+		// Unescaped space; move onto scanning the fields.
+		if buf[i] == ' ' {
+			// cpu value=1.0
+			return fieldsState, i, nil
+		}
+	}
+}
+
+// scanTags examines all the tags in a Point, keeping track of and
+// returning the updated indices slice, the number of commas, and the location
+// in buf at which to start examining the Point fields.
+func scanTags(buf []byte, i int, indices []int) (int, int, []int, error) {
+	var (
+		err    error
+		commas int
+		state  = tagKeyState
+	)
+
+	for {
+		switch state {
+		case tagKeyState:
+			// Grow our indices slice if we have too many tags.
+			if commas >= len(indices) {
+				newIndices := make([]int, cap(indices)*2)
+				copy(newIndices, indices)
+				indices = newIndices
+			}
+			indices[commas] = i
+			commas++
+
+			i, err = scanTagsKey(buf, i)
+			state = tagValueState // tag value always follows a tag key
+		case tagValueState:
+			state, i, err = scanTagsValue(buf, i)
+		case fieldsState:
+			indices[commas] = i + 1
+			return i, commas, indices, nil
+		}
+
+		if err != nil {
+			return i, commas, indices, err
+		}
+	}
+}
+
+// scanTagsKey scans each character in a tag key.
+func scanTagsKey(buf []byte, i int) (int, error) {
+	// First character of the key.
+	if i >= len(buf) || buf[i] == ' ' || buf[i] == ',' || buf[i] == '=' {
+		// cpu,{'', ' ', ',', '='}
+		return i, fmt.Errorf("missing tag key")
+	}
+
+	// Examine each character in the tag key until we hit an unescaped
+	// equals (the tag value), or we hit an error (i.e., unescaped
+	// space or comma).
+	for {
+		i++
+
+		// Either we reached the end of the buffer or we hit an
+		// unescaped comma or space.
+		if i >= len(buf) ||
+			((buf[i] == ' ' || buf[i] == ',') && buf[i-1] != '\\') {
+			// cpu,tag{'', ' ', ','}
+			return i, fmt.Errorf("missing tag value")
+		}
+
+		if buf[i] == '=' && buf[i-1] != '\\' {
+			// cpu,tag=
+			return i + 1, nil
+		}
+	}
+}
+
+// scanTagsValue scans each character in a tag value.
+func scanTagsValue(buf []byte, i int) (int, int, error) {
+	// Tag value cannot be empty.
+	if i >= len(buf) || buf[i] == ',' || buf[i] == ' ' {
+		// cpu,tag={',', ' '}
+		return -1, i, fmt.Errorf("missing tag value")
+	}
+
+	// Examine each character in the tag value until we hit an unescaped
+	// comma (move onto next tag key), an unescaped space (move onto
+	// fields), or we error out.
+	for {
+		i++
+		if i >= len(buf) {
+			// cpu,tag=value
+			return -1, i, fmt.Errorf("missing fields")
+		}
+
+		// An unescaped equals sign is an invalid tag value.
+		if buf[i] == '=' && buf[i-1] != '\\' {
+			// cpu,tag={'=', 'fo=o'}
+			return -1, i, fmt.Errorf("invalid tag format")
+		}
+
+		if buf[i] == ',' && buf[i-1] != '\\' {
+			// cpu,tag=foo,
+			return tagKeyState, i + 1, nil
+		}
+
+		// cpu,tag=foo value=1.0
+		// cpu, tag=foo\= value=1.0
+		if buf[i] == ' ' && buf[i-1] != '\\' {
+			return fieldsState, i, nil
+		}
+	}
+}
+
+func insertionSort(l, r int, buf []byte, indices []int) {
+	for i := l + 1; i < r; i++ {
+		for j := i; j > l && less(buf, indices, j, j-1); j-- {
+			indices[j], indices[j-1] = indices[j-1], indices[j]
+		}
+	}
+}
+
+func less(buf []byte, indices []int, i, j int) bool {
+	// This grabs the tag keys for i and j; it ignores the values
+	_, a := scanTo(buf, indices[i], '=')
+	_, b := scanTo(buf, indices[j], '=')
+	return bytes.Compare(a, b) < 0
+}
+
+func isFieldEscapeChar(b byte) bool {
+	for c := range escape.Codes {
+		if c == b {
+			return true
+		}
+	}
+	return false
+}
+
+// scanFields scans buf, starting at i for the fields section of a point.  It returns
+// the ending position and the byte slice of the fields within buf
+func scanFields(buf []byte, i int) (int, []byte, error) {
+	start := skipWhitespace(buf, i)
+	i = start
+	quoted := false
+
+	// tracks how many '=' we've seen
+	equals := 0
+
+	// tracks how many commas we've seen
+	commas := 0
+
+	for {
+		// reached the end of buf?
+		if i >= len(buf) {
+			break
+		}
+
+		// escaped characters?
+		if buf[i] == '\\' && i+1 < len(buf) {
+			i += 2
+			continue
+		}
+
+		// If the value is quoted, scan until we get to the end quote.
+		// Quotes are only significant in field values, not field keys.
+		if buf[i] == '"' && equals > commas {
+			quoted = !quoted
+			i++
+			continue
+		}
+
+		// If we see an =, ensure that there is at least one char before and after it
+		if buf[i] == '=' && !quoted {
+			equals++
+
+			// check for "... =123" but allow "a\ =123"
+			if buf[i-1] == ' ' && buf[i-2] != '\\' {
+				return i, buf[start:i], fmt.Errorf("missing field key")
+			}
+
+			// check for "...a=123,=456" but allow "a=123,a\,=456"
+			if buf[i-1] == ',' && buf[i-2] != '\\' {
+				return i, buf[start:i], fmt.Errorf("missing field key")
+			}
+
+			// check for "... value="
+			if i+1 >= len(buf) {
+				return i, buf[start:i], fmt.Errorf("missing field value")
+			}
+
+			// check for "... value=,value2=..."
+			if buf[i+1] == ',' || buf[i+1] == ' ' {
+				return i, buf[start:i], fmt.Errorf("missing field value")
+			}
+
+			if isNumeric(buf[i+1]) || buf[i+1] == '-' || buf[i+1] == 'N' || buf[i+1] == 'n' {
+				var err error
+				i, err = scanNumber(buf, i+1)
+				if err != nil {
+					return i, buf[start:i], err
+				}
+				continue
+			}
+			// If next byte is not a double-quote, the value must be a boolean
+			if buf[i+1] != '"' {
+				var err error
+				i, _, err = scanBoolean(buf, i+1)
+				if err != nil {
+					return i, buf[start:i], err
+				}
+				continue
+			}
+		}
+
+		if buf[i] == ',' && !quoted {
+			commas++
+		}
+
+		// reached end of block?
+		if buf[i] == ' ' && !quoted {
+			break
+		}
+		i++
+	}
+
+	if quoted {
+		return i, buf[start:i], fmt.Errorf("unbalanced quotes")
+	}
+
+	// check that all field sections had keys and values (e.g. prevent "a=1,b")
+	if equals == 0 || commas != equals-1 {
+		return i, buf[start:i], fmt.Errorf("invalid field format")
+	}
+
+	return i, buf[start:i], nil
+}
+
+// scanTime scans buf, starting at i for the time section of a point.  It returns
+// the ending position and the byte slice of the timestamp within buf, and an
+// error if the timestamp is not in the correct numeric format
+func scanTime(buf []byte, i int) (int, []byte, error) {
+	start := skipWhitespace(buf, i)
+	i = start
+	for {
+		// reached the end of buf?
+		if i >= len(buf) {
+			break
+		}
+
+		// Timestamps should be integers, make sure they are so we don't need to actually
+		// parse the timestamp until needed
+		if buf[i] < '0' || buf[i] > '9' {
+			// Handle negative timestamps
+			if i == start && buf[i] == '-' {
+				i++
+				continue
+			}
+			return i, buf[start:i], fmt.Errorf("bad timestamp")
+		}
+
+		// reached end of block?
+		if buf[i] == '\n' {
+			break
+		}
+		i++
+	}
+	return i, buf[start:i], nil
+}
+
+func isNumeric(b byte) bool {
+	return (b >= '0' && b <= '9') || b == '.'
+}
+
+// scanNumber returns the end position within buf, starting at i, after
+// scanning over buf for an integer or float.  It returns an
+// error if an invalid number is scanned.
+func scanNumber(buf []byte, i int) (int, error) {
+	start := i
+	var isInt bool
+
+	// Is negative number?
+	if i < len(buf) && buf[i] == '-' {
+		i++
+		// There must be more characters now, as just '-' is illegal.
+		if i == len(buf) {
+			return i, ErrInvalidNumber
+		}
+	}
+
+	// tracks whether we've seen a decimal point
+	decimal := false
+
+	// indicates the number is float in scientific notation
+	scientific := false
+
+	for {
+		if i >= len(buf) {
+			break
+		}
+
+		if buf[i] == ',' || buf[i] == ' ' {
+			break
+		}
+
+		if buf[i] == 'i' && i > start && !isInt {
+			isInt = true
+			i++
+			continue
+		}
+
+		if buf[i] == '.' {
+			// Can't have more than 1 decimal (e.g. 1.1.1 should fail)
+			if decimal {
+				return i, ErrInvalidNumber
+			}
+			decimal = true
+		}
+
+		// `e` is valid for floats but not as the first char
+		if i > start && (buf[i] == 'e' || buf[i] == 'E') {
+			scientific = true
+			i++
+			continue
+		}
+
+		// + and - are only valid at this point if they follow an e (scientific notation)
+		if (buf[i] == '+' || buf[i] == '-') && (buf[i-1] == 'e' || buf[i-1] == 'E') {
+			i++
+			continue
+		}
+
+		// NaN is an unsupported value
+		if i+2 < len(buf) && (buf[i] == 'N' || buf[i] == 'n') {
+			return i, ErrInvalidNumber
+		}
+
+		if !isNumeric(buf[i]) {
+			return i, ErrInvalidNumber
+		}
+		i++
+	}
+
+	if isInt && (decimal || scientific) {
+		return i, ErrInvalidNumber
+	}
+
+	numericDigits := i - start
+	if isInt {
+		numericDigits--
+	}
+	if decimal {
+		numericDigits--
+	}
+	if buf[start] == '-' {
+		numericDigits--
+	}
+
+	if numericDigits == 0 {
+		return i, ErrInvalidNumber
+	}
+
+	// It's more common that numbers will be within the min/max range for their type, but we need to prevent
+	// out-of-range numbers from being parsed successfully.  This uses some simple heuristics to decide
+	// if we should parse the number to the actual type.  It does not do it all the time because it incurs
+	// extra allocations, and we end up converting the type again when writing points to disk.
+	if isInt {
+		// Make sure the last char is an 'i' for integers (e.g. 9i10 is not valid)
+		if buf[i-1] != 'i' {
+			return i, ErrInvalidNumber
+		}
+		// Parse the int to check bounds the number of digits could be larger than the max range
+		// We subtract 1 from the index to remove the `i` from our tests
+		if len(buf[start:i-1]) >= maxInt64Digits || len(buf[start:i-1]) >= minInt64Digits {
+			if _, err := strconv.ParseInt(string(buf[start:i-1]), 10, 64); err != nil {
+				return i, fmt.Errorf("unable to parse integer %s: %s", buf[start:i-1], err)
+			}
+		}
+	} else {
+		// Parse the float to check bounds if it's scientific or the number of digits could be larger than the max range
+		if scientific || len(buf[start:i]) >= maxFloat64Digits || len(buf[start:i]) >= minFloat64Digits {
+			if _, err := strconv.ParseFloat(string(buf[start:i]), 64); err != nil {
+				return i, fmt.Errorf("invalid float")
+			}
+		}
+	}
+
+	return i, nil
+}
+
+// scanBoolean returns the end position within buf, starting at i, after
+// scanning over buf for a boolean. Valid values for a boolean are
+// t, T, true, TRUE, f, F, false, FALSE.  It returns an error if an invalid
+// boolean is scanned.
+func scanBoolean(buf []byte, i int) (int, []byte, error) {
+	start := i
+
+	if i < len(buf) && (buf[i] != 't' && buf[i] != 'f' && buf[i] != 'T' && buf[i] != 'F') {
+		return i, buf[start:i], fmt.Errorf("invalid boolean")
+	}
+
+	i++
+	for {
+		if i >= len(buf) {
+			break
+		}
+
+		if buf[i] == ',' || buf[i] == ' ' {
+			break
+		}
+		i++
+	}
+
+	// Single char bool (t, T, f, F) is ok
+	if i-start == 1 {
+		return i, buf[start:i], nil
+	}
+
+	// length must be 4 for true or TRUE
+	if (buf[start] == 't' || buf[start] == 'T') && i-start != 4 {
+		return i, buf[start:i], fmt.Errorf("invalid boolean")
+	}
+
+	// length must be 5 for false or FALSE
+	if (buf[start] == 'f' || buf[start] == 'F') && i-start != 5 {
+		return i, buf[start:i], fmt.Errorf("invalid boolean")
+	}
+
+	// Otherwise
+	valid := false
+	switch buf[start] {
+	case 't':
+		valid = bytes.Equal(buf[start:i], []byte("true"))
+	case 'f':
+		valid = bytes.Equal(buf[start:i], []byte("false"))
+	case 'T':
+		valid = bytes.Equal(buf[start:i], []byte("TRUE")) || bytes.Equal(buf[start:i], []byte("True"))
+	case 'F':
+		valid = bytes.Equal(buf[start:i], []byte("FALSE")) || bytes.Equal(buf[start:i], []byte("False"))
+	}
+
+	if !valid {
+		return i, buf[start:i], fmt.Errorf("invalid boolean")
+	}
+
+	return i, buf[start:i], nil
+
+}
+
+// skipWhitespace returns the end position within buf, starting at i, after
+// scanning over spaces, tabs, and NUL bytes
+func skipWhitespace(buf []byte, i int) int {
+	for i < len(buf) {
+		if buf[i] != ' ' && buf[i] != '\t' && buf[i] != 0 {
+			break
+		}
+		i++
+	}
+	return i
+}
+
+// scanLine returns the end position in buf and the next line found within
+// buf.
+func scanLine(buf []byte, i int) (int, []byte) {
+	start := i
+	quoted := false
+	fields := false
+
+	// tracks how many '=' and commas we've seen
+	// this duplicates some of the functionality in scanFields
+	equals := 0
+	commas := 0
+	for {
+		// reached the end of buf?
+		if i >= len(buf) {
+			break
+		}
+
+		// skip past escaped characters
+		if buf[i] == '\\' {
+			i += 2
+			continue
+		}
+
+		if buf[i] == ' ' {
+			fields = true
+		}
+
+		// If we see a double quote, makes sure it is not escaped
+		if fields {
+			if !quoted && buf[i] == '=' {
+				i++
+				equals++
+				continue
+			} else if !quoted && buf[i] == ',' {
+				i++
+				commas++
+				continue
+			} else if buf[i] == '"' && equals > commas {
+				i++
+				quoted = !quoted
+				continue
+			}
+		}
+
+		if buf[i] == '\n' && !quoted {
+			break
+		}
+
+		i++
+	}
+
+	return i, buf[start:i]
+}
+
+// scanTo returns the end position in buf and the next consecutive block
+// of bytes, starting from i and ending with stop byte, where stop byte
+// has not been escaped.
+//
+// If there are leading spaces, they are skipped.
+func scanTo(buf []byte, i int, stop byte) (int, []byte) {
+	start := i
+	for {
+		// reached the end of buf?
+		if i >= len(buf) {
+			break
+		}
+
+		// Reached unescaped stop value?
+		if buf[i] == stop && (i == 0 || buf[i-1] != '\\') {
+			break
+		}
+		i++
+	}
+
+	return i, buf[start:i]
+}
+
+// scanTo returns the end position in buf and the next consecutive block
+// of bytes, starting from i and ending with stop byte.  If there are leading
+// spaces, they are skipped.
+func scanToSpaceOr(buf []byte, i int, stop byte) (int, []byte) {
+	start := i
+	if buf[i] == stop || buf[i] == ' ' {
+		return i, buf[start:i]
+	}
+
+	for {
+		i++
+		if buf[i-1] == '\\' {
+			continue
+		}
+
+		// reached the end of buf?
+		if i >= len(buf) {
+			return i, buf[start:i]
+		}
+
+		// reached end of block?
+		if buf[i] == stop || buf[i] == ' ' {
+			return i, buf[start:i]
+		}
+	}
+}
+
+func scanTagValue(buf []byte, i int) (int, []byte) {
+	start := i
+	for {
+		if i >= len(buf) {
+			break
+		}
+
+		if buf[i] == ',' && buf[i-1] != '\\' {
+			break
+		}
+		i++
+	}
+	return i, buf[start:i]
+}
+
+func scanFieldValue(buf []byte, i int) (int, []byte) {
+	start := i
+	quoted := false
+	for {
+		if i >= len(buf) {
+			break
+		}
+
+		// Only escape char for a field value is a double-quote
+		if buf[i] == '\\' && i+1 < len(buf) && buf[i+1] == '"' {
+			i += 2
+			continue
+		}
+
+		// Quoted value? (e.g. string)
+		if buf[i] == '"' {
+			i++
+			quoted = !quoted
+			continue
+		}
+
+		if buf[i] == ',' && !quoted {
+			break
+		}
+		i++
+	}
+	return i, buf[start:i]
+}
+
+func escapeMeasurement(in []byte) []byte {
+	for b, esc := range measurementEscapeCodes {
+		in = bytes.Replace(in, []byte{b}, esc, -1)
+	}
+	return in
+}
+
+func unescapeMeasurement(in []byte) []byte {
+	for b, esc := range measurementEscapeCodes {
+		in = bytes.Replace(in, esc, []byte{b}, -1)
+	}
+	return in
+}
+
+func escapeTag(in []byte) []byte {
+	for b, esc := range tagEscapeCodes {
+		if bytes.IndexByte(in, b) != -1 {
+			in = bytes.Replace(in, []byte{b}, esc, -1)
+		}
+	}
+	return in
+}
+
+func unescapeTag(in []byte) []byte {
+	for b, esc := range tagEscapeCodes {
+		if bytes.IndexByte(in, b) != -1 {
+			in = bytes.Replace(in, esc, []byte{b}, -1)
+		}
+	}
+	return in
+}
+
+// escapeStringField returns a copy of in with any double quotes or
+// backslashes with escaped values
+func escapeStringField(in string) string {
+	var out []byte
+	i := 0
+	for {
+		if i >= len(in) {
+			break
+		}
+		// escape double-quotes
+		if in[i] == '\\' {
+			out = append(out, '\\')
+			out = append(out, '\\')
+			i++
+			continue
+		}
+		// escape double-quotes
+		if in[i] == '"' {
+			out = append(out, '\\')
+			out = append(out, '"')
+			i++
+			continue
+		}
+		out = append(out, in[i])
+		i++
+
+	}
+	return string(out)
+}
+
+// unescapeStringField returns a copy of in with any escaped double-quotes
+// or backslashes unescaped
+func unescapeStringField(in string) string {
+	if strings.IndexByte(in, '\\') == -1 {
+		return in
+	}
+
+	var out []byte
+	i := 0
+	for {
+		if i >= len(in) {
+			break
+		}
+		// unescape backslashes
+		if in[i] == '\\' && i+1 < len(in) && in[i+1] == '\\' {
+			out = append(out, '\\')
+			i += 2
+			continue
+		}
+		// unescape double-quotes
+		if in[i] == '\\' && i+1 < len(in) && in[i+1] == '"' {
+			out = append(out, '"')
+			i += 2
+			continue
+		}
+		out = append(out, in[i])
+		i++
+
+	}
+	return string(out)
+}
+
+// NewPoint returns a new point with the given measurement name, tags, fields and timestamp.  If
+// an unsupported field value (NaN) or out of range time is passed, this function returns an error.
+func NewPoint(name string, tags Tags, fields Fields, time time.Time) (Point, error) {
+	if len(fields) == 0 {
+		return nil, ErrPointMustHaveAField
+	}
+	if !time.IsZero() {
+		if err := CheckTime(time); err != nil {
+			return nil, err
+		}
+	}
+
+	for key, value := range fields {
+		if fv, ok := value.(float64); ok {
+			// Ensure the caller validates and handles invalid field values
+			if math.IsNaN(fv) {
+				return nil, fmt.Errorf("NaN is an unsupported value for field %s", key)
+			}
+		}
+		if len(key) == 0 {
+			return nil, fmt.Errorf("all fields must have non-empty names")
+		}
+	}
+
+	key := MakeKey([]byte(name), tags)
+	if len(key) > MaxKeyLength {
+		return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength)
+	}
+
+	return &point{
+		key:    key,
+		time:   time,
+		fields: fields.MarshalBinary(),
+	}, nil
+}
+
+// NewPointFromBytes returns a new Point from a marshalled Point.
+func NewPointFromBytes(b []byte) (Point, error) {
+	p := &point{}
+	if err := p.UnmarshalBinary(b); err != nil {
+		return nil, err
+	}
+	if len(p.Fields()) == 0 {
+		return nil, ErrPointMustHaveAField
+	}
+	return p, nil
+}
+
+// MustNewPoint returns a new point with the given measurement name, tags, fields and timestamp.  If
+// an unsupported field value (NaN) is passed, this function panics.
+func MustNewPoint(name string, tags Tags, fields Fields, time time.Time) Point {
+	pt, err := NewPoint(name, tags, fields, time)
+	if err != nil {
+		panic(err.Error())
+	}
+	return pt
+}
+
+func (p *point) Data() []byte {
+	return p.data
+}
+
+func (p *point) SetData(b []byte) {
+	p.data = b
+}
+
+func (p *point) Key() []byte {
+	return p.key
+}
+
+func (p *point) name() []byte {
+	_, name := scanTo(p.key, 0, ',')
+	return name
+}
+
+// Name return the measurement name for the point
+func (p *point) Name() string {
+	if p.cachedName != "" {
+		return p.cachedName
+	}
+	p.cachedName = string(escape.Unescape(p.name()))
+	return p.cachedName
+}
+
+// SetName updates the measurement name for the point
+func (p *point) SetName(name string) {
+	p.cachedName = ""
+	p.key = MakeKey([]byte(name), p.Tags())
+}
+
+// Time return the timestamp for the point
+func (p *point) Time() time.Time {
+	return p.time
+}
+
+// SetTime updates the timestamp for the point
+func (p *point) SetTime(t time.Time) {
+	p.time = t
+}
+
+// Tags returns the tag set for the point
+func (p *point) Tags() Tags {
+	return parseTags(p.key)
+}
+
+func parseTags(buf []byte) Tags {
+	tags := map[string]string{}
+
+	if len(buf) != 0 {
+		pos, name := scanTo(buf, 0, ',')
+
+		// it's an empyt key, so there are no tags
+		if len(name) == 0 {
+			return tags
+		}
+
+		i := pos + 1
+		var key, value []byte
+		for {
+			if i >= len(buf) {
+				break
+			}
+			i, key = scanTo(buf, i, '=')
+			i, value = scanTagValue(buf, i+1)
+
+			if len(value) == 0 {
+				continue
+			}
+
+			tags[string(unescapeTag(key))] = string(unescapeTag(value))
+
+			i++
+		}
+	}
+	return tags
+}
+
+// MakeKey creates a key for a set of tags.
+func MakeKey(name []byte, tags Tags) []byte {
+	// unescape the name and then re-escape it to avoid double escaping.
+	// The key should always be stored in escaped form.
+	return append(escapeMeasurement(unescapeMeasurement(name)), tags.HashKey()...)
+}
+
+// SetTags replaces the tags for the point
+func (p *point) SetTags(tags Tags) {
+	p.key = MakeKey([]byte(p.Name()), tags)
+}
+
+// AddTag adds or replaces a tag value for a point
+func (p *point) AddTag(key, value string) {
+	tags := p.Tags()
+	tags[key] = value
+	p.key = MakeKey([]byte(p.Name()), tags)
+}
+
+// Fields returns the fields for the point
+func (p *point) Fields() Fields {
+	if p.cachedFields != nil {
+		return p.cachedFields
+	}
+	p.cachedFields = p.unmarshalBinary()
+	return p.cachedFields
+}
+
+// SetPrecision will round a time to the specified precision
+func (p *point) SetPrecision(precision string) {
+	switch precision {
+	case "n":
+	case "u":
+		p.SetTime(p.Time().Truncate(time.Microsecond))
+	case "ms":
+		p.SetTime(p.Time().Truncate(time.Millisecond))
+	case "s":
+		p.SetTime(p.Time().Truncate(time.Second))
+	case "m":
+		p.SetTime(p.Time().Truncate(time.Minute))
+	case "h":
+		p.SetTime(p.Time().Truncate(time.Hour))
+	}
+}
+
+func (p *point) String() string {
+	if p.Time().IsZero() {
+		return string(p.Key()) + " " + string(p.fields)
+	}
+	return string(p.Key()) + " " + string(p.fields) + " " + strconv.FormatInt(p.UnixNano(), 10)
+}
+
+func (p *point) MarshalBinary() ([]byte, error) {
+	tb, err := p.time.MarshalBinary()
+	if err != nil {
+		return nil, err
+	}
+
+	b := make([]byte, 8+len(p.key)+len(p.fields)+len(tb))
+	i := 0
+
+	binary.BigEndian.PutUint32(b[i:], uint32(len(p.key)))
+	i += 4
+
+	i += copy(b[i:], p.key)
+
+	binary.BigEndian.PutUint32(b[i:i+4], uint32(len(p.fields)))
+	i += 4
+
+	i += copy(b[i:], p.fields)
+
+	copy(b[i:], tb)
+	return b, nil
+}
+
+func (p *point) UnmarshalBinary(b []byte) error {
+	var i int
+	keyLen := int(binary.BigEndian.Uint32(b[:4]))
+	i += int(4)
+
+	p.key = b[i : i+keyLen]
+	i += keyLen
+
+	fieldLen := int(binary.BigEndian.Uint32(b[i : i+4]))
+	i += int(4)
+
+	p.fields = b[i : i+fieldLen]
+	i += fieldLen
+
+	p.time = time.Now()
+	p.time.UnmarshalBinary(b[i:])
+	return nil
+}
+
+func (p *point) PrecisionString(precision string) string {
+	if p.Time().IsZero() {
+		return fmt.Sprintf("%s %s", p.Key(), string(p.fields))
+	}
+	return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields),
+		p.UnixNano()/GetPrecisionMultiplier(precision))
+}
+
+func (p *point) RoundedString(d time.Duration) string {
+	if p.Time().IsZero() {
+		return fmt.Sprintf("%s %s", p.Key(), string(p.fields))
+	}
+	return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields),
+		p.time.Round(d).UnixNano())
+}
+
+func (p *point) unmarshalBinary() Fields {
+	return newFieldsFromBinary(p.fields)
+}
+
+func (p *point) HashID() uint64 {
+	h := fnv.New64a()
+	h.Write(p.key)
+	sum := h.Sum64()
+	return sum
+}
+
+func (p *point) UnixNano() int64 {
+	return p.Time().UnixNano()
+}
+
+// Tags represents a mapping between a Point's tag names and their
+// values.
+type Tags map[string]string
+
+// HashKey hashes all of a tag's keys.
+func (t Tags) HashKey() []byte {
+	// Empty maps marshal to empty bytes.
+	if len(t) == 0 {
+		return nil
+	}
+
+	escaped := Tags{}
+	for k, v := range t {
+		ek := escapeTag([]byte(k))
+		ev := escapeTag([]byte(v))
+
+		if len(ev) > 0 {
+			escaped[string(ek)] = string(ev)
+		}
+	}
+
+	// Extract keys and determine final size.
+	sz := len(escaped) + (len(escaped) * 2) // separators
+	keys := make([]string, len(escaped)+1)
+	i := 0
+	for k, v := range escaped {
+		keys[i] = k
+		i++
+		sz += len(k) + len(v)
+	}
+	keys = keys[:i]
+	sort.Strings(keys)
+	// Generate marshaled bytes.
+	b := make([]byte, sz)
+	buf := b
+	idx := 0
+	for _, k := range keys {
+		buf[idx] = ','
+		idx++
+		copy(buf[idx:idx+len(k)], k)
+		idx += len(k)
+		buf[idx] = '='
+		idx++
+		v := escaped[k]
+		copy(buf[idx:idx+len(v)], v)
+		idx += len(v)
+	}
+	return b[:idx]
+}
+
+// Fields represents a mapping between a Point's field names and their
+// values.
+type Fields map[string]interface{}
+
+func parseNumber(val []byte) (interface{}, error) {
+	if val[len(val)-1] == 'i' {
+		val = val[:len(val)-1]
+		return strconv.ParseInt(string(val), 10, 64)
+	}
+	for i := 0; i < len(val); i++ {
+		// If there is a decimal or an N (NaN), I (Inf), parse as float
+		if val[i] == '.' || val[i] == 'N' || val[i] == 'n' || val[i] == 'I' || val[i] == 'i' || val[i] == 'e' {
+			return strconv.ParseFloat(string(val), 64)
+		}
+		if val[i] < '0' && val[i] > '9' {
+			return string(val), nil
+		}
+	}
+	return strconv.ParseFloat(string(val), 64)
+}
+
+func newFieldsFromBinary(buf []byte) Fields {
+	fields := make(Fields, 8)
+	var (
+		i              int
+		name, valueBuf []byte
+		value          interface{}
+		err            error
+	)
+	for i < len(buf) {
+
+		i, name = scanTo(buf, i, '=')
+		name = escape.Unescape(name)
+
+		i, valueBuf = scanFieldValue(buf, i+1)
+		if len(name) > 0 {
+			if len(valueBuf) == 0 {
+				fields[string(name)] = nil
+				continue
+			}
+
+			// If the first char is a double-quote, then unmarshal as string
+			if valueBuf[0] == '"' {
+				value = unescapeStringField(string(valueBuf[1 : len(valueBuf)-1]))
+				// Check for numeric characters and special NaN or Inf
+			} else if (valueBuf[0] >= '0' && valueBuf[0] <= '9') || valueBuf[0] == '-' || valueBuf[0] == '.' ||
+				valueBuf[0] == 'N' || valueBuf[0] == 'n' || // NaN
+				valueBuf[0] == 'I' || valueBuf[0] == 'i' { // Inf
+
+				value, err = parseNumber(valueBuf)
+				if err != nil {
+					panic(fmt.Sprintf("unable to parse number value '%v': %v", string(valueBuf), err))
+				}
+
+				// Otherwise parse it as bool
+			} else {
+				value, err = strconv.ParseBool(string(valueBuf))
+				if err != nil {
+					panic(fmt.Sprintf("unable to parse bool value '%v': %v\n", string(valueBuf), err))
+				}
+			}
+			fields[string(name)] = value
+		}
+		i++
+	}
+	return fields
+}
+
+// MarshalBinary encodes all the fields to their proper type and returns the binary
+// represenation
+// NOTE: uint64 is specifically not supported due to potential overflow when we decode
+// again later to an int64
+func (p Fields) MarshalBinary() []byte {
+	b := []byte{}
+	keys := make([]string, len(p))
+	i := 0
+	for k := range p {
+		keys[i] = k
+		i++
+	}
+	sort.Strings(keys)
+
+	for _, k := range keys {
+		v := p[k]
+		b = append(b, []byte(escape.String(k))...)
+		b = append(b, '=')
+		switch t := v.(type) {
+		case int:
+			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
+			b = append(b, 'i')
+		case int8:
+			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
+			b = append(b, 'i')
+		case int16:
+			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
+			b = append(b, 'i')
+		case int32:
+			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
+			b = append(b, 'i')
+		case int64:
+			b = append(b, []byte(strconv.FormatInt(t, 10))...)
+			b = append(b, 'i')
+		case uint:
+			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
+			b = append(b, 'i')
+		case uint8:
+			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
+			b = append(b, 'i')
+		case uint16:
+			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
+			b = append(b, 'i')
+		case uint32:
+			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
+			b = append(b, 'i')
+		case float32:
+			val := []byte(strconv.FormatFloat(float64(t), 'f', -1, 32))
+			b = append(b, val...)
+		case float64:
+			val := []byte(strconv.FormatFloat(t, 'f', -1, 64))
+			b = append(b, val...)
+		case bool:
+			b = append(b, []byte(strconv.FormatBool(t))...)
+		case []byte:
+			b = append(b, t...)
+		case string:
+			b = append(b, '"')
+			b = append(b, []byte(escapeStringField(t))...)
+			b = append(b, '"')
+		case nil:
+			// skip
+		default:
+			// Can't determine the type, so convert to string
+			b = append(b, '"')
+			b = append(b, []byte(escapeStringField(fmt.Sprintf("%v", v)))...)
+			b = append(b, '"')
+
+		}
+		b = append(b, ',')
+	}
+	if len(b) > 0 {
+		return b[0 : len(b)-1]
+	}
+	return b
+}
+
+type indexedSlice struct {
+	indices []int
+	b       []byte
+}
+
+func (s *indexedSlice) Less(i, j int) bool {
+	_, a := scanTo(s.b, s.indices[i], '=')
+	_, b := scanTo(s.b, s.indices[j], '=')
+	return bytes.Compare(a, b) < 0
+}
+
+func (s *indexedSlice) Swap(i, j int) {
+	s.indices[i], s.indices[j] = s.indices[j], s.indices[i]
+}
+
+func (s *indexedSlice) Len() int {
+	return len(s.indices)
+}

+ 60 - 0
Godeps/_workspace/src/github.com/influxdata/influxdb/models/rows.go

@@ -0,0 +1,60 @@
+package models
+
+import (
+	"hash/fnv"
+	"sort"
+)
+
+// Row represents a single row returned from the execution of a statement.
+type Row struct {
+	Name    string            `json:"name,omitempty"`
+	Tags    map[string]string `json:"tags,omitempty"`
+	Columns []string          `json:"columns,omitempty"`
+	Values  [][]interface{}   `json:"values,omitempty"`
+	Err     error             `json:"err,omitempty"`
+}
+
+// SameSeries returns true if r contains values for the same series as o.
+func (r *Row) SameSeries(o *Row) bool {
+	return r.tagsHash() == o.tagsHash() && r.Name == o.Name
+}
+
+// tagsHash returns a hash of tag key/value pairs.
+func (r *Row) tagsHash() uint64 {
+	h := fnv.New64a()
+	keys := r.tagsKeys()
+	for _, k := range keys {
+		h.Write([]byte(k))
+		h.Write([]byte(r.Tags[k]))
+	}
+	return h.Sum64()
+}
+
+// tagKeys returns a sorted list of tag keys.
+func (r *Row) tagsKeys() []string {
+	a := make([]string, 0, len(r.Tags))
+	for k := range r.Tags {
+		a = append(a, k)
+	}
+	sort.Strings(a)
+	return a
+}
+
+// Rows represents a collection of rows. Rows implements sort.Interface.
+type Rows []*Row
+
+func (p Rows) Len() int { return len(p) }
+
+func (p Rows) Less(i, j int) bool {
+	// Sort by name first.
+	if p[i].Name != p[j].Name {
+		return p[i].Name < p[j].Name
+	}
+
+	// Sort by tag set hash. Tags don't have a meaningful sort order so we
+	// just compute a hash and sort by that instead. This allows the tests
+	// to receive rows in a predictable order every time.
+	return p[i].tagsHash() < p[j].tagsHash()
+}
+
+func (p Rows) Swap(i, j int) { p[i], p[j] = p[j], p[i] }

+ 51 - 0
Godeps/_workspace/src/github.com/influxdata/influxdb/models/time.go

@@ -0,0 +1,51 @@
+package models
+
+// Helper time methods since parsing time can easily overflow and we only support a
+// specific time range.
+
+import (
+	"fmt"
+	"math"
+	"time"
+)
+
+var (
+	// MaxNanoTime is the maximum time that can be represented via int64 nanoseconds since the epoch.
+	MaxNanoTime = time.Unix(0, math.MaxInt64).UTC()
+	// MinNanoTime is the minumum time that can be represented via int64 nanoseconds since the epoch.
+	MinNanoTime = time.Unix(0, math.MinInt64).UTC()
+
+	// ErrTimeOutOfRange gets returned when time is out of the representable range using int64 nanoseconds since the epoch.
+	ErrTimeOutOfRange = fmt.Errorf("time outside range %s - %s", MinNanoTime, MaxNanoTime)
+)
+
+// SafeCalcTime safely calculates the time given. Will return error if the time is outside the
+// supported range.
+func SafeCalcTime(timestamp int64, precision string) (time.Time, error) {
+	mult := GetPrecisionMultiplier(precision)
+	if t, ok := safeSignedMult(timestamp, mult); ok {
+		return time.Unix(0, t).UTC(), nil
+	}
+
+	return time.Time{}, ErrTimeOutOfRange
+}
+
+// CheckTime checks that a time is within the safe range.
+func CheckTime(t time.Time) error {
+	if t.Before(MinNanoTime) || t.After(MaxNanoTime) {
+		return ErrTimeOutOfRange
+	}
+	return nil
+}
+
+// Perform the multiplication and check to make sure it didn't overflow.
+func safeSignedMult(a, b int64) (int64, bool) {
+	if a == 0 || b == 0 || a == 1 || b == 1 {
+		return a * b, true
+	}
+	if a == math.MinInt64 || b == math.MaxInt64 {
+		return 0, false
+	}
+	c := a * b
+	return c, c/b == a
+}

+ 53 - 0
Godeps/_workspace/src/github.com/influxdata/influxdb/pkg/escape/bytes.go

@@ -0,0 +1,53 @@
+package escape
+
+import "bytes"
+
+func Bytes(in []byte) []byte {
+	for b, esc := range Codes {
+		in = bytes.Replace(in, []byte{b}, esc, -1)
+	}
+	return in
+}
+
+func Unescape(in []byte) []byte {
+	if len(in) == 0 {
+		return nil
+	}
+
+	if bytes.IndexByte(in, '\\') == -1 {
+		return in
+	}
+
+	i := 0
+	inLen := len(in)
+	var out []byte
+
+	for {
+		if i >= inLen {
+			break
+		}
+		if in[i] == '\\' && i+1 < inLen {
+			switch in[i+1] {
+			case ',':
+				out = append(out, ',')
+				i += 2
+				continue
+			case '"':
+				out = append(out, '"')
+				i += 2
+				continue
+			case ' ':
+				out = append(out, ' ')
+				i += 2
+				continue
+			case '=':
+				out = append(out, '=')
+				i += 2
+				continue
+			}
+		}
+		out = append(out, in[i])
+		i += 1
+	}
+	return out
+}

+ 34 - 0
Godeps/_workspace/src/github.com/influxdata/influxdb/pkg/escape/strings.go

@@ -0,0 +1,34 @@
+package escape
+
+import "strings"
+
+var (
+	Codes = map[byte][]byte{
+		',': []byte(`\,`),
+		'"': []byte(`\"`),
+		' ': []byte(`\ `),
+		'=': []byte(`\=`),
+	}
+
+	codesStr = map[string]string{}
+)
+
+func init() {
+	for k, v := range Codes {
+		codesStr[string(k)] = string(v)
+	}
+}
+
+func UnescapeString(in string) string {
+	for b, esc := range codesStr {
+		in = strings.Replace(in, esc, b, -1)
+	}
+	return in
+}
+
+func String(in string) string {
+	for b, esc := range codesStr {
+		in = strings.Replace(in, b, esc, -1)
+	}
+	return in
+}