Просмотр исходного кода

feat(alerting): added missing godep libs

Torkel Ödegaard 9 лет назад
Родитель
Commit
461e6ae4ac
36 измененных файлов с 2435 добавлено и 2887 удалено
  1. 15 11
      Godeps/Godeps.json
  2. 21 0
      Godeps/_workspace/src/github.com/benbjohnson/clock/LICENSE
  3. 104 0
      Godeps/_workspace/src/github.com/benbjohnson/clock/README.md
  4. 319 0
      Godeps/_workspace/src/github.com/benbjohnson/clock/clock.go
  5. 16 0
      Godeps/_workspace/src/github.com/go-stack/stack/.travis.yml
  6. 13 0
      Godeps/_workspace/src/github.com/go-stack/stack/LICENSE.md
  7. 38 0
      Godeps/_workspace/src/github.com/go-stack/stack/README.md
  8. 349 0
      Godeps/_workspace/src/github.com/go-stack/stack/stack.go
  9. 10 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/.travis.yml
  10. 11 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/CONTRIBUTORS
  11. 13 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/LICENSE
  12. 70 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/README.md
  13. 333 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/doc.go
  14. 257 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/format.go
  15. 356 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/handler.go
  16. 26 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/handler_go13.go
  17. 23 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/handler_go14.go
  18. 208 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/logger.go
  19. 67 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/root.go
  20. 55 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/syslog.go
  21. 21 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/term/LICENSE
  22. 13 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_appengine.go
  23. 12 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_darwin.go
  24. 18 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_freebsd.go
  25. 14 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_linux.go
  26. 20 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_notwindows.go
  27. 7 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_openbsd.go
  28. 26 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_windows.go
  29. 0 267
      Godeps/_workspace/src/github.com/influxdata/influxdb/client/README.md
  30. 0 789
      Godeps/_workspace/src/github.com/influxdata/influxdb/client/influxdb.go
  31. 0 46
      Godeps/_workspace/src/github.com/influxdata/influxdb/models/consistency.go
  32. 0 1576
      Godeps/_workspace/src/github.com/influxdata/influxdb/models/points.go
  33. 0 60
      Godeps/_workspace/src/github.com/influxdata/influxdb/models/rows.go
  34. 0 51
      Godeps/_workspace/src/github.com/influxdata/influxdb/models/time.go
  35. 0 53
      Godeps/_workspace/src/github.com/influxdata/influxdb/pkg/escape/bytes.go
  36. 0 34
      Godeps/_workspace/src/github.com/influxdata/influxdb/pkg/escape/strings.go

+ 15 - 11
Godeps/Godeps.json

@@ -130,6 +130,10 @@
 			"Comment": "v1.0.0",
 			"Rev": "abb928e07c4108683d6b4d0b6ca08fe6bc0eee5f"
 		},
+		{
+			"ImportPath": "github.com/benbjohnson/clock",
+			"Rev": "a620c1cc9866f84a2550ad53f4f353ec030fa26b"
+		},
 		{
 			"ImportPath": "github.com/bmizerany/assert",
 			"Comment": "release.r60-6-ge17e998",
@@ -205,6 +209,11 @@
 			"Comment": "v1.2-171-g267b128",
 			"Rev": "267b128680c46286b9ca13475c3cca5de8f79bd7"
 		},
+		{
+			"ImportPath": "github.com/go-stack/stack",
+			"Comment": "v1.5.2",
+			"Rev": "100eb0c0a9c5b306ca2fb4f165df21d80ada4b82"
+		},
 		{
 			"ImportPath": "github.com/go-xorm/core",
 			"Comment": "v0.4.4-7-g9e608f7",
@@ -228,19 +237,14 @@
 			"Rev": "7e3c02b30806fa5779d3bdfc152ce4c6f40e7b38"
 		},
 		{
-			"ImportPath": "github.com/influxdata/influxdb/client",
-			"Comment": "v0.13.0-74-g2c9d0fc",
-			"Rev": "2c9d0fcc04eba3ffc88f2aafe8466874e384d80d"
-		},
-		{
-			"ImportPath": "github.com/influxdata/influxdb/models",
-			"Comment": "v0.13.0-74-g2c9d0fc",
-			"Rev": "2c9d0fcc04eba3ffc88f2aafe8466874e384d80d"
+			"ImportPath": "github.com/inconshreveable/log15",
+			"Comment": "v2.3-61-g20bca5a",
+			"Rev": "20bca5a7a57282e241fac83ec9ea42538027f1c1"
 		},
 		{
-			"ImportPath": "github.com/influxdata/influxdb/pkg/escape",
-			"Comment": "v0.13.0-74-g2c9d0fc",
-			"Rev": "2c9d0fcc04eba3ffc88f2aafe8466874e384d80d"
+			"ImportPath": "github.com/inconshreveable/log15/term",
+			"Comment": "v2.3-61-g20bca5a",
+			"Rev": "20bca5a7a57282e241fac83ec9ea42538027f1c1"
 		},
 		{
 			"ImportPath": "github.com/jmespath/go-jmespath",

+ 21 - 0
Godeps/_workspace/src/github.com/benbjohnson/clock/LICENSE

@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Ben Johnson
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

+ 104 - 0
Godeps/_workspace/src/github.com/benbjohnson/clock/README.md

@@ -0,0 +1,104 @@
+clock [![Build Status](https://drone.io/github.com/benbjohnson/clock/status.png)](https://drone.io/github.com/benbjohnson/clock/latest) [![Coverage Status](https://coveralls.io/repos/benbjohnson/clock/badge.png?branch=master)](https://coveralls.io/r/benbjohnson/clock?branch=master) [![GoDoc](https://godoc.org/github.com/benbjohnson/clock?status.png)](https://godoc.org/github.com/benbjohnson/clock) ![Project status](http://img.shields.io/status/experimental.png?color=red)
+=====
+
+Clock is a small library for mocking time in Go. It provides an interface
+around the standard library's [`time`][time] package so that the application
+can use the realtime clock while tests can use the mock clock.
+
+[time]: http://golang.org/pkg/time/
+
+
+## Usage
+
+### Realtime Clock
+
+Your application can maintain a `Clock` variable that will allow realtime and
+mock clocks to be interchangeable. For example, if you had an `Application` type:
+
+```go
+import "github.com/benbjohnson/clock"
+
+type Application struct {
+	Clock clock.Clock
+}
+```
+
+You could initialize it to use the realtime clock like this:
+
+```go
+var app Application
+app.Clock = clock.New()
+...
+```
+
+Then all timers and time-related functionality should be performed from the
+`Clock` variable.
+
+
+### Mocking time
+
+In your tests, you will want to use a `Mock` clock:
+
+```go
+import (
+	"testing"
+
+	"github.com/benbjohnson/clock"
+)
+
+func TestApplication_DoSomething(t *testing.T) {
+	mock := clock.NewMock()
+	app := Application{Clock: mock}
+	...
+}
+```
+
+Now that you've initialized your application to use the mock clock, you can
+adjust the time programmatically. The mock clock always starts from the Unix
+epoch (midnight, Jan 1, 1970 UTC).
+
+
+### Controlling time
+
+The mock clock provides the same functions that the standard library's `time`
+package provides. For example, to find the current time, you use the `Now()`
+function:
+
+```go
+mock := clock.NewMock()
+
+// Find the current time.
+mock.Now().UTC() // 1970-01-01 00:00:00 +0000 UTC
+
+// Move the clock forward.
+mock.Add(2 * time.Hour)
+
+// Check the time again. It's 2 hours later!
+mock.Now().UTC() // 1970-01-01 02:00:00 +0000 UTC
+```
+
+Timers and Tickers are also controlled by this same mock clock. They will only
+execute when the clock is moved forward:
+
+```go
+mock := clock.NewMock()
+count := 0
+
+// Kick off a timer to increment every 1 mock second.
+go func() {
+    ticker := mock.Ticker(1 * time.Second)
+    for {
+        <-ticker.C
+        count++
+    }
+}()
+runtime.Gosched()
+
+// Move the clock forward 10 seconds.
+mock.Add(10 * time.Second)
+
+// This prints 10.
+fmt.Println(count)
+```
+
+

+ 319 - 0
Godeps/_workspace/src/github.com/benbjohnson/clock/clock.go

@@ -0,0 +1,319 @@
+package clock
+
+import (
+	"sort"
+	"sync"
+	"time"
+)
+
+// Clock represents an interface to the functions in the standard library time
+// package. Two implementations are available in the clock package. The first
+// is a real-time clock which simply wraps the time package's functions. The
+// second is a mock clock which will only make forward progress when
+// programmatically adjusted.
+type Clock interface {
+	After(d time.Duration) <-chan time.Time
+	AfterFunc(d time.Duration, f func()) *Timer
+	Now() time.Time
+	Sleep(d time.Duration)
+	Tick(d time.Duration) <-chan time.Time
+	Ticker(d time.Duration) *Ticker
+	Timer(d time.Duration) *Timer
+}
+
+// New returns an instance of a real-time clock.
+func New() Clock {
+	return &clock{}
+}
+
+// clock implements a real-time clock by simply wrapping the time package functions.
+type clock struct{}
+
+func (c *clock) After(d time.Duration) <-chan time.Time { return time.After(d) }
+
+func (c *clock) AfterFunc(d time.Duration, f func()) *Timer {
+	return &Timer{timer: time.AfterFunc(d, f)}
+}
+
+func (c *clock) Now() time.Time { return time.Now() }
+
+func (c *clock) Sleep(d time.Duration) { time.Sleep(d) }
+
+func (c *clock) Tick(d time.Duration) <-chan time.Time { return time.Tick(d) }
+
+func (c *clock) Ticker(d time.Duration) *Ticker {
+	t := time.NewTicker(d)
+	return &Ticker{C: t.C, ticker: t}
+}
+
+func (c *clock) Timer(d time.Duration) *Timer {
+	t := time.NewTimer(d)
+	return &Timer{C: t.C, timer: t}
+}
+
+// Mock represents a mock clock that only moves forward programmatically.
+// It can be preferable to a real-time clock when testing time-based functionality.
+type Mock struct {
+	mu     sync.Mutex
+	now    time.Time   // current time
+	timers clockTimers // tickers & timers
+}
+
+// NewMock returns an instance of a mock clock.
+// The current time of the mock clock on initialization is the Unix epoch.
+func NewMock() *Mock {
+	return &Mock{now: time.Unix(0, 0)}
+}
+
+// Add moves the current time of the mock clock forward by the duration.
+// This should only be called from a single goroutine at a time.
+func (m *Mock) Add(d time.Duration) {
+	// Calculate the final current time.
+	t := m.now.Add(d)
+
+	// Continue to execute timers until there are no more before the new time.
+	for {
+		if !m.runNextTimer(t) {
+			break
+		}
+	}
+
+	// Ensure that we end with the new time.
+	m.mu.Lock()
+	m.now = t
+	m.mu.Unlock()
+
+	// Give a small buffer to make sure the other goroutines get handled.
+	gosched()
+}
+
+// Sets the current time of the mock clock to a specific one.
+// This should only be called from a single goroutine at a time.
+func (m *Mock) Set(t time.Time) {
+	// Continue to execute timers until there are no more before the new time.
+	for {
+		if !m.runNextTimer(t) {
+			break
+		}
+	}
+
+	// Ensure that we end with the new time.
+	m.mu.Lock()
+	m.now = t
+	m.mu.Unlock()
+
+	// Give a small buffer to make sure the other goroutines get handled.
+	gosched()
+}
+
+// runNextTimer executes the next timer in chronological order and moves the
+// current time to the timer's next tick time. The next timer is not executed if
+// its next tick time is after the max time. Returns true if a timer is executed.
+func (m *Mock) runNextTimer(max time.Time) bool {
+	m.mu.Lock()
+
+	// Sort timers by time.
+	sort.Sort(m.timers)
+
+	// If we have no more timers then exit.
+	if len(m.timers) == 0 {
+		m.mu.Unlock()
+		return false
+	}
+
+	// Retrieve next timer. Exit if next tick is after new time.
+	t := m.timers[0]
+	if t.Next().After(max) {
+		m.mu.Unlock()
+		return false
+	}
+
+	// Move "now" forward and unlock clock.
+	m.now = t.Next()
+	m.mu.Unlock()
+
+	// Execute timer.
+	t.Tick(m.now)
+	return true
+}
+
+// After waits for the duration to elapse and then sends the current time on the returned channel.
+func (m *Mock) After(d time.Duration) <-chan time.Time {
+	return m.Timer(d).C
+}
+
+// AfterFunc waits for the duration to elapse and then executes a function.
+// A Timer is returned that can be stopped.
+func (m *Mock) AfterFunc(d time.Duration, f func()) *Timer {
+	t := m.Timer(d)
+	t.C = nil
+	t.fn = f
+	return t
+}
+
+// Now returns the current wall time on the mock clock.
+func (m *Mock) Now() time.Time {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	return m.now
+}
+
+// Sleep pauses the goroutine for the given duration on the mock clock.
+// The clock must be moved forward in a separate goroutine.
+func (m *Mock) Sleep(d time.Duration) {
+	<-m.After(d)
+}
+
+// Tick is a convenience function for Ticker().
+// It will return a ticker channel that cannot be stopped.
+func (m *Mock) Tick(d time.Duration) <-chan time.Time {
+	return m.Ticker(d).C
+}
+
+// Ticker creates a new instance of Ticker.
+func (m *Mock) Ticker(d time.Duration) *Ticker {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	ch := make(chan time.Time, 1)
+	t := &Ticker{
+		C:    ch,
+		c:    ch,
+		mock: m,
+		d:    d,
+		next: m.now.Add(d),
+	}
+	m.timers = append(m.timers, (*internalTicker)(t))
+	return t
+}
+
+// Timer creates a new instance of Timer.
+func (m *Mock) Timer(d time.Duration) *Timer {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	ch := make(chan time.Time, 1)
+	t := &Timer{
+		C:       ch,
+		c:       ch,
+		mock:    m,
+		next:    m.now.Add(d),
+		stopped: false,
+	}
+	m.timers = append(m.timers, (*internalTimer)(t))
+	return t
+}
+
+func (m *Mock) removeClockTimer(t clockTimer) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	for i, timer := range m.timers {
+		if timer == t {
+			copy(m.timers[i:], m.timers[i+1:])
+			m.timers[len(m.timers)-1] = nil
+			m.timers = m.timers[:len(m.timers)-1]
+			break
+		}
+	}
+	sort.Sort(m.timers)
+}
+
+// clockTimer represents an object with an associated start time.
+type clockTimer interface {
+	Next() time.Time
+	Tick(time.Time)
+}
+
+// clockTimers represents a list of sortable timers.
+type clockTimers []clockTimer
+
+func (a clockTimers) Len() int           { return len(a) }
+func (a clockTimers) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a clockTimers) Less(i, j int) bool { return a[i].Next().Before(a[j].Next()) }
+
+// Timer represents a single event.
+// The current time will be sent on C, unless the timer was created by AfterFunc.
+type Timer struct {
+	C       <-chan time.Time
+	c       chan time.Time
+	timer   *time.Timer // realtime impl, if set
+	next    time.Time   // next tick time
+	mock    *Mock       // mock clock, if set
+	fn      func()      // AfterFunc function, if set
+	stopped bool        // True if stopped, false if running
+}
+
+// Stop turns off the timer. It returns true if the timer was still running.
+func (t *Timer) Stop() bool {
+	if t.timer != nil {
+		return t.timer.Stop()
+	}
+
+	registered := !t.stopped
+	t.mock.removeClockTimer((*internalTimer)(t))
+	t.stopped = true
+	return registered
+}
+
+// Reset changes the expiry time of the timer
+func (t *Timer) Reset(d time.Duration) bool {
+	if t.timer != nil {
+		return t.timer.Reset(d)
+	}
+
+	t.next = t.mock.now.Add(d)
+	registered := !t.stopped
+	if t.stopped {
+		t.mock.mu.Lock()
+		t.mock.timers = append(t.mock.timers, (*internalTimer)(t))
+		t.mock.mu.Unlock()
+	}
+	t.stopped = false
+	return registered
+}
+
+type internalTimer Timer
+
+func (t *internalTimer) Next() time.Time { return t.next }
+func (t *internalTimer) Tick(now time.Time) {
+	if t.fn != nil {
+		t.fn()
+	} else {
+		t.c <- now
+	}
+	t.mock.removeClockTimer((*internalTimer)(t))
+	t.stopped = true
+	gosched()
+}
+
+// Ticker holds a channel that receives "ticks" at regular intervals.
+type Ticker struct {
+	C      <-chan time.Time
+	c      chan time.Time
+	ticker *time.Ticker  // realtime impl, if set
+	next   time.Time     // next tick time
+	mock   *Mock         // mock clock, if set
+	d      time.Duration // time between ticks
+}
+
+// Stop turns off the ticker.
+func (t *Ticker) Stop() {
+	if t.ticker != nil {
+		t.ticker.Stop()
+	} else {
+		t.mock.removeClockTimer((*internalTicker)(t))
+	}
+}
+
+type internalTicker Ticker
+
+func (t *internalTicker) Next() time.Time { return t.next }
+func (t *internalTicker) Tick(now time.Time) {
+	select {
+	case t.c <- now:
+	default:
+	}
+	t.next = now.Add(t.d)
+	gosched()
+}
+
+// Sleep momentarily so that other goroutines can process.
+func gosched() { time.Sleep(1 * time.Millisecond) }

+ 16 - 0
Godeps/_workspace/src/github.com/go-stack/stack/.travis.yml

@@ -0,0 +1,16 @@
+language: go
+sudo: false
+go:
+  - 1.2
+  - 1.3
+  - 1.4
+  - 1.5
+  - 1.6
+  - tip
+
+before_install:
+  - go get github.com/mattn/goveralls
+  - go get golang.org/x/tools/cmd/cover
+
+script:
+  - goveralls -service=travis-ci

+ 13 - 0
Godeps/_workspace/src/github.com/go-stack/stack/LICENSE.md

@@ -0,0 +1,13 @@
+Copyright 2014 Chris Hines
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.

+ 38 - 0
Godeps/_workspace/src/github.com/go-stack/stack/README.md

@@ -0,0 +1,38 @@
+[![GoDoc](https://godoc.org/github.com/go-stack/stack?status.svg)](https://godoc.org/github.com/go-stack/stack)
+[![Go Report Card](https://goreportcard.com/badge/go-stack/stack)](https://goreportcard.com/report/go-stack/stack)
+[![TravisCI](https://travis-ci.org/go-stack/stack.svg?branch=master)](https://travis-ci.org/go-stack/stack)
+[![Coverage Status](https://coveralls.io/repos/github/go-stack/stack/badge.svg?branch=master)](https://coveralls.io/github/go-stack/stack?branch=master)
+
+# stack
+
+Package stack implements utilities to capture, manipulate, and format call
+stacks. It provides a simpler API than package runtime.
+
+The implementation takes care of the minutia and special cases of interpreting
+the program counter (pc) values returned by runtime.Callers.
+
+## Versioning
+
+Package stack publishes releases via [semver](http://semver.org/) compatible Git
+tags prefixed with a single 'v'. The master branch always contains the latest
+release. The develop branch contains unreleased commits.
+
+## Formatting
+
+Package stack's types implement fmt.Formatter, which provides a simple and
+flexible way to declaratively configure formatting when used with logging or
+error tracking packages.
+
+```go
+func DoTheThing() {
+    c := stack.Caller(0)
+    log.Print(c)          // "source.go:10"
+    log.Printf("%+v", c)  // "pkg/path/source.go:10"
+    log.Printf("%n", c)   // "DoTheThing"
+
+    s := stack.Trace().TrimRuntime()
+    log.Print(s)          // "[source.go:15 caller.go:42 main.go:14]"
+}
+```
+
+See the docs for all of the supported formatting options.

+ 349 - 0
Godeps/_workspace/src/github.com/go-stack/stack/stack.go

@@ -0,0 +1,349 @@
+// Package stack implements utilities to capture, manipulate, and format call
+// stacks. It provides a simpler API than package runtime.
+//
+// The implementation takes care of the minutia and special cases of
+// interpreting the program counter (pc) values returned by runtime.Callers.
+//
+// Package stack's types implement fmt.Formatter, which provides a simple and
+// flexible way to declaratively configure formatting when used with logging
+// or error tracking packages.
+package stack
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"runtime"
+	"strconv"
+	"strings"
+)
+
+// Call records a single function invocation from a goroutine stack.
+type Call struct {
+	fn *runtime.Func
+	pc uintptr
+}
+
+// Caller returns a Call from the stack of the current goroutine. The argument
+// skip is the number of stack frames to ascend, with 0 identifying the
+// calling function.
+func Caller(skip int) Call {
+	var pcs [2]uintptr
+	n := runtime.Callers(skip+1, pcs[:])
+
+	var c Call
+
+	if n < 2 {
+		return c
+	}
+
+	c.pc = pcs[1]
+	if runtime.FuncForPC(pcs[0]) != sigpanic {
+		c.pc--
+	}
+	c.fn = runtime.FuncForPC(c.pc)
+	return c
+}
+
+// String implements fmt.Stringer. It is equivalent to fmt.Sprintf("%v", c).
+func (c Call) String() string {
+	return fmt.Sprint(c)
+}
+
+// MarshalText implements encoding.TextMarshaler. It formats the Call the same
+// as fmt.Sprintf("%v", c).
+func (c Call) MarshalText() ([]byte, error) {
+	if c.fn == nil {
+		return nil, ErrNoFunc
+	}
+	buf := bytes.Buffer{}
+	fmt.Fprint(&buf, c)
+	return buf.Bytes(), nil
+}
+
+// ErrNoFunc means that the Call has a nil *runtime.Func. The most likely
+// cause is a Call with the zero value.
+var ErrNoFunc = errors.New("no call stack information")
+
+// Format implements fmt.Formatter with support for the following verbs.
+//
+//    %s    source file
+//    %d    line number
+//    %n    function name
+//    %v    equivalent to %s:%d
+//
+// It accepts the '+' and '#' flags for most of the verbs as follows.
+//
+//    %+s   path of source file relative to the compile time GOPATH
+//    %#s   full path of source file
+//    %+n   import path qualified function name
+//    %+v   equivalent to %+s:%d
+//    %#v   equivalent to %#s:%d
+func (c Call) Format(s fmt.State, verb rune) {
+	if c.fn == nil {
+		fmt.Fprintf(s, "%%!%c(NOFUNC)", verb)
+		return
+	}
+
+	switch verb {
+	case 's', 'v':
+		file, line := c.fn.FileLine(c.pc)
+		switch {
+		case s.Flag('#'):
+			// done
+		case s.Flag('+'):
+			file = file[pkgIndex(file, c.fn.Name()):]
+		default:
+			const sep = "/"
+			if i := strings.LastIndex(file, sep); i != -1 {
+				file = file[i+len(sep):]
+			}
+		}
+		io.WriteString(s, file)
+		if verb == 'v' {
+			buf := [7]byte{':'}
+			s.Write(strconv.AppendInt(buf[:1], int64(line), 10))
+		}
+
+	case 'd':
+		_, line := c.fn.FileLine(c.pc)
+		buf := [6]byte{}
+		s.Write(strconv.AppendInt(buf[:0], int64(line), 10))
+
+	case 'n':
+		name := c.fn.Name()
+		if !s.Flag('+') {
+			const pathSep = "/"
+			if i := strings.LastIndex(name, pathSep); i != -1 {
+				name = name[i+len(pathSep):]
+			}
+			const pkgSep = "."
+			if i := strings.Index(name, pkgSep); i != -1 {
+				name = name[i+len(pkgSep):]
+			}
+		}
+		io.WriteString(s, name)
+	}
+}
+
+// PC returns the program counter for this call frame; multiple frames may
+// have the same PC value.
+func (c Call) PC() uintptr {
+	return c.pc
+}
+
+// name returns the import path qualified name of the function containing the
+// call.
+func (c Call) name() string {
+	if c.fn == nil {
+		return "???"
+	}
+	return c.fn.Name()
+}
+
+func (c Call) file() string {
+	if c.fn == nil {
+		return "???"
+	}
+	file, _ := c.fn.FileLine(c.pc)
+	return file
+}
+
+func (c Call) line() int {
+	if c.fn == nil {
+		return 0
+	}
+	_, line := c.fn.FileLine(c.pc)
+	return line
+}
+
+// CallStack records a sequence of function invocations from a goroutine
+// stack.
+type CallStack []Call
+
+// String implements fmt.Stringer. It is equivalent to fmt.Sprintf("%v", cs).
+func (cs CallStack) String() string {
+	return fmt.Sprint(cs)
+}
+
+var (
+	openBracketBytes  = []byte("[")
+	closeBracketBytes = []byte("]")
+	spaceBytes        = []byte(" ")
+)
+
+// MarshalText implements encoding.TextMarshaler. It formats the CallStack the
+// same as fmt.Sprintf("%v", cs).
+func (cs CallStack) MarshalText() ([]byte, error) {
+	buf := bytes.Buffer{}
+	buf.Write(openBracketBytes)
+	for i, pc := range cs {
+		if pc.fn == nil {
+			return nil, ErrNoFunc
+		}
+		if i > 0 {
+			buf.Write(spaceBytes)
+		}
+		fmt.Fprint(&buf, pc)
+	}
+	buf.Write(closeBracketBytes)
+	return buf.Bytes(), nil
+}
+
+// Format implements fmt.Formatter by printing the CallStack as square brackets
+// ([, ]) surrounding a space separated list of Calls each formatted with the
+// supplied verb and options.
+func (cs CallStack) Format(s fmt.State, verb rune) {
+	s.Write(openBracketBytes)
+	for i, pc := range cs {
+		if i > 0 {
+			s.Write(spaceBytes)
+		}
+		pc.Format(s, verb)
+	}
+	s.Write(closeBracketBytes)
+}
+
+// findSigpanic intentionally executes faulting code to generate a stack trace
+// containing an entry for runtime.sigpanic.
+func findSigpanic() *runtime.Func {
+	var fn *runtime.Func
+	var p *int
+	func() int {
+		defer func() {
+			if p := recover(); p != nil {
+				var pcs [512]uintptr
+				n := runtime.Callers(2, pcs[:])
+				for _, pc := range pcs[:n] {
+					f := runtime.FuncForPC(pc)
+					if f.Name() == "runtime.sigpanic" {
+						fn = f
+						break
+					}
+				}
+			}
+		}()
+		// intentional nil pointer dereference to trigger sigpanic
+		return *p
+	}()
+	return fn
+}
+
+var sigpanic = findSigpanic()
+
+// Trace returns a CallStack for the current goroutine with element 0
+// identifying the calling function.
+func Trace() CallStack {
+	var pcs [512]uintptr
+	n := runtime.Callers(2, pcs[:])
+	cs := make([]Call, n)
+
+	for i, pc := range pcs[:n] {
+		pcFix := pc
+		if i > 0 && cs[i-1].fn != sigpanic {
+			pcFix--
+		}
+		cs[i] = Call{
+			fn: runtime.FuncForPC(pcFix),
+			pc: pcFix,
+		}
+	}
+
+	return cs
+}
+
+// TrimBelow returns a slice of the CallStack with all entries below c
+// removed.
+func (cs CallStack) TrimBelow(c Call) CallStack {
+	for len(cs) > 0 && cs[0].pc != c.pc {
+		cs = cs[1:]
+	}
+	return cs
+}
+
+// TrimAbove returns a slice of the CallStack with all entries above c
+// removed.
+func (cs CallStack) TrimAbove(c Call) CallStack {
+	for len(cs) > 0 && cs[len(cs)-1].pc != c.pc {
+		cs = cs[:len(cs)-1]
+	}
+	return cs
+}
+
+// pkgIndex returns the index that results in file[index:] being the path of
+// file relative to the compile time GOPATH, and file[:index] being the
+// $GOPATH/src/ portion of file. funcName must be the name of a function in
+// file as returned by runtime.Func.Name.
+func pkgIndex(file, funcName string) int {
+	// As of Go 1.6.2 there is no direct way to know the compile time GOPATH
+	// at runtime, but we can infer the number of path segments in the GOPATH.
+	// We note that runtime.Func.Name() returns the function name qualified by
+	// the import path, which does not include the GOPATH. Thus we can trim
+	// segments from the beginning of the file path until the number of path
+	// separators remaining is one more than the number of path separators in
+	// the function name. For example, given:
+	//
+	//    GOPATH     /home/user
+	//    file       /home/user/src/pkg/sub/file.go
+	//    fn.Name()  pkg/sub.Type.Method
+	//
+	// We want to produce:
+	//
+	//    file[:idx] == /home/user/src/
+	//    file[idx:] == pkg/sub/file.go
+	//
+	// From this we can easily see that fn.Name() has one less path separator
+	// than our desired result for file[idx:]. We count separators from the
+	// end of the file path until it finds two more than in the function name
+	// and then move one character forward to preserve the initial path
+	// segment without a leading separator.
+	const sep = "/"
+	i := len(file)
+	for n := strings.Count(funcName, sep) + 2; n > 0; n-- {
+		i = strings.LastIndex(file[:i], sep)
+		if i == -1 {
+			i = -len(sep)
+			break
+		}
+	}
+	// get back to 0 or trim the leading separator
+	return i + len(sep)
+}
+
+var runtimePath string
+
+func init() {
+	var pcs [1]uintptr
+	runtime.Callers(0, pcs[:])
+	fn := runtime.FuncForPC(pcs[0])
+	file, _ := fn.FileLine(pcs[0])
+
+	idx := pkgIndex(file, fn.Name())
+
+	runtimePath = file[:idx]
+	if runtime.GOOS == "windows" {
+		runtimePath = strings.ToLower(runtimePath)
+	}
+}
+
+func inGoroot(c Call) bool {
+	file := c.file()
+	if len(file) == 0 || file[0] == '?' {
+		return true
+	}
+	if runtime.GOOS == "windows" {
+		file = strings.ToLower(file)
+	}
+	return strings.HasPrefix(file, runtimePath) || strings.HasSuffix(file, "/_testmain.go")
+}
+
+// TrimRuntime returns a slice of the CallStack with the topmost entries from
+// the go runtime removed. It considers any calls originating from unknown
+// files, files under GOROOT, or _testmain.go as part of the runtime.
+func (cs CallStack) TrimRuntime() CallStack {
+	for len(cs) > 0 && inGoroot(cs[len(cs)-1]) {
+		cs = cs[:len(cs)-1]
+	}
+	return cs
+}

+ 10 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/.travis.yml

@@ -0,0 +1,10 @@
+language: go
+
+go:
+  - 1.1
+  - 1.2
+  - 1.3
+  - 1.4
+  - 1.5
+  - 1.6
+  - tip

+ 11 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/CONTRIBUTORS

@@ -0,0 +1,11 @@
+Contributors to log15:
+
+- Aaron L 
+- Alan Shreve 
+- Chris Hines 
+- Ciaran Downey 
+- Dmitry Chestnykh 
+- Evan Shaw 
+- Péter Szilágyi 
+- Trevor Gattis 
+- Vincent Vanackere 

+ 13 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/LICENSE

@@ -0,0 +1,13 @@
+Copyright 2014 Alan Shreve
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.

+ 70 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/README.md

@@ -0,0 +1,70 @@
+![obligatory xkcd](http://imgs.xkcd.com/comics/standards.png)
+
+# log15 [![godoc reference](https://godoc.org/github.com/inconshreveable/log15?status.png)](https://godoc.org/github.com/inconshreveable/log15) [![Build Status](https://travis-ci.org/inconshreveable/log15.svg?branch=master)](https://travis-ci.org/inconshreveable/log15)
+
+Package log15 provides an opinionated, simple toolkit for best-practice logging in Go (golang) that is both human and machine readable. It is modeled after the Go standard library's [`io`](http://golang.org/pkg/io/) and [`net/http`](http://golang.org/pkg/net/http/) packages and is an alternative to the standard library's [`log`](http://golang.org/pkg/log/) package. 
+
+## Features
+- A simple, easy-to-understand API
+- Promotes structured logging by encouraging use of key/value pairs
+- Child loggers which inherit and add their own private context
+- Lazy evaluation of expensive operations
+- Simple Handler interface allowing for construction of flexible, custom logging configurations with a tiny API.
+- Color terminal support
+- Built-in support for logging to files, streams, syslog, and the network
+- Support for forking records to multiple handlers, buffering records for output, failing over from failed handler writes, + more
+
+## Versioning
+The API of the master branch of log15 should always be considered unstable. If you want to rely on a stable API,
+you must vendor the library.
+
+## Importing
+
+```go
+import log "github.com/inconshreveable/log15"
+```
+
+## Examples
+
+```go
+// all loggers can have key/value context
+srvlog := log.New("module", "app/server")
+
+// all log messages can have key/value context 
+srvlog.Warn("abnormal conn rate", "rate", curRate, "low", lowRate, "high", highRate)
+
+// child loggers with inherited context
+connlog := srvlog.New("raddr", c.RemoteAddr())
+connlog.Info("connection open")
+
+// lazy evaluation
+connlog.Debug("ping remote", "latency", log.Lazy{pingRemote})
+
+// flexible configuration
+srvlog.SetHandler(log.MultiHandler(
+    log.StreamHandler(os.Stderr, log.LogfmtFormat()),
+    log.LvlFilterHandler(
+        log.LvlError,
+        log.Must.FileHandler("errors.json", log.JsonFormat()))))
+```
+
+## Breaking API Changes
+The following commits broke API stability. This reference is intended to help you understand the consequences of updating to a newer version
+of log15.
+
+- 57a084d014d4150152b19e4e531399a7145d1540 - Added a `Get()` method to the `Logger` interface to retrieve the current handler
+- 93404652ee366648fa622b64d1e2b67d75a3094a - `Record` field `Call` changed to `stack.Call` with switch to `github.com/go-stack/stack`
+- a5e7613673c73281f58e15a87d2cf0cf111e8152 - Restored `syslog.Priority` argument to the `SyslogXxx` handler constructors
+
+## FAQ
+
+### The varargs style is brittle and error prone! Can I have type safety please?
+Yes. Use `log.Ctx`:
+
+```go
+srvlog := log.New(log.Ctx{"module": "app/server"})
+srvlog.Warn("abnormal conn rate", log.Ctx{"rate": curRate, "low": lowRate, "high": highRate})
+```
+
+## License
+Apache

+ 333 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/doc.go

@@ -0,0 +1,333 @@
+/*
+Package log15 provides an opinionated, simple toolkit for best-practice logging that is
+both human and machine readable. It is modeled after the standard library's io and net/http
+packages.
+
+This package enforces you to only log key/value pairs. Keys must be strings. Values may be
+any type that you like. The default output format is logfmt, but you may also choose to use
+JSON instead if that suits you. Here's how you log:
+
+    log.Info("page accessed", "path", r.URL.Path, "user_id", user.id)
+
+This will output a line that looks like:
+
+     lvl=info t=2014-05-02T16:07:23-0700 msg="page accessed" path=/org/71/profile user_id=9
+
+Getting Started
+
+To get started, you'll want to import the library:
+
+    import log "github.com/inconshreveable/log15"
+
+
+Now you're ready to start logging:
+
+    func main() {
+        log.Info("Program starting", "args", os.Args())
+    }
+
+
+Convention
+
+Because recording a human-meaningful message is common and good practice, the first argument to every
+logging method is the value to the *implicit* key 'msg'.
+
+Additionally, the level you choose for a message will be automatically added with the key 'lvl', and so
+will the current timestamp with key 't'.
+
+You may supply any additional context as a set of key/value pairs to the logging function. log15 allows
+you to favor terseness, ordering, and speed over safety. This is a reasonable tradeoff for
+logging functions. You don't need to explicitly state keys/values, log15 understands that they alternate
+in the variadic argument list:
+
+    log.Warn("size out of bounds", "low", lowBound, "high", highBound, "val", val)
+
+If you really do favor your type-safety, you may choose to pass a log.Ctx instead:
+
+    log.Warn("size out of bounds", log.Ctx{"low": lowBound, "high": highBound, "val": val})
+
+
+Context loggers
+
+Frequently, you want to add context to a logger so that you can track actions associated with it. An http
+request is a good example. You can easily create new loggers that have context that is automatically included
+with each log line:
+
+    requestlogger := log.New("path", r.URL.Path)
+
+    // later
+    requestlogger.Debug("db txn commit", "duration", txnTimer.Finish())
+
+This will output a log line that includes the path context that is attached to the logger:
+
+    lvl=dbug t=2014-05-02T16:07:23-0700 path=/repo/12/add_hook msg="db txn commit" duration=0.12
+
+
+Handlers
+
+The Handler interface defines where log lines are printed to and how they are formatted. Handler is a
+single interface that is inspired by net/http's handler interface:
+
+    type Handler interface {
+        Log(r *Record) error
+    }
+
+
+Handlers can filter records, format them, or dispatch to multiple other Handlers.
+This package implements a number of Handlers for common logging patterns that are
+easily composed to create flexible, custom logging structures.
+
+Here's an example handler that prints logfmt output to Stdout:
+
+    handler := log.StreamHandler(os.Stdout, log.LogfmtFormat())
+
+Here's an example handler that defers to two other handlers. One handler only prints records
+from the rpc package in logfmt to standard out. The other prints records at Error level
+or above in JSON formatted output to the file /var/log/service.json
+
+    handler := log.MultiHandler(
+        log.LvlFilterHandler(log.LvlError, log.Must.FileHandler("/var/log/service.json", log.JsonFormat())),
+        log.MatchFilterHandler("pkg", "app/rpc", log.StdoutHandler())
+    )
+
+Logging File Names and Line Numbers
+
+This package implements three Handlers that add debugging information to the
+context, CallerFileHandler, CallerFuncHandler and CallerStackHandler. Here's
+an example that adds the source file and line number of each logging call to
+the context.
+
+    h := log.CallerFileHandler(log.StdoutHandler())
+    log.Root().SetHandler(h)
+    ...
+    log.Error("open file", "err", err)
+
+This will output a line that looks like:
+
+    lvl=eror t=2014-05-02T16:07:23-0700 msg="open file" err="file not found" caller=data.go:42
+
+Here's an example that logs the call stack rather than just the call site.
+
+    h := log.CallerStackHandler("%+v", log.StdoutHandler())
+    log.Root().SetHandler(h)
+    ...
+    log.Error("open file", "err", err)
+
+This will output a line that looks like:
+
+    lvl=eror t=2014-05-02T16:07:23-0700 msg="open file" err="file not found" stack="[pkg/data.go:42 pkg/cmd/main.go]"
+
+The "%+v" format instructs the handler to include the path of the source file
+relative to the compile time GOPATH. The github.com/go-stack/stack package
+documents the full list of formatting verbs and modifiers available.
+
+Custom Handlers
+
+The Handler interface is so simple that it's also trivial to write your own. Let's create an
+example handler which tries to write to one handler, but if that fails it falls back to
+writing to another handler and includes the error that it encountered when trying to write
+to the primary. This might be useful when trying to log over a network socket, but if that
+fails you want to log those records to a file on disk.
+
+    type BackupHandler struct {
+        Primary Handler
+        Secondary Handler
+    }
+
+    func (h *BackupHandler) Log (r *Record) error {
+        err := h.Primary.Log(r)
+        if err != nil {
+            r.Ctx = append(ctx, "primary_err", err)
+            return h.Secondary.Log(r)
+        }
+        return nil
+    }
+
+This pattern is so useful that a generic version that handles an arbitrary number of Handlers
+is included as part of this library called FailoverHandler.
+
+Logging Expensive Operations
+
+Sometimes, you want to log values that are extremely expensive to compute, but you don't want to pay
+the price of computing them if you haven't turned up your logging level to a high level of detail.
+
+This package provides a simple type to annotate a logging operation that you want to be evaluated
+lazily, just when it is about to be logged, so that it would not be evaluated if an upstream Handler
+filters it out. Just wrap any function which takes no arguments with the log.Lazy type. For example:
+
+    func factorRSAKey() (factors []int) {
+        // return the factors of a very large number
+    }
+
+    log.Debug("factors", log.Lazy{factorRSAKey})
+
+If this message is not logged for any reason (like logging at the Error level), then
+factorRSAKey is never evaluated.
+
+Dynamic context values
+
+The same log.Lazy mechanism can be used to attach context to a logger which you want to be
+evaluated when the message is logged, but not when the logger is created. For example, let's imagine
+a game where you have Player objects:
+
+    type Player struct {
+        name string
+        alive bool
+        log.Logger
+    }
+
+You always want to log a player's name and whether they're alive or dead, so when you create the player
+object, you might do:
+
+    p := &Player{name: name, alive: true}
+    p.Logger = log.New("name", p.name, "alive", p.alive)
+
+Only now, even after a player has died, the logger will still report they are alive because the logging
+context is evaluated when the logger was created. By using the Lazy wrapper, we can defer the evaluation
+of whether the player is alive or not to each log message, so that the log records will reflect the player's
+current state no matter when the log message is written:
+
+    p := &Player{name: name, alive: true}
+    isAlive := func() bool { return p.alive }
+    player.Logger = log.New("name", p.name, "alive", log.Lazy{isAlive})
+
+Terminal Format
+
+If log15 detects that stdout is a terminal, it will configure the default
+handler for it (which is log.StdoutHandler) to use TerminalFormat. This format
+logs records nicely for your terminal, including color-coded output based
+on log level.
+
+Error Handling
+
+Because log15 allows you to step around the type system, there are a few ways you can specify
+invalid arguments to the logging functions. You could, for example, wrap something that is not
+a zero-argument function with log.Lazy or pass a context key that is not a string. Since logging libraries
+are typically the mechanism by which errors are reported, it would be onerous for the logging functions
+to return errors. Instead, log15 handles errors by making these guarantees to you:
+
+- Any log record containing an error will still be printed with the error explained to you as part of the log record.
+
+- Any log record containing an error will include the context key LOG15_ERROR, enabling you to easily
+(and if you like, automatically) detect if any of your logging calls are passing bad values.
+
+Understanding this, you might wonder why the Handler interface can return an error value in its Log method. Handlers
+are encouraged to return errors only if they fail to write their log records out to an external source like if the
+syslog daemon is not responding. This allows the construction of useful handlers which cope with those failures
+like the FailoverHandler.
+
+Library Use
+
+log15 is intended to be useful for library authors as a way to provide configurable logging to
+users of their library. Best practice for use in a library is to always disable all output for your logger
+by default and to provide a public Logger instance that consumers of your library can configure. Like so:
+
+    package yourlib
+
+    import "github.com/inconshreveable/log15"
+
+    var Log = log.New()
+
+    func init() {
+        Log.SetHandler(log.DiscardHandler())
+    }
+
+Users of your library may then enable it if they like:
+
+    import "github.com/inconshreveable/log15"
+    import "example.com/yourlib"
+
+    func main() {
+        handler := // custom handler setup
+        yourlib.Log.SetHandler(handler)
+    }
+
+Best practices attaching logger context
+
+The ability to attach context to a logger is a powerful one. Where should you do it and why?
+I favor embedding a Logger directly into any persistent object in my application and adding
+unique, tracing context keys to it. For instance, imagine I am writing a web browser:
+
+    type Tab struct {
+        url string
+        render *RenderingContext
+        // ...
+
+        Logger
+    }
+
+    func NewTab(url string) *Tab {
+        return &Tab {
+            // ...
+            url: url,
+
+            Logger: log.New("url", url),
+        }
+    }
+
+When a new tab is created, I assign a logger to it with the url of
+the tab as context so it can easily be traced through the logs.
+Now, whenever we perform any operation with the tab, we'll log with its
+embedded logger and it will include the tab title automatically:
+
+    tab.Debug("moved position", "idx", tab.idx)
+
+There's only one problem. What if the tab url changes? We could
+use log.Lazy to make sure the current url is always written, but that
+would mean that we couldn't trace a tab's full lifetime through our
+logs after the user navigates to a new URL.
+
+Instead, think about what values to attach to your loggers the
+same way you think about what to use as a key in a SQL database schema.
+If it's possible to use a natural key that is unique for the lifetime of the
+object, do so. But otherwise, log15's ext package has a handy RandId
+function to let you generate what you might call "surrogate keys"
+They're just random hex identifiers to use for tracing. Back to our
+Tab example, we would prefer to set up our Logger like so:
+
+        import logext "github.com/inconshreveable/log15/ext"
+
+        t := &Tab {
+            // ...
+            url: url,
+        }
+
+        t.Logger = log.New("id", logext.RandId(8), "url", log.Lazy{t.getUrl})
+        return t
+
+Now we'll have a unique traceable identifier even across loading new urls, but
+we'll still be able to see the tab's current url in the log messages.
+
+Must
+
+For all Handler functions which can return an error, there is a version of that
+function which will return no error but panics on failure. They are all available
+on the Must object. For example:
+
+    log.Must.FileHandler("/path", log.JsonFormat)
+    log.Must.NetHandler("tcp", ":1234", log.JsonFormat)
+
+Inspiration and Credit
+
+All of the following excellent projects inspired the design of this library:
+
+code.google.com/p/log4go
+
+github.com/op/go-logging
+
+github.com/technoweenie/grohl
+
+github.com/Sirupsen/logrus
+
+github.com/kr/logfmt
+
+github.com/spacemonkeygo/spacelog
+
+golang's stdlib, notably io and net/http
+
+The Name
+
+https://xkcd.com/927/
+
+*/
+package log15

+ 257 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/format.go

@@ -0,0 +1,257 @@
+package log15
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+const (
+	timeFormat     = "2006-01-02T15:04:05-0700"
+	termTimeFormat = "01-02|15:04:05"
+	floatFormat    = 'f'
+	termMsgJust    = 40
+)
+
+type Format interface {
+	Format(r *Record) []byte
+}
+
+// FormatFunc returns a new Format object which uses
+// the given function to perform record formatting.
+func FormatFunc(f func(*Record) []byte) Format {
+	return formatFunc(f)
+}
+
+type formatFunc func(*Record) []byte
+
+func (f formatFunc) Format(r *Record) []byte {
+	return f(r)
+}
+
+// TerminalFormat formats log records optimized for human readability on
+// a terminal with color-coded level output and terser human friendly timestamp.
+// This format should only be used for interactive programs or while developing.
+//
+//     [TIME] [LEVEL] MESSAGE key=value key=value ...
+//
+// Example:
+//
+//     [May 16 20:58:45] [DBUG] remove route ns=haproxy addr=127.0.0.1:50002
+//
+func TerminalFormat() Format {
+	return FormatFunc(func(r *Record) []byte {
+		var color = 0
+		switch r.Lvl {
+		case LvlCrit:
+			color = 35
+		case LvlError:
+			color = 31
+		case LvlWarn:
+			color = 33
+		case LvlInfo:
+			color = 32
+		case LvlDebug:
+			color = 36
+		}
+
+		b := &bytes.Buffer{}
+		lvl := strings.ToUpper(r.Lvl.String())
+		if color > 0 {
+			fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %s ", color, lvl, r.Time.Format(termTimeFormat), r.Msg)
+		} else {
+			fmt.Fprintf(b, "[%s] [%s] %s ", lvl, r.Time.Format(termTimeFormat), r.Msg)
+		}
+
+		// try to justify the log output for short messages
+		if len(r.Ctx) > 0 && len(r.Msg) < termMsgJust {
+			b.Write(bytes.Repeat([]byte{' '}, termMsgJust-len(r.Msg)))
+		}
+
+		// print the keys logfmt style
+		logfmt(b, r.Ctx, color)
+		return b.Bytes()
+	})
+}
+
+// LogfmtFormat prints records in logfmt format, an easy machine-parseable but human-readable
+// format for key/value pairs.
+//
+// For more details see: http://godoc.org/github.com/kr/logfmt
+//
+func LogfmtFormat() Format {
+	return FormatFunc(func(r *Record) []byte {
+		common := []interface{}{r.KeyNames.Time, r.Time, r.KeyNames.Lvl, r.Lvl, r.KeyNames.Msg, r.Msg}
+		buf := &bytes.Buffer{}
+		logfmt(buf, append(common, r.Ctx...), 0)
+		return buf.Bytes()
+	})
+}
+
+func logfmt(buf *bytes.Buffer, ctx []interface{}, color int) {
+	for i := 0; i < len(ctx); i += 2 {
+		if i != 0 {
+			buf.WriteByte(' ')
+		}
+
+		k, ok := ctx[i].(string)
+		v := formatLogfmtValue(ctx[i+1])
+		if !ok {
+			k, v = errorKey, formatLogfmtValue(k)
+		}
+
+		// XXX: we should probably check that all of your key bytes aren't invalid
+		if color > 0 {
+			fmt.Fprintf(buf, "\x1b[%dm%s\x1b[0m=%s", color, k, v)
+		} else {
+			fmt.Fprintf(buf, "%s=%s", k, v)
+		}
+	}
+
+	buf.WriteByte('\n')
+}
+
+// JsonFormat formats log records as JSON objects separated by newlines.
+// It is the equivalent of JsonFormatEx(false, true).
+func JsonFormat() Format {
+	return JsonFormatEx(false, true)
+}
+
+// JsonFormatEx formats log records as JSON objects. If pretty is true,
+// records will be pretty-printed. If lineSeparated is true, records
+// will be logged with a new line between each record.
+func JsonFormatEx(pretty, lineSeparated bool) Format {
+	jsonMarshal := json.Marshal
+	if pretty {
+		jsonMarshal = func(v interface{}) ([]byte, error) {
+			return json.MarshalIndent(v, "", "    ")
+		}
+	}
+
+	return FormatFunc(func(r *Record) []byte {
+		props := make(map[string]interface{})
+
+		props[r.KeyNames.Time] = r.Time
+		props[r.KeyNames.Lvl] = r.Lvl.String()
+		props[r.KeyNames.Msg] = r.Msg
+
+		for i := 0; i < len(r.Ctx); i += 2 {
+			k, ok := r.Ctx[i].(string)
+			if !ok {
+				props[errorKey] = fmt.Sprintf("%+v is not a string key", r.Ctx[i])
+			}
+			props[k] = formatJsonValue(r.Ctx[i+1])
+		}
+
+		b, err := jsonMarshal(props)
+		if err != nil {
+			b, _ = jsonMarshal(map[string]string{
+				errorKey: err.Error(),
+			})
+			return b
+		}
+
+		if lineSeparated {
+			b = append(b, '\n')
+		}
+
+		return b
+	})
+}
+
+func formatShared(value interface{}) (result interface{}) {
+	defer func() {
+		if err := recover(); err != nil {
+			if v := reflect.ValueOf(value); v.Kind() == reflect.Ptr && v.IsNil() {
+				result = "nil"
+			} else {
+				panic(err)
+			}
+		}
+	}()
+
+	switch v := value.(type) {
+	case time.Time:
+		return v.Format(timeFormat)
+
+	case error:
+		return v.Error()
+
+	case fmt.Stringer:
+		return v.String()
+
+	default:
+		return v
+	}
+}
+
+func formatJsonValue(value interface{}) interface{} {
+	value = formatShared(value)
+	switch value.(type) {
+	case int, int8, int16, int32, int64, float32, float64, uint, uint8, uint16, uint32, uint64, string:
+		return value
+	default:
+		return fmt.Sprintf("%+v", value)
+	}
+}
+
+// formatValue formats a value for serialization
+func formatLogfmtValue(value interface{}) string {
+	if value == nil {
+		return "nil"
+	}
+
+	value = formatShared(value)
+	switch v := value.(type) {
+	case bool:
+		return strconv.FormatBool(v)
+	case float32:
+		return strconv.FormatFloat(float64(v), floatFormat, 3, 64)
+	case float64:
+		return strconv.FormatFloat(v, floatFormat, 3, 64)
+	case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
+		return fmt.Sprintf("%d", value)
+	case string:
+		return escapeString(v)
+	default:
+		return escapeString(fmt.Sprintf("%+v", value))
+	}
+}
+
+func escapeString(s string) string {
+	needQuotes := false
+	e := bytes.Buffer{}
+	e.WriteByte('"')
+	for _, r := range s {
+		if r <= ' ' || r == '=' || r == '"' {
+			needQuotes = true
+		}
+
+		switch r {
+		case '\\', '"':
+			e.WriteByte('\\')
+			e.WriteByte(byte(r))
+		case '\n':
+			e.WriteByte('\\')
+			e.WriteByte('n')
+		case '\r':
+			e.WriteByte('\\')
+			e.WriteByte('r')
+		case '\t':
+			e.WriteByte('\\')
+			e.WriteByte('t')
+		default:
+			e.WriteRune(r)
+		}
+	}
+	e.WriteByte('"')
+	start, stop := 0, e.Len()
+	if !needQuotes {
+		start, stop = 1, stop-1
+	}
+	return string(e.Bytes()[start:stop])
+}

+ 356 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/handler.go

@@ -0,0 +1,356 @@
+package log15
+
+import (
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"reflect"
+	"sync"
+
+	"github.com/go-stack/stack"
+)
+
+// A Logger prints its log records by writing to a Handler.
+// The Handler interface defines where and how log records are written.
+// Handlers are composable, providing you great flexibility in combining
+// them to achieve the logging structure that suits your applications.
+type Handler interface {
+	Log(r *Record) error
+}
+
+// FuncHandler returns a Handler that logs records with the given
+// function.
+func FuncHandler(fn func(r *Record) error) Handler {
+	return funcHandler(fn)
+}
+
+type funcHandler func(r *Record) error
+
+func (h funcHandler) Log(r *Record) error {
+	return h(r)
+}
+
+// StreamHandler writes log records to an io.Writer
+// with the given format. StreamHandler can be used
+// to easily begin writing log records to other
+// outputs.
+//
+// StreamHandler wraps itself with LazyHandler and SyncHandler
+// to evaluate Lazy objects and perform safe concurrent writes.
+func StreamHandler(wr io.Writer, fmtr Format) Handler {
+	h := FuncHandler(func(r *Record) error {
+		_, err := wr.Write(fmtr.Format(r))
+		return err
+	})
+	return LazyHandler(SyncHandler(h))
+}
+
+// SyncHandler can be wrapped around a handler to guarantee that
+// only a single Log operation can proceed at a time. It's necessary
+// for thread-safe concurrent writes.
+func SyncHandler(h Handler) Handler {
+	var mu sync.Mutex
+	return FuncHandler(func(r *Record) error {
+		defer mu.Unlock()
+		mu.Lock()
+		return h.Log(r)
+	})
+}
+
+// FileHandler returns a handler which writes log records to the given file
+// using the given format. If the path
+// already exists, FileHandler will append to the given file. If it does not,
+// FileHandler will create the file with mode 0644.
+func FileHandler(path string, fmtr Format) (Handler, error) {
+	f, err := os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
+	if err != nil {
+		return nil, err
+	}
+	return closingHandler{f, StreamHandler(f, fmtr)}, nil
+}
+
+// NetHandler opens a socket to the given address and writes records
+// over the connection.
+func NetHandler(network, addr string, fmtr Format) (Handler, error) {
+	conn, err := net.Dial(network, addr)
+	if err != nil {
+		return nil, err
+	}
+
+	return closingHandler{conn, StreamHandler(conn, fmtr)}, nil
+}
+
+// XXX: closingHandler is essentially unused at the moment
+// it's meant for a future time when the Handler interface supports
+// a possible Close() operation
+type closingHandler struct {
+	io.WriteCloser
+	Handler
+}
+
+func (h *closingHandler) Close() error {
+	return h.WriteCloser.Close()
+}
+
+// CallerFileHandler returns a Handler that adds the line number and file of
+// the calling function to the context with key "caller".
+func CallerFileHandler(h Handler) Handler {
+	return FuncHandler(func(r *Record) error {
+		r.Ctx = append(r.Ctx, "caller", fmt.Sprint(r.Call))
+		return h.Log(r)
+	})
+}
+
+// CallerFuncHandler returns a Handler that adds the calling function name to
+// the context with key "fn".
+func CallerFuncHandler(h Handler) Handler {
+	return FuncHandler(func(r *Record) error {
+		r.Ctx = append(r.Ctx, "fn", fmt.Sprintf("%+n", r.Call))
+		return h.Log(r)
+	})
+}
+
+// CallerStackHandler returns a Handler that adds a stack trace to the context
+// with key "stack". The stack trace is formated as a space separated list of
+// call sites inside matching []'s. The most recent call site is listed first.
+// Each call site is formatted according to format. See the documentation of
+// package github.com/go-stack/stack for the list of supported formats.
+func CallerStackHandler(format string, h Handler) Handler {
+	return FuncHandler(func(r *Record) error {
+		s := stack.Trace().TrimBelow(r.Call).TrimRuntime()
+		if len(s) > 0 {
+			r.Ctx = append(r.Ctx, "stack", fmt.Sprintf(format, s))
+		}
+		return h.Log(r)
+	})
+}
+
+// FilterHandler returns a Handler that only writes records to the
+// wrapped Handler if the given function evaluates true. For example,
+// to only log records where the 'err' key is not nil:
+//
+//    logger.SetHandler(FilterHandler(func(r *Record) bool {
+//        for i := 0; i < len(r.Ctx); i += 2 {
+//            if r.Ctx[i] == "err" {
+//                return r.Ctx[i+1] != nil
+//            }
+//        }
+//        return false
+//    }, h))
+//
+func FilterHandler(fn func(r *Record) bool, h Handler) Handler {
+	return FuncHandler(func(r *Record) error {
+		if fn(r) {
+			return h.Log(r)
+		}
+		return nil
+	})
+}
+
+// MatchFilterHandler returns a Handler that only writes records
+// to the wrapped Handler if the given key in the logged
+// context matches the value. For example, to only log records
+// from your ui package:
+//
+//    log.MatchFilterHandler("pkg", "app/ui", log.StdoutHandler)
+//
+func MatchFilterHandler(key string, value interface{}, h Handler) Handler {
+	return FilterHandler(func(r *Record) (pass bool) {
+		switch key {
+		case r.KeyNames.Lvl:
+			return r.Lvl == value
+		case r.KeyNames.Time:
+			return r.Time == value
+		case r.KeyNames.Msg:
+			return r.Msg == value
+		}
+
+		for i := 0; i < len(r.Ctx); i += 2 {
+			if r.Ctx[i] == key {
+				return r.Ctx[i+1] == value
+			}
+		}
+		return false
+	}, h)
+}
+
+// LvlFilterHandler returns a Handler that only writes
+// records which are less than the given verbosity
+// level to the wrapped Handler. For example, to only
+// log Error/Crit records:
+//
+//     log.LvlFilterHandler(log.Error, log.StdoutHandler)
+//
+func LvlFilterHandler(maxLvl Lvl, h Handler) Handler {
+	return FilterHandler(func(r *Record) (pass bool) {
+		return r.Lvl <= maxLvl
+	}, h)
+}
+
+// A MultiHandler dispatches any write to each of its handlers.
+// This is useful for writing different types of log information
+// to different locations. For example, to log to a file and
+// standard error:
+//
+//     log.MultiHandler(
+//         log.Must.FileHandler("/var/log/app.log", log.LogfmtFormat()),
+//         log.StderrHandler)
+//
+func MultiHandler(hs ...Handler) Handler {
+	return FuncHandler(func(r *Record) error {
+		for _, h := range hs {
+			// what to do about failures?
+			h.Log(r)
+		}
+		return nil
+	})
+}
+
+// A FailoverHandler writes all log records to the first handler
+// specified, but will failover and write to the second handler if
+// the first handler has failed, and so on for all handlers specified.
+// For example you might want to log to a network socket, but failover
+// to writing to a file if the network fails, and then to
+// standard out if the file write fails:
+//
+//     log.FailoverHandler(
+//         log.Must.NetHandler("tcp", ":9090", log.JsonFormat()),
+//         log.Must.FileHandler("/var/log/app.log", log.LogfmtFormat()),
+//         log.StdoutHandler)
+//
+// All writes that do not go to the first handler will add context with keys of
+// the form "failover_err_{idx}" which explain the error encountered while
+// trying to write to the handlers before them in the list.
+func FailoverHandler(hs ...Handler) Handler {
+	return FuncHandler(func(r *Record) error {
+		var err error
+		for i, h := range hs {
+			err = h.Log(r)
+			if err == nil {
+				return nil
+			} else {
+				r.Ctx = append(r.Ctx, fmt.Sprintf("failover_err_%d", i), err)
+			}
+		}
+
+		return err
+	})
+}
+
+// ChannelHandler writes all records to the given channel.
+// It blocks if the channel is full. Useful for async processing
+// of log messages, it's used by BufferedHandler.
+func ChannelHandler(recs chan<- *Record) Handler {
+	return FuncHandler(func(r *Record) error {
+		recs <- r
+		return nil
+	})
+}
+
+// BufferedHandler writes all records to a buffered
+// channel of the given size which flushes into the wrapped
+// handler whenever it is available for writing. Since these
+// writes happen asynchronously, all writes to a BufferedHandler
+// never return an error and any errors from the wrapped handler are ignored.
+func BufferedHandler(bufSize int, h Handler) Handler {
+	recs := make(chan *Record, bufSize)
+	go func() {
+		for m := range recs {
+			_ = h.Log(m)
+		}
+	}()
+	return ChannelHandler(recs)
+}
+
+// LazyHandler writes all values to the wrapped handler after evaluating
+// any lazy functions in the record's context. It is already wrapped
+// around StreamHandler and SyslogHandler in this library, you'll only need
+// it if you write your own Handler.
+func LazyHandler(h Handler) Handler {
+	return FuncHandler(func(r *Record) error {
+		// go through the values (odd indices) and reassign
+		// the values of any lazy fn to the result of its execution
+		hadErr := false
+		for i := 1; i < len(r.Ctx); i += 2 {
+			lz, ok := r.Ctx[i].(Lazy)
+			if ok {
+				v, err := evaluateLazy(lz)
+				if err != nil {
+					hadErr = true
+					r.Ctx[i] = err
+				} else {
+					if cs, ok := v.(stack.CallStack); ok {
+						v = cs.TrimBelow(r.Call).TrimRuntime()
+					}
+					r.Ctx[i] = v
+				}
+			}
+		}
+
+		if hadErr {
+			r.Ctx = append(r.Ctx, errorKey, "bad lazy")
+		}
+
+		return h.Log(r)
+	})
+}
+
+func evaluateLazy(lz Lazy) (interface{}, error) {
+	t := reflect.TypeOf(lz.Fn)
+
+	if t.Kind() != reflect.Func {
+		return nil, fmt.Errorf("INVALID_LAZY, not func: %+v", lz.Fn)
+	}
+
+	if t.NumIn() > 0 {
+		return nil, fmt.Errorf("INVALID_LAZY, func takes args: %+v", lz.Fn)
+	}
+
+	if t.NumOut() == 0 {
+		return nil, fmt.Errorf("INVALID_LAZY, no func return val: %+v", lz.Fn)
+	}
+
+	value := reflect.ValueOf(lz.Fn)
+	results := value.Call([]reflect.Value{})
+	if len(results) == 1 {
+		return results[0].Interface(), nil
+	} else {
+		values := make([]interface{}, len(results))
+		for i, v := range results {
+			values[i] = v.Interface()
+		}
+		return values, nil
+	}
+}
+
+// DiscardHandler reports success for all writes but does nothing.
+// It is useful for dynamically disabling logging at runtime via
+// a Logger's SetHandler method.
+func DiscardHandler() Handler {
+	return FuncHandler(func(r *Record) error {
+		return nil
+	})
+}
+
+// The Must object provides the following Handler creation functions
+// which instead of returning an error parameter only return a Handler
+// and panic on failure: FileHandler, NetHandler, SyslogHandler, SyslogNetHandler
+var Must muster
+
+func must(h Handler, err error) Handler {
+	if err != nil {
+		panic(err)
+	}
+	return h
+}
+
+type muster struct{}
+
+func (m muster) FileHandler(path string, fmtr Format) Handler {
+	return must(FileHandler(path, fmtr))
+}
+
+func (m muster) NetHandler(network, addr string, fmtr Format) Handler {
+	return must(NetHandler(network, addr, fmtr))
+}

+ 26 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/handler_go13.go

@@ -0,0 +1,26 @@
+// +build !go1.4
+
+package log15
+
+import (
+	"sync/atomic"
+	"unsafe"
+)
+
+// swapHandler wraps another handler that may be swapped out
+// dynamically at runtime in a thread-safe fashion.
+type swapHandler struct {
+	handler unsafe.Pointer
+}
+
+func (h *swapHandler) Log(r *Record) error {
+	return h.Get().Log(r)
+}
+
+func (h *swapHandler) Get() Handler {
+	return *(*Handler)(atomic.LoadPointer(&h.handler))
+}
+
+func (h *swapHandler) Swap(newHandler Handler) {
+	atomic.StorePointer(&h.handler, unsafe.Pointer(&newHandler))
+}

+ 23 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/handler_go14.go

@@ -0,0 +1,23 @@
+// +build go1.4
+
+package log15
+
+import "sync/atomic"
+
+// swapHandler wraps another handler that may be swapped out
+// dynamically at runtime in a thread-safe fashion.
+type swapHandler struct {
+	handler atomic.Value
+}
+
+func (h *swapHandler) Log(r *Record) error {
+	return (*h.handler.Load().(*Handler)).Log(r)
+}
+
+func (h *swapHandler) Swap(newHandler Handler) {
+	h.handler.Store(&newHandler)
+}
+
+func (h *swapHandler) Get() Handler {
+	return *h.handler.Load().(*Handler)
+}

+ 208 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/logger.go

@@ -0,0 +1,208 @@
+package log15
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/go-stack/stack"
+)
+
+const timeKey = "t"
+const lvlKey = "lvl"
+const msgKey = "msg"
+const errorKey = "LOG15_ERROR"
+
+type Lvl int
+
+const (
+	LvlCrit Lvl = iota
+	LvlError
+	LvlWarn
+	LvlInfo
+	LvlDebug
+)
+
+// Returns the name of a Lvl
+func (l Lvl) String() string {
+	switch l {
+	case LvlDebug:
+		return "dbug"
+	case LvlInfo:
+		return "info"
+	case LvlWarn:
+		return "warn"
+	case LvlError:
+		return "eror"
+	case LvlCrit:
+		return "crit"
+	default:
+		panic("bad level")
+	}
+}
+
+// Returns the appropriate Lvl from a string name.
+// Useful for parsing command line args and configuration files.
+func LvlFromString(lvlString string) (Lvl, error) {
+	switch lvlString {
+	case "debug", "dbug":
+		return LvlDebug, nil
+	case "info":
+		return LvlInfo, nil
+	case "warn":
+		return LvlWarn, nil
+	case "error", "eror":
+		return LvlError, nil
+	case "crit":
+		return LvlCrit, nil
+	default:
+		return LvlDebug, fmt.Errorf("Unknown level: %v", lvlString)
+	}
+}
+
+// A Record is what a Logger asks its handler to write
+type Record struct {
+	Time     time.Time
+	Lvl      Lvl
+	Msg      string
+	Ctx      []interface{}
+	Call     stack.Call
+	KeyNames RecordKeyNames
+}
+
+type RecordKeyNames struct {
+	Time string
+	Msg  string
+	Lvl  string
+}
+
+// A Logger writes key/value pairs to a Handler
+type Logger interface {
+	// New returns a new Logger that has this logger's context plus the given context
+	New(ctx ...interface{}) Logger
+
+	// GetHandler gets the handler associated with the logger.
+	GetHandler() Handler
+
+	// SetHandler updates the logger to write records to the specified handler.
+	SetHandler(h Handler)
+
+	// Log a message at the given level with context key/value pairs
+	Debug(msg string, ctx ...interface{})
+	Info(msg string, ctx ...interface{})
+	Warn(msg string, ctx ...interface{})
+	Error(msg string, ctx ...interface{})
+	Crit(msg string, ctx ...interface{})
+}
+
+type logger struct {
+	ctx []interface{}
+	h   *swapHandler
+}
+
+func (l *logger) write(msg string, lvl Lvl, ctx []interface{}) {
+	l.h.Log(&Record{
+		Time: time.Now(),
+		Lvl:  lvl,
+		Msg:  msg,
+		Ctx:  newContext(l.ctx, ctx),
+		Call: stack.Caller(2),
+		KeyNames: RecordKeyNames{
+			Time: timeKey,
+			Msg:  msgKey,
+			Lvl:  lvlKey,
+		},
+	})
+}
+
+func (l *logger) New(ctx ...interface{}) Logger {
+	child := &logger{newContext(l.ctx, ctx), new(swapHandler)}
+	child.SetHandler(l.h)
+	return child
+}
+
+func newContext(prefix []interface{}, suffix []interface{}) []interface{} {
+	normalizedSuffix := normalize(suffix)
+	newCtx := make([]interface{}, len(prefix)+len(normalizedSuffix))
+	n := copy(newCtx, prefix)
+	copy(newCtx[n:], normalizedSuffix)
+	return newCtx
+}
+
+func (l *logger) Debug(msg string, ctx ...interface{}) {
+	l.write(msg, LvlDebug, ctx)
+}
+
+func (l *logger) Info(msg string, ctx ...interface{}) {
+	l.write(msg, LvlInfo, ctx)
+}
+
+func (l *logger) Warn(msg string, ctx ...interface{}) {
+	l.write(msg, LvlWarn, ctx)
+}
+
+func (l *logger) Error(msg string, ctx ...interface{}) {
+	l.write(msg, LvlError, ctx)
+}
+
+func (l *logger) Crit(msg string, ctx ...interface{}) {
+	l.write(msg, LvlCrit, ctx)
+}
+
+func (l *logger) GetHandler() Handler {
+	return l.h.Get()
+}
+
+func (l *logger) SetHandler(h Handler) {
+	l.h.Swap(h)
+}
+
+func normalize(ctx []interface{}) []interface{} {
+	// if the caller passed a Ctx object, then expand it
+	if len(ctx) == 1 {
+		if ctxMap, ok := ctx[0].(Ctx); ok {
+			ctx = ctxMap.toArray()
+		}
+	}
+
+	// ctx needs to be even because it's a series of key/value pairs
+	// no one wants to check for errors on logging functions,
+	// so instead of erroring on bad input, we'll just make sure
+	// that things are the right length and users can fix bugs
+	// when they see the output looks wrong
+	if len(ctx)%2 != 0 {
+		ctx = append(ctx, nil, errorKey, "Normalized odd number of arguments by adding nil")
+	}
+
+	return ctx
+}
+
+// Lazy allows you to defer calculation of a logged value that is expensive
+// to compute until it is certain that it must be evaluated with the given filters.
+//
+// Lazy may also be used in conjunction with a Logger's New() function
+// to generate a child logger which always reports the current value of changing
+// state.
+//
+// You may wrap any function which takes no arguments to Lazy. It may return any
+// number of values of any type.
+type Lazy struct {
+	Fn interface{}
+}
+
+// Ctx is a map of key/value pairs to pass as context to a log function
+// Use this only if you really need greater safety around the arguments you pass
+// to the logging functions.
+type Ctx map[string]interface{}
+
+func (c Ctx) toArray() []interface{} {
+	arr := make([]interface{}, len(c)*2)
+
+	i := 0
+	for k, v := range c {
+		arr[i] = k
+		arr[i+1] = v
+		i += 2
+	}
+
+	return arr
+}

+ 67 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/root.go

@@ -0,0 +1,67 @@
+package log15
+
+import (
+	"os"
+
+	"github.com/inconshreveable/log15/term"
+	"github.com/mattn/go-colorable"
+)
+
+var (
+	root          *logger
+	StdoutHandler = StreamHandler(os.Stdout, LogfmtFormat())
+	StderrHandler = StreamHandler(os.Stderr, LogfmtFormat())
+)
+
+func init() {
+	if term.IsTty(os.Stdout.Fd()) {
+		StdoutHandler = StreamHandler(colorable.NewColorableStdout(), TerminalFormat())
+	}
+
+	if term.IsTty(os.Stderr.Fd()) {
+		StderrHandler = StreamHandler(colorable.NewColorableStderr(), TerminalFormat())
+	}
+
+	root = &logger{[]interface{}{}, new(swapHandler)}
+	root.SetHandler(StdoutHandler)
+}
+
+// New returns a new logger with the given context.
+// New is a convenient alias for Root().New
+func New(ctx ...interface{}) Logger {
+	return root.New(ctx...)
+}
+
+// Root returns the root logger
+func Root() Logger {
+	return root
+}
+
+// The following functions bypass the exported logger methods (logger.Debug,
+// etc.) to keep the call depth the same for all paths to logger.write so
+// runtime.Caller(2) always refers to the call site in client code.
+
+// Debug is a convenient alias for Root().Debug
+func Debug(msg string, ctx ...interface{}) {
+	root.write(msg, LvlDebug, ctx)
+}
+
+// Info is a convenient alias for Root().Info
+func Info(msg string, ctx ...interface{}) {
+	root.write(msg, LvlInfo, ctx)
+}
+
+// Warn is a convenient alias for Root().Warn
+func Warn(msg string, ctx ...interface{}) {
+	root.write(msg, LvlWarn, ctx)
+}
+
+// Error is a convenient alias for Root().Error
+func Error(msg string, ctx ...interface{}) {
+	root.write(msg, LvlError, ctx)
+}
+
+// Crit is a convenient alias for Root().Crit
+func Crit(msg string, ctx ...interface{}) {
+	root.write(msg, LvlCrit, ctx)
+}

+ 55 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/syslog.go

@@ -0,0 +1,55 @@
+// +build !windows,!plan9
+
+package log15
+
+import (
+	"log/syslog"
+	"strings"
+)
+
+// SyslogHandler opens a connection to the system syslog daemon by calling
+// syslog.New and writes all records to it.
+func SyslogHandler(priority syslog.Priority, tag string, fmtr Format) (Handler, error) {
+	wr, err := syslog.New(priority, tag)
+	return sharedSyslog(fmtr, wr, err)
+}
+
+// SyslogNetHandler opens a connection to a log daemon over the network and writes
+// all log records to it.
+func SyslogNetHandler(net, addr string, priority syslog.Priority, tag string, fmtr Format) (Handler, error) {
+	wr, err := syslog.Dial(net, addr, priority, tag)
+	return sharedSyslog(fmtr, wr, err)
+}
+
+func sharedSyslog(fmtr Format, sysWr *syslog.Writer, err error) (Handler, error) {
+	if err != nil {
+		return nil, err
+	}
+	h := FuncHandler(func(r *Record) error {
+		var syslogFn = sysWr.Info
+		switch r.Lvl {
+		case LvlCrit:
+			syslogFn = sysWr.Crit
+		case LvlError:
+			syslogFn = sysWr.Err
+		case LvlWarn:
+			syslogFn = sysWr.Warning
+		case LvlInfo:
+			syslogFn = sysWr.Info
+		case LvlDebug:
+			syslogFn = sysWr.Debug
+		}
+
+		s := strings.TrimSpace(string(fmtr.Format(r)))
+		return syslogFn(s)
+	})
+	return LazyHandler(&closingHandler{sysWr, h}), nil
+}
+
+func (m muster) SyslogHandler(priority syslog.Priority, tag string, fmtr Format) Handler {
+	return must(SyslogHandler(priority, tag, fmtr))
+}
+
+func (m muster) SyslogNetHandler(net, addr string, priority syslog.Priority, tag string, fmtr Format) Handler {
+	return must(SyslogNetHandler(net, addr, priority, tag, fmtr))
+}

+ 21 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/term/LICENSE

@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.

+ 13 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_appengine.go

@@ -0,0 +1,13 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package term
+
+// IsTty always returns false on AppEngine.
+func IsTty(fd uintptr) bool {
+	return false
+}

+ 12 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_darwin.go

@@ -0,0 +1,12 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package term
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios syscall.Termios

+ 18 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_freebsd.go

@@ -0,0 +1,18 @@
+package term
+
+import (
+	"syscall"
+)
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+// Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin.
+type Termios struct {
+	Iflag  uint32
+	Oflag  uint32
+	Cflag  uint32
+	Lflag  uint32
+	Cc     [20]uint8
+	Ispeed uint32
+	Ospeed uint32
+}

+ 14 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_linux.go

@@ -0,0 +1,14 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package term
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TCGETS
+
+type Termios syscall.Termios

+ 20 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_notwindows.go

@@ -0,0 +1,20 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux,!appengine darwin freebsd openbsd
+
+package term
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// IsTty returns true if the given file descriptor is a terminal.
+func IsTty(fd uintptr) bool {
+	var termios Termios
+	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+	return err == 0
+}

+ 7 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_openbsd.go

@@ -0,0 +1,7 @@
+package term
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios syscall.Termios

+ 26 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_windows.go

@@ -0,0 +1,26 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package term
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+var (
+	procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+)
+
+// IsTty returns true if the given file descriptor is a terminal.
+func IsTty(fd uintptr) bool {
+	var st uint32
+	r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
+	return r != 0 && e == 0
+}

+ 0 - 267
Godeps/_workspace/src/github.com/influxdata/influxdb/client/README.md

@@ -1,267 +0,0 @@
-# InfluxDB Client
-
-[![GoDoc](https://godoc.org/github.com/influxdata/influxdb?status.svg)](http://godoc.org/github.com/influxdata/influxdb/client/v2)
-
-## Description
-
-**NOTE:** The Go client library now has a "v2" version, with the old version
-being deprecated. The new version can be imported at
-`import "github.com/influxdata/influxdb/client/v2"`. It is not backwards-compatible.
-
-A Go client library written and maintained by the **InfluxDB** team.
-This package provides convenience functions to read and write time series data.
-It uses the HTTP protocol to communicate with your **InfluxDB** cluster.
-
-
-## Getting Started
-
-### Connecting To Your Database
-
-Connecting to an **InfluxDB** database is straightforward. You will need a host
-name, a port and the cluster user credentials if applicable. The default port is
-8086. You can customize these settings to your specific installation via the
-**InfluxDB** configuration file.
-
-Though not necessary for experimentation, you may want to create a new user
-and authenticate the connection to your database.
-
-For more information please check out the
-[Admin Docs](https://docs.influxdata.com/influxdb/latest/administration/).
-
-For the impatient, you can create a new admin user _bubba_ by firing off the
-[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go).
-
-```shell
-influx
-> create user bubba with password 'bumblebeetuna'
-> grant all privileges to bubba
-```
-
-And now for good measure set the credentials in you shell environment.
-In the example below we will use $INFLUX_USER and $INFLUX_PWD
-
-Now with the administrivia out of the way, let's connect to our database.
-
-NOTE: If you've opted out of creating a user, you can omit Username and Password in
-the configuration below.
-
-```go
-package main
-
-import (
-	"log"
-	"time"
-
-	"github.com/influxdata/influxdb/client/v2"
-)
-
-const (
-	MyDB = "square_holes"
-	username = "bubba"
-	password = "bumblebeetuna"
-)
-
-func main() {
-	// Make client
-	c, err := client.NewHTTPClient(client.HTTPConfig{
-		Addr: "http://localhost:8086",
-		Username: username,
-		Password: password,
-	})
-
-	if err != nil {
-	    log.Fatalln("Error: ", err)
-	}
-
-	// Create a new point batch
-	bp, err := client.NewBatchPoints(client.BatchPointsConfig{
-		Database:  MyDB,
-		Precision: "s",
-	})
-
-	if err != nil {
-	    log.Fatalln("Error: ", err)
-	}
-
-	// Create a point and add to batch
-	tags := map[string]string{"cpu": "cpu-total"}
-	fields := map[string]interface{}{
-		"idle":   10.1,
-		"system": 53.3,
-		"user":   46.6,
-	}
-	pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
-
-	if err != nil {
-	    log.Fatalln("Error: ", err)
-	}
-
-	bp.AddPoint(pt)
-
-	// Write the batch
-	c.Write(bp)
-}
-
-```
-
-### Inserting Data
-
-Time series data aka *points* are written to the database using batch inserts.
-The mechanism is to create one or more points and then create a batch aka
-*batch points* and write these to a given database and series. A series is a
-combination of a measurement (time/values) and a set of tags.
-
-In this sample we will create a batch of a 1,000 points. Each point has a time and
-a single value as well as 2 tags indicating a shape and color. We write these points
-to a database called _square_holes_ using a measurement named _shapes_.
-
-NOTE: You can specify a RetentionPolicy as part of the batch points. If not
-provided InfluxDB will use the database _default_ retention policy.
-
-```go
-func writePoints(clnt client.Client) {
-	sampleSize := 1000
-	rand.Seed(42)
-
-	bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
-		Database:  "systemstats",
-		Precision: "us",
-	})
-
-	for i := 0; i < sampleSize; i++ {
-		regions := []string{"us-west1", "us-west2", "us-west3", "us-east1"}
-		tags := map[string]string{
-			"cpu":    "cpu-total",
-			"host":   fmt.Sprintf("host%d", rand.Intn(1000)),
-			"region": regions[rand.Intn(len(regions))],
-		}
-
-		idle := rand.Float64() * 100.0
-		fields := map[string]interface{}{
-			"idle": idle,
-			"busy": 100.0 - idle,
-		}
-
-		bp.AddPoint(client.NewPoint(
-			"cpu_usage",
-			tags,
-			fields,
-			time.Now(),
-		))
-	}
-
-	err := clnt.Write(bp)
-	if err != nil {
-		log.Fatal(err)
-	}
-}
-```
-
-
-### Querying Data
-
-One nice advantage of using **InfluxDB** the ability to query your data using familiar
-SQL constructs. In this example we can create a convenience function to query the database
-as follows:
-
-```go
-// queryDB convenience function to query the database
-func queryDB(clnt client.Client, cmd string) (res []client.Result, err error) {
-	q := client.Query{
-		Command:  cmd,
-		Database: MyDB,
-	}
-	if response, err := clnt.Query(q); err == nil {
-		if response.Error() != nil {
-			return res, response.Error()
-		}
-		res = response.Results
-	} else {
-		return res, err
-	}
-	return res, nil
-}
-```
-
-#### Creating a Database
-
-```go
-_, err := queryDB(clnt, fmt.Sprintf("CREATE DATABASE %s", MyDB))
-if err != nil {
-	log.Fatal(err)
-}
-```
-
-#### Count Records
-
-```go
-q := fmt.Sprintf("SELECT count(%s) FROM %s", "value", MyMeasurement)
-res, err := queryDB(clnt, q)
-if err != nil {
-	log.Fatal(err)
-}
-count := res[0].Series[0].Values[0][1]
-log.Printf("Found a total of %v records\n", count)
-```
-
-#### Find the last 10 _shapes_ records
-
-```go
-q := fmt.Sprintf("SELECT * FROM %s LIMIT %d", MyMeasurement, 20)
-res, err = queryDB(clnt, q)
-if err != nil {
-	log.Fatal(err)
-}
-
-for i, row := range res[0].Series[0].Values {
-	t, err := time.Parse(time.RFC3339, row[0].(string))
-	if err != nil {
-		log.Fatal(err)
-	}
-	val := row[1].(string)
-	log.Printf("[%2d] %s: %s\n", i, t.Format(time.Stamp), val)
-}
-```
-
-### Using the UDP Client
-
-The **InfluxDB** client also supports writing over UDP.
-
-```go
-func WriteUDP() {
-	// Make client
-	c := client.NewUDPClient("localhost:8089")
-
-	// Create a new point batch
-	bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
-		Precision: "s",
-	})
-
-	// Create a point and add to batch
-	tags := map[string]string{"cpu": "cpu-total"}
-	fields := map[string]interface{}{
-		"idle":   10.1,
-		"system": 53.3,
-		"user":   46.6,
-	}
-	pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
-	if err != nil {
-		panic(err.Error())
-	}
-	bp.AddPoint(pt)
-
-	// Write the batch
-	c.Write(bp)
-}
-```
-
-## Go Docs
-
-Please refer to
-[http://godoc.org/github.com/influxdata/influxdb/client/v2](http://godoc.org/github.com/influxdata/influxdb/client/v2)
-for documentation.
-
-## See Also
-
-You can also examine how the client library is used by the
-[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go).

+ 0 - 789
Godeps/_workspace/src/github.com/influxdata/influxdb/client/influxdb.go

@@ -1,789 +0,0 @@
-package client
-
-import (
-	"bytes"
-	"crypto/tls"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net"
-	"net/http"
-	"net/url"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/influxdata/influxdb/models"
-)
-
-const (
-	// DefaultHost is the default host used to connect to an InfluxDB instance
-	DefaultHost = "localhost"
-
-	// DefaultPort is the default port used to connect to an InfluxDB instance
-	DefaultPort = 8086
-
-	// DefaultTimeout is the default connection timeout used to connect to an InfluxDB instance
-	DefaultTimeout = 0
-)
-
-// Query is used to send a command to the server. Both Command and Database are required.
-type Query struct {
-	Command  string
-	Database string
-
-	// Chunked tells the server to send back chunked responses. This places
-	// less load on the server by sending back chunks of the response rather
-	// than waiting for the entire response all at once.
-	Chunked bool
-
-	// ChunkSize sets the maximum number of rows that will be returned per
-	// chunk. Chunks are either divided based on their series or if they hit
-	// the chunk size limit.
-	//
-	// Chunked must be set to true for this option to be used.
-	ChunkSize int
-}
-
-// ParseConnectionString will parse a string to create a valid connection URL
-func ParseConnectionString(path string, ssl bool) (url.URL, error) {
-	var host string
-	var port int
-
-	h, p, err := net.SplitHostPort(path)
-	if err != nil {
-		if path == "" {
-			host = DefaultHost
-		} else {
-			host = path
-		}
-		// If they didn't specify a port, always use the default port
-		port = DefaultPort
-	} else {
-		host = h
-		port, err = strconv.Atoi(p)
-		if err != nil {
-			return url.URL{}, fmt.Errorf("invalid port number %q: %s\n", path, err)
-		}
-	}
-
-	u := url.URL{
-		Scheme: "http",
-	}
-	if ssl {
-		u.Scheme = "https"
-	}
-
-	u.Host = net.JoinHostPort(host, strconv.Itoa(port))
-
-	return u, nil
-}
-
-// Config is used to specify what server to connect to.
-// URL: The URL of the server connecting to.
-// Username/Password are optional. They will be passed via basic auth if provided.
-// UserAgent: If not provided, will default "InfluxDBClient",
-// Timeout: If not provided, will default to 0 (no timeout)
-type Config struct {
-	URL       url.URL
-	Username  string
-	Password  string
-	UserAgent string
-	Timeout   time.Duration
-	Precision string
-	UnsafeSsl bool
-}
-
-// NewConfig will create a config to be used in connecting to the client
-func NewConfig() Config {
-	return Config{
-		Timeout: DefaultTimeout,
-	}
-}
-
-// Client is used to make calls to the server.
-type Client struct {
-	url        url.URL
-	username   string
-	password   string
-	httpClient *http.Client
-	userAgent  string
-	precision  string
-}
-
-const (
-	// ConsistencyOne requires at least one data node acknowledged a write.
-	ConsistencyOne = "one"
-
-	// ConsistencyAll requires all data nodes to acknowledge a write.
-	ConsistencyAll = "all"
-
-	// ConsistencyQuorum requires a quorum of data nodes to acknowledge a write.
-	ConsistencyQuorum = "quorum"
-
-	// ConsistencyAny allows for hinted hand off, potentially no write happened yet.
-	ConsistencyAny = "any"
-)
-
-// NewClient will instantiate and return a connected client to issue commands to the server.
-func NewClient(c Config) (*Client, error) {
-	tlsConfig := &tls.Config{
-		InsecureSkipVerify: c.UnsafeSsl,
-	}
-
-	tr := &http.Transport{
-		TLSClientConfig: tlsConfig,
-	}
-
-	client := Client{
-		url:        c.URL,
-		username:   c.Username,
-		password:   c.Password,
-		httpClient: &http.Client{Timeout: c.Timeout, Transport: tr},
-		userAgent:  c.UserAgent,
-		precision:  c.Precision,
-	}
-	if client.userAgent == "" {
-		client.userAgent = "InfluxDBClient"
-	}
-	return &client, nil
-}
-
-// SetAuth will update the username and passwords
-func (c *Client) SetAuth(u, p string) {
-	c.username = u
-	c.password = p
-}
-
-// SetPrecision will update the precision
-func (c *Client) SetPrecision(precision string) {
-	c.precision = precision
-}
-
-// Query sends a command to the server and returns the Response
-func (c *Client) Query(q Query) (*Response, error) {
-	u := c.url
-
-	u.Path = "query"
-	values := u.Query()
-	values.Set("q", q.Command)
-	values.Set("db", q.Database)
-	if q.Chunked {
-		values.Set("chunked", "true")
-		if q.ChunkSize > 0 {
-			values.Set("chunk_size", strconv.Itoa(q.ChunkSize))
-		}
-	}
-	if c.precision != "" {
-		values.Set("epoch", c.precision)
-	}
-	u.RawQuery = values.Encode()
-
-	req, err := http.NewRequest("POST", u.String(), nil)
-	if err != nil {
-		return nil, err
-	}
-	req.Header.Set("User-Agent", c.userAgent)
-	if c.username != "" {
-		req.SetBasicAuth(c.username, c.password)
-	}
-
-	resp, err := c.httpClient.Do(req)
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	var response Response
-	if q.Chunked {
-		cr := NewChunkedResponse(resp.Body)
-		for {
-			r, err := cr.NextResponse()
-			if err != nil {
-				// If we got an error while decoding the response, send that back.
-				return nil, err
-			}
-
-			if r == nil {
-				break
-			}
-
-			response.Results = append(response.Results, r.Results...)
-			if r.Err != nil {
-				response.Err = r.Err
-				break
-			}
-		}
-	} else {
-		dec := json.NewDecoder(resp.Body)
-		dec.UseNumber()
-		if err := dec.Decode(&response); err != nil {
-			// Ignore EOF errors if we got an invalid status code.
-			if !(err == io.EOF && resp.StatusCode != http.StatusOK) {
-				return nil, err
-			}
-		}
-	}
-
-	// If we don't have an error in our json response, and didn't get StatusOK,
-	// then send back an error.
-	if resp.StatusCode != http.StatusOK && response.Error() == nil {
-		return &response, fmt.Errorf("received status code %d from server", resp.StatusCode)
-	}
-	return &response, nil
-}
-
-// Write takes BatchPoints and allows for writing of multiple points with defaults
-// If successful, error is nil and Response is nil
-// If an error occurs, Response may contain additional information if populated.
-func (c *Client) Write(bp BatchPoints) (*Response, error) {
-	u := c.url
-	u.Path = "write"
-
-	var b bytes.Buffer
-	for _, p := range bp.Points {
-		err := checkPointTypes(p)
-		if err != nil {
-			return nil, err
-		}
-		if p.Raw != "" {
-			if _, err := b.WriteString(p.Raw); err != nil {
-				return nil, err
-			}
-		} else {
-			for k, v := range bp.Tags {
-				if p.Tags == nil {
-					p.Tags = make(map[string]string, len(bp.Tags))
-				}
-				p.Tags[k] = v
-			}
-
-			if _, err := b.WriteString(p.MarshalString()); err != nil {
-				return nil, err
-			}
-		}
-
-		if err := b.WriteByte('\n'); err != nil {
-			return nil, err
-		}
-	}
-
-	req, err := http.NewRequest("POST", u.String(), &b)
-	if err != nil {
-		return nil, err
-	}
-	req.Header.Set("Content-Type", "")
-	req.Header.Set("User-Agent", c.userAgent)
-	if c.username != "" {
-		req.SetBasicAuth(c.username, c.password)
-	}
-
-	precision := bp.Precision
-	if precision == "" {
-		precision = c.precision
-	}
-
-	params := req.URL.Query()
-	params.Set("db", bp.Database)
-	params.Set("rp", bp.RetentionPolicy)
-	params.Set("precision", precision)
-	params.Set("consistency", bp.WriteConsistency)
-	req.URL.RawQuery = params.Encode()
-
-	resp, err := c.httpClient.Do(req)
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	var response Response
-	body, err := ioutil.ReadAll(resp.Body)
-	if err != nil {
-		return nil, err
-	}
-
-	if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
-		var err = fmt.Errorf(string(body))
-		response.Err = err
-		return &response, err
-	}
-
-	return nil, nil
-}
-
-// WriteLineProtocol takes a string with line returns to delimit each write
-// If successful, error is nil and Response is nil
-// If an error occurs, Response may contain additional information if populated.
-func (c *Client) WriteLineProtocol(data, database, retentionPolicy, precision, writeConsistency string) (*Response, error) {
-	u := c.url
-	u.Path = "write"
-
-	r := strings.NewReader(data)
-
-	req, err := http.NewRequest("POST", u.String(), r)
-	if err != nil {
-		return nil, err
-	}
-	req.Header.Set("Content-Type", "")
-	req.Header.Set("User-Agent", c.userAgent)
-	if c.username != "" {
-		req.SetBasicAuth(c.username, c.password)
-	}
-	params := req.URL.Query()
-	params.Set("db", database)
-	params.Set("rp", retentionPolicy)
-	params.Set("precision", precision)
-	params.Set("consistency", writeConsistency)
-	req.URL.RawQuery = params.Encode()
-
-	resp, err := c.httpClient.Do(req)
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	var response Response
-	body, err := ioutil.ReadAll(resp.Body)
-	if err != nil {
-		return nil, err
-	}
-
-	if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
-		err := fmt.Errorf(string(body))
-		response.Err = err
-		return &response, err
-	}
-
-	return nil, nil
-}
-
-// Ping will check to see if the server is up
-// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred.
-func (c *Client) Ping() (time.Duration, string, error) {
-	now := time.Now()
-	u := c.url
-	u.Path = "ping"
-
-	req, err := http.NewRequest("GET", u.String(), nil)
-	if err != nil {
-		return 0, "", err
-	}
-	req.Header.Set("User-Agent", c.userAgent)
-	if c.username != "" {
-		req.SetBasicAuth(c.username, c.password)
-	}
-
-	resp, err := c.httpClient.Do(req)
-	if err != nil {
-		return 0, "", err
-	}
-	defer resp.Body.Close()
-
-	version := resp.Header.Get("X-Influxdb-Version")
-	return time.Since(now), version, nil
-}
-
-// Structs
-
-// Message represents a user message.
-type Message struct {
-	Level string `json:"level,omitempty"`
-	Text  string `json:"text,omitempty"`
-}
-
-// Result represents a resultset returned from a single statement.
-type Result struct {
-	Series   []models.Row
-	Messages []*Message
-	Err      error
-}
-
-// MarshalJSON encodes the result into JSON.
-func (r *Result) MarshalJSON() ([]byte, error) {
-	// Define a struct that outputs "error" as a string.
-	var o struct {
-		Series   []models.Row `json:"series,omitempty"`
-		Messages []*Message   `json:"messages,omitempty"`
-		Err      string       `json:"error,omitempty"`
-	}
-
-	// Copy fields to output struct.
-	o.Series = r.Series
-	o.Messages = r.Messages
-	if r.Err != nil {
-		o.Err = r.Err.Error()
-	}
-
-	return json.Marshal(&o)
-}
-
-// UnmarshalJSON decodes the data into the Result struct
-func (r *Result) UnmarshalJSON(b []byte) error {
-	var o struct {
-		Series   []models.Row `json:"series,omitempty"`
-		Messages []*Message   `json:"messages,omitempty"`
-		Err      string       `json:"error,omitempty"`
-	}
-
-	dec := json.NewDecoder(bytes.NewBuffer(b))
-	dec.UseNumber()
-	err := dec.Decode(&o)
-	if err != nil {
-		return err
-	}
-	r.Series = o.Series
-	r.Messages = o.Messages
-	if o.Err != "" {
-		r.Err = errors.New(o.Err)
-	}
-	return nil
-}
-
-// Response represents a list of statement results.
-type Response struct {
-	Results []Result
-	Err     error
-}
-
-// MarshalJSON encodes the response into JSON.
-func (r *Response) MarshalJSON() ([]byte, error) {
-	// Define a struct that outputs "error" as a string.
-	var o struct {
-		Results []Result `json:"results,omitempty"`
-		Err     string   `json:"error,omitempty"`
-	}
-
-	// Copy fields to output struct.
-	o.Results = r.Results
-	if r.Err != nil {
-		o.Err = r.Err.Error()
-	}
-
-	return json.Marshal(&o)
-}
-
-// UnmarshalJSON decodes the data into the Response struct
-func (r *Response) UnmarshalJSON(b []byte) error {
-	var o struct {
-		Results []Result `json:"results,omitempty"`
-		Err     string   `json:"error,omitempty"`
-	}
-
-	dec := json.NewDecoder(bytes.NewBuffer(b))
-	dec.UseNumber()
-	err := dec.Decode(&o)
-	if err != nil {
-		return err
-	}
-	r.Results = o.Results
-	if o.Err != "" {
-		r.Err = errors.New(o.Err)
-	}
-	return nil
-}
-
-// Error returns the first error from any statement.
-// Returns nil if no errors occurred on any statements.
-func (r *Response) Error() error {
-	if r.Err != nil {
-		return r.Err
-	}
-	for _, result := range r.Results {
-		if result.Err != nil {
-			return result.Err
-		}
-	}
-	return nil
-}
-
-// ChunkedResponse represents a response from the server that
-// uses chunking to stream the output.
-type ChunkedResponse struct {
-	dec *json.Decoder
-}
-
-// NewChunkedResponse reads a stream and produces responses from the stream.
-func NewChunkedResponse(r io.Reader) *ChunkedResponse {
-	dec := json.NewDecoder(r)
-	dec.UseNumber()
-	return &ChunkedResponse{dec: dec}
-}
-
-// NextResponse reads the next line of the stream and returns a response.
-func (r *ChunkedResponse) NextResponse() (*Response, error) {
-	var response Response
-	if err := r.dec.Decode(&response); err != nil {
-		if err == io.EOF {
-			return nil, nil
-		}
-		return nil, err
-	}
-	return &response, nil
-}
-
-// Point defines the fields that will be written to the database
-// Measurement, Time, and Fields are required
-// Precision can be specified if the time is in epoch format (integer).
-// Valid values for Precision are n, u, ms, s, m, and h
-type Point struct {
-	Measurement string
-	Tags        map[string]string
-	Time        time.Time
-	Fields      map[string]interface{}
-	Precision   string
-	Raw         string
-}
-
-// MarshalJSON will format the time in RFC3339Nano
-// Precision is also ignored as it is only used for writing, not reading
-// Or another way to say it is we always send back in nanosecond precision
-func (p *Point) MarshalJSON() ([]byte, error) {
-	point := struct {
-		Measurement string                 `json:"measurement,omitempty"`
-		Tags        map[string]string      `json:"tags,omitempty"`
-		Time        string                 `json:"time,omitempty"`
-		Fields      map[string]interface{} `json:"fields,omitempty"`
-		Precision   string                 `json:"precision,omitempty"`
-	}{
-		Measurement: p.Measurement,
-		Tags:        p.Tags,
-		Fields:      p.Fields,
-		Precision:   p.Precision,
-	}
-	// Let it omit empty if it's really zero
-	if !p.Time.IsZero() {
-		point.Time = p.Time.UTC().Format(time.RFC3339Nano)
-	}
-	return json.Marshal(&point)
-}
-
-// MarshalString renders string representation of a Point with specified
-// precision. The default precision is nanoseconds.
-func (p *Point) MarshalString() string {
-	pt, err := models.NewPoint(p.Measurement, p.Tags, p.Fields, p.Time)
-	if err != nil {
-		return "# ERROR: " + err.Error() + " " + p.Measurement
-	}
-	if p.Precision == "" || p.Precision == "ns" || p.Precision == "n" {
-		return pt.String()
-	}
-	return pt.PrecisionString(p.Precision)
-}
-
-// UnmarshalJSON decodes the data into the Point struct
-func (p *Point) UnmarshalJSON(b []byte) error {
-	var normal struct {
-		Measurement string                 `json:"measurement"`
-		Tags        map[string]string      `json:"tags"`
-		Time        time.Time              `json:"time"`
-		Precision   string                 `json:"precision"`
-		Fields      map[string]interface{} `json:"fields"`
-	}
-	var epoch struct {
-		Measurement string                 `json:"measurement"`
-		Tags        map[string]string      `json:"tags"`
-		Time        *int64                 `json:"time"`
-		Precision   string                 `json:"precision"`
-		Fields      map[string]interface{} `json:"fields"`
-	}
-
-	if err := func() error {
-		var err error
-		dec := json.NewDecoder(bytes.NewBuffer(b))
-		dec.UseNumber()
-		if err = dec.Decode(&epoch); err != nil {
-			return err
-		}
-		// Convert from epoch to time.Time, but only if Time
-		// was actually set.
-		var ts time.Time
-		if epoch.Time != nil {
-			ts, err = EpochToTime(*epoch.Time, epoch.Precision)
-			if err != nil {
-				return err
-			}
-		}
-		p.Measurement = epoch.Measurement
-		p.Tags = epoch.Tags
-		p.Time = ts
-		p.Precision = epoch.Precision
-		p.Fields = normalizeFields(epoch.Fields)
-		return nil
-	}(); err == nil {
-		return nil
-	}
-
-	dec := json.NewDecoder(bytes.NewBuffer(b))
-	dec.UseNumber()
-	if err := dec.Decode(&normal); err != nil {
-		return err
-	}
-	normal.Time = SetPrecision(normal.Time, normal.Precision)
-	p.Measurement = normal.Measurement
-	p.Tags = normal.Tags
-	p.Time = normal.Time
-	p.Precision = normal.Precision
-	p.Fields = normalizeFields(normal.Fields)
-
-	return nil
-}
-
-// Remove any notion of json.Number
-func normalizeFields(fields map[string]interface{}) map[string]interface{} {
-	newFields := map[string]interface{}{}
-
-	for k, v := range fields {
-		switch v := v.(type) {
-		case json.Number:
-			jv, e := v.Float64()
-			if e != nil {
-				panic(fmt.Sprintf("unable to convert json.Number to float64: %s", e))
-			}
-			newFields[k] = jv
-		default:
-			newFields[k] = v
-		}
-	}
-	return newFields
-}
-
-// BatchPoints is used to send batched data in a single write.
-// Database and Points are required
-// If no retention policy is specified, it will use the databases default retention policy.
-// If tags are specified, they will be "merged" with all points. If a point already has that tag, it will be ignored.
-// If time is specified, it will be applied to any point with an empty time.
-// Precision can be specified if the time is in epoch format (integer).
-// Valid values for Precision are n, u, ms, s, m, and h
-type BatchPoints struct {
-	Points           []Point           `json:"points,omitempty"`
-	Database         string            `json:"database,omitempty"`
-	RetentionPolicy  string            `json:"retentionPolicy,omitempty"`
-	Tags             map[string]string `json:"tags,omitempty"`
-	Time             time.Time         `json:"time,omitempty"`
-	Precision        string            `json:"precision,omitempty"`
-	WriteConsistency string            `json:"-"`
-}
-
-// UnmarshalJSON decodes the data into the BatchPoints struct
-func (bp *BatchPoints) UnmarshalJSON(b []byte) error {
-	var normal struct {
-		Points          []Point           `json:"points"`
-		Database        string            `json:"database"`
-		RetentionPolicy string            `json:"retentionPolicy"`
-		Tags            map[string]string `json:"tags"`
-		Time            time.Time         `json:"time"`
-		Precision       string            `json:"precision"`
-	}
-	var epoch struct {
-		Points          []Point           `json:"points"`
-		Database        string            `json:"database"`
-		RetentionPolicy string            `json:"retentionPolicy"`
-		Tags            map[string]string `json:"tags"`
-		Time            *int64            `json:"time"`
-		Precision       string            `json:"precision"`
-	}
-
-	if err := func() error {
-		var err error
-		if err = json.Unmarshal(b, &epoch); err != nil {
-			return err
-		}
-		// Convert from epoch to time.Time
-		var ts time.Time
-		if epoch.Time != nil {
-			ts, err = EpochToTime(*epoch.Time, epoch.Precision)
-			if err != nil {
-				return err
-			}
-		}
-		bp.Points = epoch.Points
-		bp.Database = epoch.Database
-		bp.RetentionPolicy = epoch.RetentionPolicy
-		bp.Tags = epoch.Tags
-		bp.Time = ts
-		bp.Precision = epoch.Precision
-		return nil
-	}(); err == nil {
-		return nil
-	}
-
-	if err := json.Unmarshal(b, &normal); err != nil {
-		return err
-	}
-	normal.Time = SetPrecision(normal.Time, normal.Precision)
-	bp.Points = normal.Points
-	bp.Database = normal.Database
-	bp.RetentionPolicy = normal.RetentionPolicy
-	bp.Tags = normal.Tags
-	bp.Time = normal.Time
-	bp.Precision = normal.Precision
-
-	return nil
-}
-
-// utility functions
-
-// Addr provides the current url as a string of the server the client is connected to.
-func (c *Client) Addr() string {
-	return c.url.String()
-}
-
-// checkPointTypes ensures no unsupported types are submitted to influxdb, returning error if they are found.
-func checkPointTypes(p Point) error {
-	for _, v := range p.Fields {
-		switch v.(type) {
-		case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, float32, float64, bool, string, nil:
-			return nil
-		default:
-			return fmt.Errorf("unsupported point type: %T", v)
-		}
-	}
-	return nil
-}
-
-// helper functions
-
-// EpochToTime takes a unix epoch time and uses precision to return back a time.Time
-func EpochToTime(epoch int64, precision string) (time.Time, error) {
-	if precision == "" {
-		precision = "s"
-	}
-	var t time.Time
-	switch precision {
-	case "h":
-		t = time.Unix(0, epoch*int64(time.Hour))
-	case "m":
-		t = time.Unix(0, epoch*int64(time.Minute))
-	case "s":
-		t = time.Unix(0, epoch*int64(time.Second))
-	case "ms":
-		t = time.Unix(0, epoch*int64(time.Millisecond))
-	case "u":
-		t = time.Unix(0, epoch*int64(time.Microsecond))
-	case "n":
-		t = time.Unix(0, epoch)
-	default:
-		return time.Time{}, fmt.Errorf("Unknown precision %q", precision)
-	}
-	return t, nil
-}
-
-// SetPrecision will round a time to the specified precision
-func SetPrecision(t time.Time, precision string) time.Time {
-	switch precision {
-	case "n":
-	case "u":
-		return t.Round(time.Microsecond)
-	case "ms":
-		return t.Round(time.Millisecond)
-	case "s":
-		return t.Round(time.Second)
-	case "m":
-		return t.Round(time.Minute)
-	case "h":
-		return t.Round(time.Hour)
-	}
-	return t
-}

+ 0 - 46
Godeps/_workspace/src/github.com/influxdata/influxdb/models/consistency.go

@@ -1,46 +0,0 @@
-package models
-
-import (
-	"errors"
-	"strings"
-)
-
-// ConsistencyLevel represent a required replication criteria before a write can
-// be returned as successful
-type ConsistencyLevel int
-
-const (
-	// ConsistencyLevelAny allows for hinted hand off, potentially no write happened yet
-	ConsistencyLevelAny ConsistencyLevel = iota
-
-	// ConsistencyLevelOne requires at least one data node acknowledged a write
-	ConsistencyLevelOne
-
-	// ConsistencyLevelQuorum requires a quorum of data nodes to acknowledge a write
-	ConsistencyLevelQuorum
-
-	// ConsistencyLevelAll requires all data nodes to acknowledge a write
-	ConsistencyLevelAll
-)
-
-var (
-	// ErrInvalidConsistencyLevel is returned when parsing the string version
-	// of a consistency level.
-	ErrInvalidConsistencyLevel = errors.New("invalid consistency level")
-)
-
-// ParseConsistencyLevel converts a consistency level string to the corresponding ConsistencyLevel const
-func ParseConsistencyLevel(level string) (ConsistencyLevel, error) {
-	switch strings.ToLower(level) {
-	case "any":
-		return ConsistencyLevelAny, nil
-	case "one":
-		return ConsistencyLevelOne, nil
-	case "quorum":
-		return ConsistencyLevelQuorum, nil
-	case "all":
-		return ConsistencyLevelAll, nil
-	default:
-		return 0, ErrInvalidConsistencyLevel
-	}
-}

+ 0 - 1576
Godeps/_workspace/src/github.com/influxdata/influxdb/models/points.go

@@ -1,1576 +0,0 @@
-package models
-
-import (
-	"bytes"
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"hash/fnv"
-	"math"
-	"sort"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/influxdata/influxdb/pkg/escape"
-)
-
-var (
-	measurementEscapeCodes = map[byte][]byte{
-		',': []byte(`\,`),
-		' ': []byte(`\ `),
-	}
-
-	tagEscapeCodes = map[byte][]byte{
-		',': []byte(`\,`),
-		' ': []byte(`\ `),
-		'=': []byte(`\=`),
-	}
-
-	ErrPointMustHaveAField  = errors.New("point without fields is unsupported")
-	ErrInvalidNumber        = errors.New("invalid number")
-	ErrMaxKeyLengthExceeded = errors.New("max key length exceeded")
-)
-
-const (
-	MaxKeyLength = 65535
-)
-
-// Point defines the values that will be written to the database
-type Point interface {
-	Name() string
-	SetName(string)
-
-	Tags() Tags
-	AddTag(key, value string)
-	SetTags(tags Tags)
-
-	Fields() Fields
-
-	Time() time.Time
-	SetTime(t time.Time)
-	UnixNano() int64
-
-	HashID() uint64
-	Key() []byte
-
-	Data() []byte
-	SetData(buf []byte)
-
-	// String returns a string representation of the point, if there is a
-	// timestamp associated with the point then it will be specified with the default
-	// precision of nanoseconds
-	String() string
-
-	// Bytes returns a []byte representation of the point similar to string.
-	MarshalBinary() ([]byte, error)
-
-	// PrecisionString returns a string representation of the point, if there
-	// is a timestamp associated with the point then it will be specified in the
-	// given unit
-	PrecisionString(precision string) string
-
-	// RoundedString returns a string representation of the point, if there
-	// is a timestamp associated with the point, then it will be rounded to the
-	// given duration
-	RoundedString(d time.Duration) string
-}
-
-// Points represents a sortable list of points by timestamp.
-type Points []Point
-
-func (a Points) Len() int           { return len(a) }
-func (a Points) Less(i, j int) bool { return a[i].Time().Before(a[j].Time()) }
-func (a Points) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
-
-// point is the default implementation of Point.
-type point struct {
-	time time.Time
-
-	// text encoding of measurement and tags
-	// key must always be stored sorted by tags, if the original line was not sorted,
-	// we need to resort it
-	key []byte
-
-	// text encoding of field data
-	fields []byte
-
-	// text encoding of timestamp
-	ts []byte
-
-	// binary encoded field data
-	data []byte
-
-	// cached version of parsed fields from data
-	cachedFields map[string]interface{}
-
-	// cached version of parsed name from key
-	cachedName string
-}
-
-const (
-	// the number of characters for the largest possible int64 (9223372036854775807)
-	maxInt64Digits = 19
-
-	// the number of characters for the smallest possible int64 (-9223372036854775808)
-	minInt64Digits = 20
-
-	// the number of characters required for the largest float64 before a range check
-	// would occur during parsing
-	maxFloat64Digits = 25
-
-	// the number of characters required for smallest float64 before a range check occur
-	// would occur during parsing
-	minFloat64Digits = 27
-)
-
-// ParsePoints returns a slice of Points from a text representation of a point
-// with each point separated by newlines.  If any points fail to parse, a non-nil error
-// will be returned in addition to the points that parsed successfully.
-func ParsePoints(buf []byte) ([]Point, error) {
-	return ParsePointsWithPrecision(buf, time.Now().UTC(), "n")
-}
-
-// ParsePointsString is identical to ParsePoints but accepts a string
-// buffer.
-func ParsePointsString(buf string) ([]Point, error) {
-	return ParsePoints([]byte(buf))
-}
-
-// ParseKey returns the measurement name and tags from a point.
-func ParseKey(buf string) (string, Tags, error) {
-	// Ignore the error because scanMeasurement returns "missing fields" which we ignore
-	// when just parsing a key
-	state, i, _ := scanMeasurement([]byte(buf), 0)
-
-	var tags Tags
-	if state == tagKeyState {
-		tags = parseTags([]byte(buf))
-		// scanMeasurement returns the location of the comma if there are tags, strip that off
-		return string(buf[:i-1]), tags, nil
-	}
-	return string(buf[:i]), tags, nil
-}
-
-// ParsePointsWithPrecision is similar to ParsePoints, but allows the
-// caller to provide a precision for time.
-func ParsePointsWithPrecision(buf []byte, defaultTime time.Time, precision string) ([]Point, error) {
-	points := []Point{}
-	var (
-		pos    int
-		block  []byte
-		failed []string
-	)
-	for {
-		pos, block = scanLine(buf, pos)
-		pos++
-
-		if len(block) == 0 {
-			break
-		}
-
-		// lines which start with '#' are comments
-		start := skipWhitespace(block, 0)
-
-		// If line is all whitespace, just skip it
-		if start >= len(block) {
-			continue
-		}
-
-		if block[start] == '#' {
-			continue
-		}
-
-		// strip the newline if one is present
-		if block[len(block)-1] == '\n' {
-			block = block[:len(block)-1]
-		}
-
-		pt, err := parsePoint(block[start:len(block)], defaultTime, precision)
-		if err != nil {
-			failed = append(failed, fmt.Sprintf("unable to parse '%s': %v", string(block[start:len(block)]), err))
-		} else {
-			points = append(points, pt)
-		}
-
-		if pos >= len(buf) {
-			break
-		}
-
-	}
-	if len(failed) > 0 {
-		return points, fmt.Errorf("%s", strings.Join(failed, "\n"))
-	}
-	return points, nil
-
-}
-
-func parsePoint(buf []byte, defaultTime time.Time, precision string) (Point, error) {
-	// scan the first block which is measurement[,tag1=value1,tag2=value=2...]
-	pos, key, err := scanKey(buf, 0)
-	if err != nil {
-		return nil, err
-	}
-
-	// measurement name is required
-	if len(key) == 0 {
-		return nil, fmt.Errorf("missing measurement")
-	}
-
-	if len(key) > MaxKeyLength {
-		return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength)
-	}
-
-	// scan the second block is which is field1=value1[,field2=value2,...]
-	pos, fields, err := scanFields(buf, pos)
-	if err != nil {
-		return nil, err
-	}
-
-	// at least one field is required
-	if len(fields) == 0 {
-		return nil, fmt.Errorf("missing fields")
-	}
-
-	// scan the last block which is an optional integer timestamp
-	pos, ts, err := scanTime(buf, pos)
-
-	if err != nil {
-		return nil, err
-	}
-
-	pt := &point{
-		key:    key,
-		fields: fields,
-		ts:     ts,
-	}
-
-	if len(ts) == 0 {
-		pt.time = defaultTime
-		pt.SetPrecision(precision)
-	} else {
-		ts, err := strconv.ParseInt(string(ts), 10, 64)
-		if err != nil {
-			return nil, err
-		}
-		pt.time, err = SafeCalcTime(ts, precision)
-		if err != nil {
-			return nil, err
-		}
-	}
-	return pt, nil
-}
-
-// GetPrecisionMultiplier will return a multiplier for the precision specified
-func GetPrecisionMultiplier(precision string) int64 {
-	d := time.Nanosecond
-	switch precision {
-	case "u":
-		d = time.Microsecond
-	case "ms":
-		d = time.Millisecond
-	case "s":
-		d = time.Second
-	case "m":
-		d = time.Minute
-	case "h":
-		d = time.Hour
-	}
-	return int64(d)
-}
-
-// scanKey scans buf starting at i for the measurement and tag portion of the point.
-// It returns the ending position and the byte slice of key within buf.  If there
-// are tags, they will be sorted if they are not already.
-func scanKey(buf []byte, i int) (int, []byte, error) {
-	start := skipWhitespace(buf, i)
-
-	i = start
-
-	// Determines whether the tags are sort, assume they are
-	sorted := true
-
-	// indices holds the indexes within buf of the start of each tag.  For example,
-	// a buf of 'cpu,host=a,region=b,zone=c' would have indices slice of [4,11,20]
-	// which indicates that the first tag starts at buf[4], seconds at buf[11], and
-	// last at buf[20]
-	indices := make([]int, 100)
-
-	// tracks how many commas we've seen so we know how many values are indices.
-	// Since indices is an arbitrarily large slice,
-	// we need to know how many values in the buffer are in use.
-	commas := 0
-
-	// First scan the Point's measurement.
-	state, i, err := scanMeasurement(buf, i)
-	if err != nil {
-		return i, buf[start:i], err
-	}
-
-	// Optionally scan tags if needed.
-	if state == tagKeyState {
-		i, commas, indices, err = scanTags(buf, i, indices)
-		if err != nil {
-			return i, buf[start:i], err
-		}
-	}
-
-	// Now we know where the key region is within buf, and the locations of tags, we
-	// need to determine if duplicate tags exist and if the tags are sorted.  This iterates
-	// 1/2 of the list comparing each end with each other, walking towards the center from
-	// both sides.
-	for j := 0; j < commas/2; j++ {
-		// get the left and right tags
-		_, left := scanTo(buf[indices[j]:indices[j+1]-1], 0, '=')
-		_, right := scanTo(buf[indices[commas-j-1]:indices[commas-j]-1], 0, '=')
-
-		// If the tags are equal, then there are duplicate tags, and we should abort
-		if bytes.Equal(left, right) {
-			return i, buf[start:i], fmt.Errorf("duplicate tags")
-		}
-
-		// If left is greater than right, the tags are not sorted.  We must continue
-		// since their could be duplicate tags still.
-		if bytes.Compare(left, right) > 0 {
-			sorted = false
-		}
-	}
-
-	// If the tags are not sorted, then sort them.  This sort is inline and
-	// uses the tag indices we created earlier.  The actual buffer is not sorted, the
-	// indices are using the buffer for value comparison.  After the indices are sorted,
-	// the buffer is reconstructed from the sorted indices.
-	if !sorted && commas > 0 {
-		// Get the measurement name for later
-		measurement := buf[start : indices[0]-1]
-
-		// Sort the indices
-		indices := indices[:commas]
-		insertionSort(0, commas, buf, indices)
-
-		// Create a new key using the measurement and sorted indices
-		b := make([]byte, len(buf[start:i]))
-		pos := copy(b, measurement)
-		for _, i := range indices {
-			b[pos] = ','
-			pos++
-			_, v := scanToSpaceOr(buf, i, ',')
-			pos += copy(b[pos:], v)
-		}
-
-		return i, b, nil
-	}
-
-	return i, buf[start:i], nil
-}
-
-// The following constants allow us to specify which state to move to
-// next, when scanning sections of a Point.
-const (
-	tagKeyState = iota
-	tagValueState
-	fieldsState
-)
-
-// scanMeasurement examines the measurement part of a Point, returning
-// the next state to move to, and the current location in the buffer.
-func scanMeasurement(buf []byte, i int) (int, int, error) {
-	// Check first byte of measurement, anything except a comma is fine.
-	// It can't be a space, since whitespace is stripped prior to this
-	// function call.
-	if buf[i] == ',' {
-		return -1, i, fmt.Errorf("missing measurement")
-	}
-
-	for {
-		i++
-		if i >= len(buf) {
-			// cpu
-			return -1, i, fmt.Errorf("missing fields")
-		}
-
-		if buf[i-1] == '\\' {
-			// Skip character (it's escaped).
-			continue
-		}
-
-		// Unescaped comma; move onto scanning the tags.
-		if buf[i] == ',' {
-			return tagKeyState, i + 1, nil
-		}
-
-		// Unescaped space; move onto scanning the fields.
-		if buf[i] == ' ' {
-			// cpu value=1.0
-			return fieldsState, i, nil
-		}
-	}
-}
-
-// scanTags examines all the tags in a Point, keeping track of and
-// returning the updated indices slice, number of commas and location
-// in buf where to start examining the Point fields.
-func scanTags(buf []byte, i int, indices []int) (int, int, []int, error) {
-	var (
-		err    error
-		commas int
-		state  = tagKeyState
-	)
-
-	for {
-		switch state {
-		case tagKeyState:
-			// Grow our indices slice if we have too many tags.
-			if commas >= len(indices) {
-				newIndics := make([]int, cap(indices)*2)
-				copy(newIndics, indices)
-				indices = newIndics
-			}
-			indices[commas] = i
-			commas++
-
-			i, err = scanTagsKey(buf, i)
-			state = tagValueState // tag value always follows a tag key
-		case tagValueState:
-			state, i, err = scanTagsValue(buf, i)
-		case fieldsState:
-			indices[commas] = i + 1
-			return i, commas, indices, nil
-		}
-
-		if err != nil {
-			return i, commas, indices, err
-		}
-	}
-}
-
-// scanTagsKey scans each character in a tag key.
-func scanTagsKey(buf []byte, i int) (int, error) {
-	// First character of the key.
-	if i >= len(buf) || buf[i] == ' ' || buf[i] == ',' || buf[i] == '=' {
-		// cpu,{'', ' ', ',', '='}
-		return i, fmt.Errorf("missing tag key")
-	}
-
-	// Examine each character in the tag key until we hit an unescaped
-	// equals (the tag value), or we hit an error (i.e., unescaped
-	// space or comma).
-	for {
-		i++
-
-		// Either we reached the end of the buffer or we hit an
-		// unescaped comma or space.
-		if i >= len(buf) ||
-			((buf[i] == ' ' || buf[i] == ',') && buf[i-1] != '\\') {
-			// cpu,tag{'', ' ', ','}
-			return i, fmt.Errorf("missing tag value")
-		}
-
-		if buf[i] == '=' && buf[i-1] != '\\' {
-			// cpu,tag=
-			return i + 1, nil
-		}
-	}
-}
-
-// scanTagsValue scans each character in a tag value.
-func scanTagsValue(buf []byte, i int) (int, int, error) {
-	// Tag value cannot be empty.
-	if i >= len(buf) || buf[i] == ',' || buf[i] == ' ' {
-		// cpu,tag={',', ' '}
-		return -1, i, fmt.Errorf("missing tag value")
-	}
-
-	// Examine each character in the tag value until we hit an unescaped
-	// comma (move onto next tag key), an unescaped space (move onto
-	// fields), or we error out.
-	for {
-		i++
-		if i >= len(buf) {
-			// cpu,tag=value
-			return -1, i, fmt.Errorf("missing fields")
-		}
-
-		// An unescaped equals sign is an invalid tag value.
-		if buf[i] == '=' && buf[i-1] != '\\' {
-			// cpu,tag={'=', 'fo=o'}
-			return -1, i, fmt.Errorf("invalid tag format")
-		}
-
-		if buf[i] == ',' && buf[i-1] != '\\' {
-			// cpu,tag=foo,
-			return tagKeyState, i + 1, nil
-		}
-
-		// cpu,tag=foo value=1.0
-		// cpu, tag=foo\= value=1.0
-		if buf[i] == ' ' && buf[i-1] != '\\' {
-			return fieldsState, i, nil
-		}
-	}
-}
-
-func insertionSort(l, r int, buf []byte, indices []int) {
-	for i := l + 1; i < r; i++ {
-		for j := i; j > l && less(buf, indices, j, j-1); j-- {
-			indices[j], indices[j-1] = indices[j-1], indices[j]
-		}
-	}
-}
-
-func less(buf []byte, indices []int, i, j int) bool {
-	// This grabs the tag names for i & j, it ignores the values
-	_, a := scanTo(buf, indices[i], '=')
-	_, b := scanTo(buf, indices[j], '=')
-	return bytes.Compare(a, b) < 0
-}
-
-func isFieldEscapeChar(b byte) bool {
-	for c := range escape.Codes {
-		if c == b {
-			return true
-		}
-	}
-	return false
-}
-
-// scanFields scans buf, starting at i for the fields section of a point.  It returns
-// the ending position and the byte slice of the fields within buf
-func scanFields(buf []byte, i int) (int, []byte, error) {
-	start := skipWhitespace(buf, i)
-	i = start
-	quoted := false
-
-	// tracks how many '=' we've seen
-	equals := 0
-
-	// tracks how many commas we've seen
-	commas := 0
-
-	for {
-		// reached the end of buf?
-		if i >= len(buf) {
-			break
-		}
-
-		// escaped characters?
-		if buf[i] == '\\' && i+1 < len(buf) {
-			i += 2
-			continue
-		}
-
-		// If the value is quoted, scan until we get to the end quote
-		// Only quote values in the field value since quotes are not significant
-		// in the field key
-		if buf[i] == '"' && equals > commas {
-			quoted = !quoted
-			i++
-			continue
-		}
-
-		// If we see an =, ensure that there is at least on char before and after it
-		if buf[i] == '=' && !quoted {
-			equals++
-
-			// check for "... =123" but allow "a\ =123"
-			if buf[i-1] == ' ' && buf[i-2] != '\\' {
-				return i, buf[start:i], fmt.Errorf("missing field key")
-			}
-
-			// check for "...a=123,=456" but allow "a=123,a\,=456"
-			if buf[i-1] == ',' && buf[i-2] != '\\' {
-				return i, buf[start:i], fmt.Errorf("missing field key")
-			}
-
-			// check for "... value="
-			if i+1 >= len(buf) {
-				return i, buf[start:i], fmt.Errorf("missing field value")
-			}
-
-			// check for "... value=,value2=..."
-			if buf[i+1] == ',' || buf[i+1] == ' ' {
-				return i, buf[start:i], fmt.Errorf("missing field value")
-			}
-
-			if isNumeric(buf[i+1]) || buf[i+1] == '-' || buf[i+1] == 'N' || buf[i+1] == 'n' {
-				var err error
-				i, err = scanNumber(buf, i+1)
-				if err != nil {
-					return i, buf[start:i], err
-				}
-				continue
-			}
-			// If next byte is not a double-quote, the value must be a boolean
-			if buf[i+1] != '"' {
-				var err error
-				i, _, err = scanBoolean(buf, i+1)
-				if err != nil {
-					return i, buf[start:i], err
-				}
-				continue
-			}
-		}
-
-		if buf[i] == ',' && !quoted {
-			commas++
-		}
-
-		// reached end of block?
-		if buf[i] == ' ' && !quoted {
-			break
-		}
-		i++
-	}
-
-	if quoted {
-		return i, buf[start:i], fmt.Errorf("unbalanced quotes")
-	}
-
-	// check that all field sections had key and values (e.g. prevent "a=1,b"
-	if equals == 0 || commas != equals-1 {
-		return i, buf[start:i], fmt.Errorf("invalid field format")
-	}
-
-	return i, buf[start:i], nil
-}
-
-// scanTime scans buf, starting at i for the time section of a point.  It returns
-// the ending position and the byte slice of the fields within buf and error if the
-// timestamp is not in the correct numeric format
-func scanTime(buf []byte, i int) (int, []byte, error) {
-	start := skipWhitespace(buf, i)
-	i = start
-	for {
-		// reached the end of buf?
-		if i >= len(buf) {
-			break
-		}
-
-		// Timestamps should be integers, make sure they are so we don't need to actually
-		// parse the timestamp until needed
-		if buf[i] < '0' || buf[i] > '9' {
-			// Handle negative timestamps
-			if i == start && buf[i] == '-' {
-				i++
-				continue
-			}
-			return i, buf[start:i], fmt.Errorf("bad timestamp")
-		}
-
-		// reached end of block?
-		if buf[i] == '\n' {
-			break
-		}
-		i++
-	}
-	return i, buf[start:i], nil
-}
-
-func isNumeric(b byte) bool {
-	return (b >= '0' && b <= '9') || b == '.'
-}
-
-// scanNumber returns the end position within buf, start at i after
-// scanning over buf for an integer, or float.  It returns an
-// error if a invalid number is scanned.
-func scanNumber(buf []byte, i int) (int, error) {
-	start := i
-	var isInt bool
-
-	// Is negative number?
-	if i < len(buf) && buf[i] == '-' {
-		i++
-		// There must be more characters now, as just '-' is illegal.
-		if i == len(buf) {
-			return i, ErrInvalidNumber
-		}
-	}
-
-	// how many decimal points we've see
-	decimal := false
-
-	// indicates the number is float in scientific notation
-	scientific := false
-
-	for {
-		if i >= len(buf) {
-			break
-		}
-
-		if buf[i] == ',' || buf[i] == ' ' {
-			break
-		}
-
-		if buf[i] == 'i' && i > start && !isInt {
-			isInt = true
-			i++
-			continue
-		}
-
-		if buf[i] == '.' {
-			// Can't have more than 1 decimal (e.g. 1.1.1 should fail)
-			if decimal {
-				return i, ErrInvalidNumber
-			}
-			decimal = true
-		}
-
-		// `e` is valid for floats but not as the first char
-		if i > start && (buf[i] == 'e' || buf[i] == 'E') {
-			scientific = true
-			i++
-			continue
-		}
-
-		// + and - are only valid at this point if they follow an e (scientific notation)
-		if (buf[i] == '+' || buf[i] == '-') && (buf[i-1] == 'e' || buf[i-1] == 'E') {
-			i++
-			continue
-		}
-
-		// NaN is an unsupported value
-		if i+2 < len(buf) && (buf[i] == 'N' || buf[i] == 'n') {
-			return i, ErrInvalidNumber
-		}
-
-		if !isNumeric(buf[i]) {
-			return i, ErrInvalidNumber
-		}
-		i++
-	}
-
-	if isInt && (decimal || scientific) {
-		return i, ErrInvalidNumber
-	}
-
-	numericDigits := i - start
-	if isInt {
-		numericDigits--
-	}
-	if decimal {
-		numericDigits--
-	}
-	if buf[start] == '-' {
-		numericDigits--
-	}
-
-	if numericDigits == 0 {
-		return i, ErrInvalidNumber
-	}
-
-	// It's more common that numbers will be within min/max range for their type but we need to prevent
-	// out or range numbers from being parsed successfully.  This uses some simple heuristics to decide
-	// if we should parse the number to the actual type.  It does not do it all the time because it incurs
-	// extra allocations and we end up converting the type again when writing points to disk.
-	if isInt {
-		// Make sure the last char is an 'i' for integers (e.g. 9i10 is not valid)
-		if buf[i-1] != 'i' {
-			return i, ErrInvalidNumber
-		}
-		// Parse the int to check bounds the number of digits could be larger than the max range
-		// We subtract 1 from the index to remove the `i` from our tests
-		if len(buf[start:i-1]) >= maxInt64Digits || len(buf[start:i-1]) >= minInt64Digits {
-			if _, err := strconv.ParseInt(string(buf[start:i-1]), 10, 64); err != nil {
-				return i, fmt.Errorf("unable to parse integer %s: %s", buf[start:i-1], err)
-			}
-		}
-	} else {
-		// Parse the float to check bounds if it's scientific or the number of digits could be larger than the max range
-		if scientific || len(buf[start:i]) >= maxFloat64Digits || len(buf[start:i]) >= minFloat64Digits {
-			if _, err := strconv.ParseFloat(string(buf[start:i]), 10); err != nil {
-				return i, fmt.Errorf("invalid float")
-			}
-		}
-	}
-
-	return i, nil
-}
-
-// scanBoolean returns the end position within buf, start at i after
-// scanning over buf for boolean. Valid values for a boolean are
-// t, T, true, TRUE, f, F, false, FALSE.  It returns an error if a invalid boolean
-// is scanned.
-func scanBoolean(buf []byte, i int) (int, []byte, error) {
-	start := i
-
-	if i < len(buf) && (buf[i] != 't' && buf[i] != 'f' && buf[i] != 'T' && buf[i] != 'F') {
-		return i, buf[start:i], fmt.Errorf("invalid boolean")
-	}
-
-	i++
-	for {
-		if i >= len(buf) {
-			break
-		}
-
-		if buf[i] == ',' || buf[i] == ' ' {
-			break
-		}
-		i++
-	}
-
-	// Single char bool (t, T, f, F) is ok
-	if i-start == 1 {
-		return i, buf[start:i], nil
-	}
-
-	// length must be 4 for true or TRUE
-	if (buf[start] == 't' || buf[start] == 'T') && i-start != 4 {
-		return i, buf[start:i], fmt.Errorf("invalid boolean")
-	}
-
-	// length must be 5 for false or FALSE
-	if (buf[start] == 'f' || buf[start] == 'F') && i-start != 5 {
-		return i, buf[start:i], fmt.Errorf("invalid boolean")
-	}
-
-	// Otherwise
-	valid := false
-	switch buf[start] {
-	case 't':
-		valid = bytes.Equal(buf[start:i], []byte("true"))
-	case 'f':
-		valid = bytes.Equal(buf[start:i], []byte("false"))
-	case 'T':
-		valid = bytes.Equal(buf[start:i], []byte("TRUE")) || bytes.Equal(buf[start:i], []byte("True"))
-	case 'F':
-		valid = bytes.Equal(buf[start:i], []byte("FALSE")) || bytes.Equal(buf[start:i], []byte("False"))
-	}
-
-	if !valid {
-		return i, buf[start:i], fmt.Errorf("invalid boolean")
-	}
-
-	return i, buf[start:i], nil
-
-}
-
-// skipWhitespace returns the end position within buf, starting at i after
-// scanning over spaces in tags
-func skipWhitespace(buf []byte, i int) int {
-	for i < len(buf) {
-		if buf[i] != ' ' && buf[i] != '\t' && buf[i] != 0 {
-			break
-		}
-		i++
-	}
-	return i
-}
-
-// scanLine returns the end position in buf and the next line found within
-// buf.
-func scanLine(buf []byte, i int) (int, []byte) {
-	start := i
-	quoted := false
-	fields := false
-
-	// tracks how many '=' and commas we've seen
-	// this duplicates some of the functionality in scanFields
-	equals := 0
-	commas := 0
-	for {
-		// reached the end of buf?
-		if i >= len(buf) {
-			break
-		}
-
-		// skip past escaped characters
-		if buf[i] == '\\' {
-			i += 2
-			continue
-		}
-
-		if buf[i] == ' ' {
-			fields = true
-		}
-
-		// If we see a double quote, makes sure it is not escaped
-		if fields {
-			if !quoted && buf[i] == '=' {
-				i++
-				equals++
-				continue
-			} else if !quoted && buf[i] == ',' {
-				i++
-				commas++
-				continue
-			} else if buf[i] == '"' && equals > commas {
-				i++
-				quoted = !quoted
-				continue
-			}
-		}
-
-		if buf[i] == '\n' && !quoted {
-			break
-		}
-
-		i++
-	}
-
-	return i, buf[start:i]
-}
-
-// scanTo returns the end position in buf and the next consecutive block
-// of bytes, starting from i and ending with stop byte, where stop byte
-// has not been escaped.
-//
-// If there are leading spaces, they are skipped.
-func scanTo(buf []byte, i int, stop byte) (int, []byte) {
-	start := i
-	for {
-		// reached the end of buf?
-		if i >= len(buf) {
-			break
-		}
-
-		// Reached unescaped stop value?
-		if buf[i] == stop && (i == 0 || buf[i-1] != '\\') {
-			break
-		}
-		i++
-	}
-
-	return i, buf[start:i]
-}
-
-// scanTo returns the end position in buf and the next consecutive block
-// of bytes, starting from i and ending with stop byte.  If there are leading
-// spaces, they are skipped.
-func scanToSpaceOr(buf []byte, i int, stop byte) (int, []byte) {
-	start := i
-	if buf[i] == stop || buf[i] == ' ' {
-		return i, buf[start:i]
-	}
-
-	for {
-		i++
-		if buf[i-1] == '\\' {
-			continue
-		}
-
-		// reached the end of buf?
-		if i >= len(buf) {
-			return i, buf[start:i]
-		}
-
-		// reached end of block?
-		if buf[i] == stop || buf[i] == ' ' {
-			return i, buf[start:i]
-		}
-	}
-}
-
-func scanTagValue(buf []byte, i int) (int, []byte) {
-	start := i
-	for {
-		if i >= len(buf) {
-			break
-		}
-
-		if buf[i] == ',' && buf[i-1] != '\\' {
-			break
-		}
-		i++
-	}
-	return i, buf[start:i]
-}
-
-func scanFieldValue(buf []byte, i int) (int, []byte) {
-	start := i
-	quoted := false
-	for {
-		if i >= len(buf) {
-			break
-		}
-
-		// Only escape char for a field value is a double-quote
-		if buf[i] == '\\' && i+1 < len(buf) && buf[i+1] == '"' {
-			i += 2
-			continue
-		}
-
-		// Quoted value? (e.g. string)
-		if buf[i] == '"' {
-			i++
-			quoted = !quoted
-			continue
-		}
-
-		if buf[i] == ',' && !quoted {
-			break
-		}
-		i++
-	}
-	return i, buf[start:i]
-}
-
-func escapeMeasurement(in []byte) []byte {
-	for b, esc := range measurementEscapeCodes {
-		in = bytes.Replace(in, []byte{b}, esc, -1)
-	}
-	return in
-}
-
-func unescapeMeasurement(in []byte) []byte {
-	for b, esc := range measurementEscapeCodes {
-		in = bytes.Replace(in, esc, []byte{b}, -1)
-	}
-	return in
-}
-
-func escapeTag(in []byte) []byte {
-	for b, esc := range tagEscapeCodes {
-		if bytes.IndexByte(in, b) != -1 {
-			in = bytes.Replace(in, []byte{b}, esc, -1)
-		}
-	}
-	return in
-}
-
-func unescapeTag(in []byte) []byte {
-	for b, esc := range tagEscapeCodes {
-		if bytes.IndexByte(in, b) != -1 {
-			in = bytes.Replace(in, esc, []byte{b}, -1)
-		}
-	}
-	return in
-}
-
-// escapeStringField returns a copy of in with any double quotes or
-// backslashes with escaped values
-func escapeStringField(in string) string {
-	var out []byte
-	i := 0
-	for {
-		if i >= len(in) {
-			break
-		}
-		// escape double-quotes
-		if in[i] == '\\' {
-			out = append(out, '\\')
-			out = append(out, '\\')
-			i++
-			continue
-		}
-		// escape double-quotes
-		if in[i] == '"' {
-			out = append(out, '\\')
-			out = append(out, '"')
-			i++
-			continue
-		}
-		out = append(out, in[i])
-		i++
-
-	}
-	return string(out)
-}
-
-// unescapeStringField returns a copy of in with any escaped double-quotes
-// or backslashes unescaped
-func unescapeStringField(in string) string {
-	if strings.IndexByte(in, '\\') == -1 {
-		return in
-	}
-
-	var out []byte
-	i := 0
-	for {
-		if i >= len(in) {
-			break
-		}
-		// unescape backslashes
-		if in[i] == '\\' && i+1 < len(in) && in[i+1] == '\\' {
-			out = append(out, '\\')
-			i += 2
-			continue
-		}
-		// unescape double-quotes
-		if in[i] == '\\' && i+1 < len(in) && in[i+1] == '"' {
-			out = append(out, '"')
-			i += 2
-			continue
-		}
-		out = append(out, in[i])
-		i++
-
-	}
-	return string(out)
-}
-
-// NewPoint returns a new point with the given measurement name, tags, fields and timestamp.  If
-// an unsupported field value (NaN) or out of range time is passed, this function returns an error.
-func NewPoint(name string, tags Tags, fields Fields, time time.Time) (Point, error) {
-	if len(fields) == 0 {
-		return nil, ErrPointMustHaveAField
-	}
-	if !time.IsZero() {
-		if err := CheckTime(time); err != nil {
-			return nil, err
-		}
-	}
-
-	for key, value := range fields {
-		if fv, ok := value.(float64); ok {
-			// Ensure the caller validates and handles invalid field values
-			if math.IsNaN(fv) {
-				return nil, fmt.Errorf("NaN is an unsupported value for field %s", key)
-			}
-		}
-		if len(key) == 0 {
-			return nil, fmt.Errorf("all fields must have non-empty names")
-		}
-	}
-
-	key := MakeKey([]byte(name), tags)
-	if len(key) > MaxKeyLength {
-		return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength)
-	}
-
-	return &point{
-		key:    key,
-		time:   time,
-		fields: fields.MarshalBinary(),
-	}, nil
-}
-
-// NewPointFromBytes returns a new Point from a marshalled Point.
-func NewPointFromBytes(b []byte) (Point, error) {
-	p := &point{}
-	if err := p.UnmarshalBinary(b); err != nil {
-		return nil, err
-	}
-	if len(p.Fields()) == 0 {
-		return nil, ErrPointMustHaveAField
-	}
-	return p, nil
-}
-
-// MustNewPoint returns a new point with the given measurement name, tags, fields and timestamp.  If
-// an unsupported field value (NaN) is passed, this function panics.
-func MustNewPoint(name string, tags Tags, fields Fields, time time.Time) Point {
-	pt, err := NewPoint(name, tags, fields, time)
-	if err != nil {
-		panic(err.Error())
-	}
-	return pt
-}
-
-func (p *point) Data() []byte {
-	return p.data
-}
-
-func (p *point) SetData(b []byte) {
-	p.data = b
-}
-
-func (p *point) Key() []byte {
-	return p.key
-}
-
-func (p *point) name() []byte {
-	_, name := scanTo(p.key, 0, ',')
-	return name
-}
-
-// Name return the measurement name for the point
-func (p *point) Name() string {
-	if p.cachedName != "" {
-		return p.cachedName
-	}
-	p.cachedName = string(escape.Unescape(p.name()))
-	return p.cachedName
-}
-
-// SetName updates the measurement name for the point
-func (p *point) SetName(name string) {
-	p.cachedName = ""
-	p.key = MakeKey([]byte(name), p.Tags())
-}
-
-// Time return the timestamp for the point
-func (p *point) Time() time.Time {
-	return p.time
-}
-
-// SetTime updates the timestamp for the point
-func (p *point) SetTime(t time.Time) {
-	p.time = t
-}
-
-// Tags returns the tag set for the point
-func (p *point) Tags() Tags {
-	return parseTags(p.key)
-}
-
-func parseTags(buf []byte) Tags {
-	tags := map[string]string{}
-
-	if len(buf) != 0 {
-		pos, name := scanTo(buf, 0, ',')
-
-		// it's an empyt key, so there are no tags
-		if len(name) == 0 {
-			return tags
-		}
-
-		i := pos + 1
-		var key, value []byte
-		for {
-			if i >= len(buf) {
-				break
-			}
-			i, key = scanTo(buf, i, '=')
-			i, value = scanTagValue(buf, i+1)
-
-			if len(value) == 0 {
-				continue
-			}
-
-			tags[string(unescapeTag(key))] = string(unescapeTag(value))
-
-			i++
-		}
-	}
-	return tags
-}
-
-// MakeKey creates a key for a set of tags.
-func MakeKey(name []byte, tags Tags) []byte {
-	// unescape the name and then re-escape it to avoid double escaping.
-	// The key should always be stored in escaped form.
-	return append(escapeMeasurement(unescapeMeasurement(name)), tags.HashKey()...)
-}
-
-// SetTags replaces the tags for the point
-func (p *point) SetTags(tags Tags) {
-	p.key = MakeKey([]byte(p.Name()), tags)
-}
-
-// AddTag adds or replaces a tag value for a point
-func (p *point) AddTag(key, value string) {
-	tags := p.Tags()
-	tags[key] = value
-	p.key = MakeKey([]byte(p.Name()), tags)
-}
-
-// Fields returns the fields for the point
-func (p *point) Fields() Fields {
-	if p.cachedFields != nil {
-		return p.cachedFields
-	}
-	p.cachedFields = p.unmarshalBinary()
-	return p.cachedFields
-}
-
-// SetPrecision will round a time to the specified precision
-func (p *point) SetPrecision(precision string) {
-	switch precision {
-	case "n":
-	case "u":
-		p.SetTime(p.Time().Truncate(time.Microsecond))
-	case "ms":
-		p.SetTime(p.Time().Truncate(time.Millisecond))
-	case "s":
-		p.SetTime(p.Time().Truncate(time.Second))
-	case "m":
-		p.SetTime(p.Time().Truncate(time.Minute))
-	case "h":
-		p.SetTime(p.Time().Truncate(time.Hour))
-	}
-}
-
-func (p *point) String() string {
-	if p.Time().IsZero() {
-		return string(p.Key()) + " " + string(p.fields)
-	}
-	return string(p.Key()) + " " + string(p.fields) + " " + strconv.FormatInt(p.UnixNano(), 10)
-}
-
-func (p *point) MarshalBinary() ([]byte, error) {
-	tb, err := p.time.MarshalBinary()
-	if err != nil {
-		return nil, err
-	}
-
-	b := make([]byte, 8+len(p.key)+len(p.fields)+len(tb))
-	i := 0
-
-	binary.BigEndian.PutUint32(b[i:], uint32(len(p.key)))
-	i += 4
-
-	i += copy(b[i:], p.key)
-
-	binary.BigEndian.PutUint32(b[i:i+4], uint32(len(p.fields)))
-	i += 4
-
-	i += copy(b[i:], p.fields)
-
-	copy(b[i:], tb)
-	return b, nil
-}
-
-func (p *point) UnmarshalBinary(b []byte) error {
-	var i int
-	keyLen := int(binary.BigEndian.Uint32(b[:4]))
-	i += int(4)
-
-	p.key = b[i : i+keyLen]
-	i += keyLen
-
-	fieldLen := int(binary.BigEndian.Uint32(b[i : i+4]))
-	i += int(4)
-
-	p.fields = b[i : i+fieldLen]
-	i += fieldLen
-
-	p.time = time.Now()
-	p.time.UnmarshalBinary(b[i:])
-	return nil
-}
-
-func (p *point) PrecisionString(precision string) string {
-	if p.Time().IsZero() {
-		return fmt.Sprintf("%s %s", p.Key(), string(p.fields))
-	}
-	return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields),
-		p.UnixNano()/GetPrecisionMultiplier(precision))
-}
-
-func (p *point) RoundedString(d time.Duration) string {
-	if p.Time().IsZero() {
-		return fmt.Sprintf("%s %s", p.Key(), string(p.fields))
-	}
-	return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields),
-		p.time.Round(d).UnixNano())
-}
-
-func (p *point) unmarshalBinary() Fields {
-	return newFieldsFromBinary(p.fields)
-}
-
-func (p *point) HashID() uint64 {
-	h := fnv.New64a()
-	h.Write(p.key)
-	sum := h.Sum64()
-	return sum
-}
-
-func (p *point) UnixNano() int64 {
-	return p.Time().UnixNano()
-}
-
-// Tags represents a mapping between a Point's tag names and their
-// values.
-type Tags map[string]string
-
-// HashKey hashes all of a tag's keys.
-func (t Tags) HashKey() []byte {
-	// Empty maps marshal to empty bytes.
-	if len(t) == 0 {
-		return nil
-	}
-
-	escaped := Tags{}
-	for k, v := range t {
-		ek := escapeTag([]byte(k))
-		ev := escapeTag([]byte(v))
-
-		if len(ev) > 0 {
-			escaped[string(ek)] = string(ev)
-		}
-	}
-
-	// Extract keys and determine final size.
-	sz := len(escaped) + (len(escaped) * 2) // separators
-	keys := make([]string, len(escaped)+1)
-	i := 0
-	for k, v := range escaped {
-		keys[i] = k
-		i++
-		sz += len(k) + len(v)
-	}
-	keys = keys[:i]
-	sort.Strings(keys)
-	// Generate marshaled bytes.
-	b := make([]byte, sz)
-	buf := b
-	idx := 0
-	for _, k := range keys {
-		buf[idx] = ','
-		idx++
-		copy(buf[idx:idx+len(k)], k)
-		idx += len(k)
-		buf[idx] = '='
-		idx++
-		v := escaped[k]
-		copy(buf[idx:idx+len(v)], v)
-		idx += len(v)
-	}
-	return b[:idx]
-}
-
-// Fields represents a mapping between a Point's field names and their
-// values.
-type Fields map[string]interface{}
-
-func parseNumber(val []byte) (interface{}, error) {
-	if val[len(val)-1] == 'i' {
-		val = val[:len(val)-1]
-		return strconv.ParseInt(string(val), 10, 64)
-	}
-	for i := 0; i < len(val); i++ {
-		// If there is a decimal or an N (NaN), I (Inf), parse as float
-		if val[i] == '.' || val[i] == 'N' || val[i] == 'n' || val[i] == 'I' || val[i] == 'i' || val[i] == 'e' {
-			return strconv.ParseFloat(string(val), 64)
-		}
-		if val[i] < '0' && val[i] > '9' {
-			return string(val), nil
-		}
-	}
-	return strconv.ParseFloat(string(val), 64)
-}
-
-func newFieldsFromBinary(buf []byte) Fields {
-	fields := make(Fields, 8)
-	var (
-		i              int
-		name, valueBuf []byte
-		value          interface{}
-		err            error
-	)
-	for i < len(buf) {
-
-		i, name = scanTo(buf, i, '=')
-		name = escape.Unescape(name)
-
-		i, valueBuf = scanFieldValue(buf, i+1)
-		if len(name) > 0 {
-			if len(valueBuf) == 0 {
-				fields[string(name)] = nil
-				continue
-			}
-
-			// If the first char is a double-quote, then unmarshal as string
-			if valueBuf[0] == '"' {
-				value = unescapeStringField(string(valueBuf[1 : len(valueBuf)-1]))
-				// Check for numeric characters and special NaN or Inf
-			} else if (valueBuf[0] >= '0' && valueBuf[0] <= '9') || valueBuf[0] == '-' || valueBuf[0] == '.' ||
-				valueBuf[0] == 'N' || valueBuf[0] == 'n' || // NaN
-				valueBuf[0] == 'I' || valueBuf[0] == 'i' { // Inf
-
-				value, err = parseNumber(valueBuf)
-				if err != nil {
-					panic(fmt.Sprintf("unable to parse number value '%v': %v", string(valueBuf), err))
-				}
-
-				// Otherwise parse it as bool
-			} else {
-				value, err = strconv.ParseBool(string(valueBuf))
-				if err != nil {
-					panic(fmt.Sprintf("unable to parse bool value '%v': %v\n", string(valueBuf), err))
-				}
-			}
-			fields[string(name)] = value
-		}
-		i++
-	}
-	return fields
-}
-
-// MarshalBinary encodes all the fields to their proper type and returns the binary
-// represenation
-// NOTE: uint64 is specifically not supported due to potential overflow when we decode
-// again later to an int64
-func (p Fields) MarshalBinary() []byte {
-	b := []byte{}
-	keys := make([]string, len(p))
-	i := 0
-	for k := range p {
-		keys[i] = k
-		i++
-	}
-	sort.Strings(keys)
-
-	for _, k := range keys {
-		v := p[k]
-		b = append(b, []byte(escape.String(k))...)
-		b = append(b, '=')
-		switch t := v.(type) {
-		case int:
-			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
-			b = append(b, 'i')
-		case int8:
-			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
-			b = append(b, 'i')
-		case int16:
-			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
-			b = append(b, 'i')
-		case int32:
-			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
-			b = append(b, 'i')
-		case int64:
-			b = append(b, []byte(strconv.FormatInt(t, 10))...)
-			b = append(b, 'i')
-		case uint:
-			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
-			b = append(b, 'i')
-		case uint8:
-			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
-			b = append(b, 'i')
-		case uint16:
-			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
-			b = append(b, 'i')
-		case uint32:
-			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
-			b = append(b, 'i')
-		case float32:
-			val := []byte(strconv.FormatFloat(float64(t), 'f', -1, 32))
-			b = append(b, val...)
-		case float64:
-			val := []byte(strconv.FormatFloat(t, 'f', -1, 64))
-			b = append(b, val...)
-		case bool:
-			b = append(b, []byte(strconv.FormatBool(t))...)
-		case []byte:
-			b = append(b, t...)
-		case string:
-			b = append(b, '"')
-			b = append(b, []byte(escapeStringField(t))...)
-			b = append(b, '"')
-		case nil:
-			// skip
-		default:
-			// Can't determine the type, so convert to string
-			b = append(b, '"')
-			b = append(b, []byte(escapeStringField(fmt.Sprintf("%v", v)))...)
-			b = append(b, '"')
-
-		}
-		b = append(b, ',')
-	}
-	if len(b) > 0 {
-		return b[0 : len(b)-1]
-	}
-	return b
-}
-
-type indexedSlice struct {
-	indices []int
-	b       []byte
-}
-
-func (s *indexedSlice) Less(i, j int) bool {
-	_, a := scanTo(s.b, s.indices[i], '=')
-	_, b := scanTo(s.b, s.indices[j], '=')
-	return bytes.Compare(a, b) < 0
-}
-
-func (s *indexedSlice) Swap(i, j int) {
-	s.indices[i], s.indices[j] = s.indices[j], s.indices[i]
-}
-
-func (s *indexedSlice) Len() int {
-	return len(s.indices)
-}

+ 0 - 60
Godeps/_workspace/src/github.com/influxdata/influxdb/models/rows.go

@@ -1,60 +0,0 @@
-package models
-
-import (
-	"hash/fnv"
-	"sort"
-)
-
-// Row represents a single row returned from the execution of a statement.
-type Row struct {
-	Name    string            `json:"name,omitempty"`
-	Tags    map[string]string `json:"tags,omitempty"`
-	Columns []string          `json:"columns,omitempty"`
-	Values  [][]interface{}   `json:"values,omitempty"`
-	Err     error             `json:"err,omitempty"`
-}
-
-// SameSeries returns true if r contains values for the same series as o.
-func (r *Row) SameSeries(o *Row) bool {
-	return r.tagsHash() == o.tagsHash() && r.Name == o.Name
-}
-
-// tagsHash returns a hash of tag key/value pairs.
-func (r *Row) tagsHash() uint64 {
-	h := fnv.New64a()
-	keys := r.tagsKeys()
-	for _, k := range keys {
-		h.Write([]byte(k))
-		h.Write([]byte(r.Tags[k]))
-	}
-	return h.Sum64()
-}
-
-// tagKeys returns a sorted list of tag keys.
-func (r *Row) tagsKeys() []string {
-	a := make([]string, 0, len(r.Tags))
-	for k := range r.Tags {
-		a = append(a, k)
-	}
-	sort.Strings(a)
-	return a
-}
-
-// Rows represents a collection of rows. Rows implements sort.Interface.
-type Rows []*Row
-
-func (p Rows) Len() int { return len(p) }
-
-func (p Rows) Less(i, j int) bool {
-	// Sort by name first.
-	if p[i].Name != p[j].Name {
-		return p[i].Name < p[j].Name
-	}
-
-	// Sort by tag set hash. Tags don't have a meaningful sort order so we
-	// just compute a hash and sort by that instead. This allows the tests
-	// to receive rows in a predictable order every time.
-	return p[i].tagsHash() < p[j].tagsHash()
-}
-
-func (p Rows) Swap(i, j int) { p[i], p[j] = p[j], p[i] }

+ 0 - 51
Godeps/_workspace/src/github.com/influxdata/influxdb/models/time.go

@@ -1,51 +0,0 @@
-package models
-
-// Helper time methods since parsing time can easily overflow and we only support a
-// specific time range.
-
-import (
-	"fmt"
-	"math"
-	"time"
-)
-
-var (
-	// MaxNanoTime is the maximum time that can be represented via int64 nanoseconds since the epoch.
-	MaxNanoTime = time.Unix(0, math.MaxInt64).UTC()
-	// MinNanoTime is the minumum time that can be represented via int64 nanoseconds since the epoch.
-	MinNanoTime = time.Unix(0, math.MinInt64).UTC()
-
-	// ErrTimeOutOfRange gets returned when time is out of the representable range using int64 nanoseconds since the epoch.
-	ErrTimeOutOfRange = fmt.Errorf("time outside range %s - %s", MinNanoTime, MaxNanoTime)
-)
-
-// SafeCalcTime safely calculates the time given. Will return error if the time is outside the
-// supported range.
-func SafeCalcTime(timestamp int64, precision string) (time.Time, error) {
-	mult := GetPrecisionMultiplier(precision)
-	if t, ok := safeSignedMult(timestamp, mult); ok {
-		return time.Unix(0, t).UTC(), nil
-	}
-
-	return time.Time{}, ErrTimeOutOfRange
-}
-
-// CheckTime checks that a time is within the safe range.
-func CheckTime(t time.Time) error {
-	if t.Before(MinNanoTime) || t.After(MaxNanoTime) {
-		return ErrTimeOutOfRange
-	}
-	return nil
-}
-
-// Perform the multiplication and check to make sure it didn't overflow.
-func safeSignedMult(a, b int64) (int64, bool) {
-	if a == 0 || b == 0 || a == 1 || b == 1 {
-		return a * b, true
-	}
-	if a == math.MinInt64 || b == math.MaxInt64 {
-		return 0, false
-	}
-	c := a * b
-	return c, c/b == a
-}

+ 0 - 53
Godeps/_workspace/src/github.com/influxdata/influxdb/pkg/escape/bytes.go

@@ -1,53 +0,0 @@
-package escape
-
-import "bytes"
-
-func Bytes(in []byte) []byte {
-	for b, esc := range Codes {
-		in = bytes.Replace(in, []byte{b}, esc, -1)
-	}
-	return in
-}
-
-func Unescape(in []byte) []byte {
-	if len(in) == 0 {
-		return nil
-	}
-
-	if bytes.IndexByte(in, '\\') == -1 {
-		return in
-	}
-
-	i := 0
-	inLen := len(in)
-	var out []byte
-
-	for {
-		if i >= inLen {
-			break
-		}
-		if in[i] == '\\' && i+1 < inLen {
-			switch in[i+1] {
-			case ',':
-				out = append(out, ',')
-				i += 2
-				continue
-			case '"':
-				out = append(out, '"')
-				i += 2
-				continue
-			case ' ':
-				out = append(out, ' ')
-				i += 2
-				continue
-			case '=':
-				out = append(out, '=')
-				i += 2
-				continue
-			}
-		}
-		out = append(out, in[i])
-		i += 1
-	}
-	return out
-}

+ 0 - 34
Godeps/_workspace/src/github.com/influxdata/influxdb/pkg/escape/strings.go

@@ -1,34 +0,0 @@
-package escape
-
-import "strings"
-
-var (
-	Codes = map[byte][]byte{
-		',': []byte(`\,`),
-		'"': []byte(`\"`),
-		' ': []byte(`\ `),
-		'=': []byte(`\=`),
-	}
-
-	codesStr = map[string]string{}
-)
-
-func init() {
-	for k, v := range Codes {
-		codesStr[string(k)] = string(v)
-	}
-}
-
-func UnescapeString(in string) string {
-	for b, esc := range codesStr {
-		in = strings.Replace(in, esc, b, -1)
-	}
-	return in
-}
-
-func String(in string) string {
-	for b, esc := range codesStr {
-		in = strings.Replace(in, b, esc, -1)
-	}
-	return in
-}