Browse Source

Merge branch 'new-logger'

Conflicts:
	CHANGELOG.md
Torkel Ödegaard 9 years ago
parent
commit
b25cb60259
58 changed files with 2485 additions and 3583 deletions
  1. 4 0
      CHANGELOG.md
  2. 11 11
      Godeps/Godeps.json
  3. 16 0
      Godeps/_workspace/src/github.com/go-stack/stack/.travis.yml
  4. 13 0
      Godeps/_workspace/src/github.com/go-stack/stack/LICENSE.md
  5. 38 0
      Godeps/_workspace/src/github.com/go-stack/stack/README.md
  6. 349 0
      Godeps/_workspace/src/github.com/go-stack/stack/stack.go
  7. 10 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/.travis.yml
  8. 11 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/CONTRIBUTORS
  9. 13 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/LICENSE
  10. 70 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/README.md
  11. 333 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/doc.go
  12. 257 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/format.go
  13. 356 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/handler.go
  14. 26 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/handler_go13.go
  15. 23 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/handler_go14.go
  16. 208 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/logger.go
  17. 67 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/root.go
  18. 55 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/syslog.go
  19. 21 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/term/LICENSE
  20. 13 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_appengine.go
  21. 12 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_darwin.go
  22. 18 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_freebsd.go
  23. 14 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_linux.go
  24. 20 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_notwindows.go
  25. 7 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_openbsd.go
  26. 26 0
      Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_windows.go
  27. 0 267
      Godeps/_workspace/src/github.com/influxdata/influxdb/client/README.md
  28. 0 789
      Godeps/_workspace/src/github.com/influxdata/influxdb/client/influxdb.go
  29. 0 46
      Godeps/_workspace/src/github.com/influxdata/influxdb/models/consistency.go
  30. 0 1576
      Godeps/_workspace/src/github.com/influxdata/influxdb/models/points.go
  31. 0 60
      Godeps/_workspace/src/github.com/influxdata/influxdb/models/rows.go
  32. 0 51
      Godeps/_workspace/src/github.com/influxdata/influxdb/models/time.go
  33. 0 53
      Godeps/_workspace/src/github.com/influxdata/influxdb/pkg/escape/bytes.go
  34. 0 34
      Godeps/_workspace/src/github.com/influxdata/influxdb/pkg/escape/strings.go
  35. 2 5
      conf/defaults.ini
  36. 1 4
      conf/sample.ini
  37. 3 0
      pkg/api/api.go
  38. 6 0
      pkg/api/metrics.go
  39. 5 3
      pkg/cmd/grafana-server/main.go
  40. 11 5
      pkg/cmd/grafana-server/web.go
  41. 0 157
      pkg/log/console.go
  42. 21 48
      pkg/log/file.go
  43. 5 0
      pkg/log/handlers.go
  44. 31 0
      pkg/log/interface.go
  45. 132 255
      pkg/log/log.go
  46. 23 29
      pkg/log/syslog.go
  47. 11 5
      pkg/login/settings.go
  48. 5 3
      pkg/metrics/publish.go
  49. 4 7
      pkg/metrics/settings.go
  50. 6 16
      pkg/middleware/logger.go
  51. 5 0
      pkg/middleware/middleware.go
  52. 174 0
      pkg/middleware/recovery.go
  53. 1 2
      pkg/plugins/models.go
  54. 7 4
      pkg/plugins/plugins.go
  55. 2 1
      pkg/services/sqlstore/migrations/migrations_test.go
  56. 9 18
      pkg/services/sqlstore/migrator/migrator.go
  57. 7 18
      pkg/services/sqlstore/sqlstore.go
  58. 23 116
      pkg/setting/setting.go

+ 4 - 0
CHANGELOG.md

@@ -9,6 +9,10 @@
 * **Page Footer**: Added page footer with links to docs, shows Grafana version and info if new version is available, closes [#4889](https://github.com/grafana/grafana/pull/4889)
 * **InfluxDB**: Add spread function, closes [#5211](https://github.com/grafana/grafana/issues/5211)
 * **Scripts**: Use restart instead of start for deb package script, closes [#5282](https://github.com/grafana/grafana/pull/5282)
+* **Logging**: Moved to structured logging lib, and moved to component specific level filters via config file, closes [#4590](https://github.com/grafana/grafana/issues/4590)
+
+## Breaking changes
+* **Logging** : Changed default logging output format (now structured into message, and key value pairs, with logger key acting as component). You can also no change in config to json log ouput.
 
 # 3.0.4 Patch release (2016-05-25)
 * **Panel**: Fixed blank dashboard issue when switching to other dashboard while in fullscreen edit mode, fixes [#5163](https://github.com/grafana/grafana/pull/5163)

+ 11 - 11
Godeps/Godeps.json

@@ -205,6 +205,11 @@
 			"Comment": "v1.2-171-g267b128",
 			"Rev": "267b128680c46286b9ca13475c3cca5de8f79bd7"
 		},
+		{
+			"ImportPath": "github.com/go-stack/stack",
+			"Comment": "v1.5.2",
+			"Rev": "100eb0c0a9c5b306ca2fb4f165df21d80ada4b82"
+		},
 		{
 			"ImportPath": "github.com/go-xorm/core",
 			"Comment": "v0.4.4-7-g9e608f7",
@@ -228,19 +233,14 @@
 			"Rev": "7e3c02b30806fa5779d3bdfc152ce4c6f40e7b38"
 		},
 		{
-			"ImportPath": "github.com/influxdata/influxdb/client",
-			"Comment": "v0.13.0-74-g2c9d0fc",
-			"Rev": "2c9d0fcc04eba3ffc88f2aafe8466874e384d80d"
-		},
-		{
-			"ImportPath": "github.com/influxdata/influxdb/models",
-			"Comment": "v0.13.0-74-g2c9d0fc",
-			"Rev": "2c9d0fcc04eba3ffc88f2aafe8466874e384d80d"
+			"ImportPath": "github.com/inconshreveable/log15",
+			"Comment": "v2.3-61-g20bca5a",
+			"Rev": "20bca5a7a57282e241fac83ec9ea42538027f1c1"
 		},
 		{
-			"ImportPath": "github.com/influxdata/influxdb/pkg/escape",
-			"Comment": "v0.13.0-74-g2c9d0fc",
-			"Rev": "2c9d0fcc04eba3ffc88f2aafe8466874e384d80d"
+			"ImportPath": "github.com/inconshreveable/log15/term",
+			"Comment": "v2.3-61-g20bca5a",
+			"Rev": "20bca5a7a57282e241fac83ec9ea42538027f1c1"
 		},
 		{
 			"ImportPath": "github.com/jmespath/go-jmespath",

+ 16 - 0
Godeps/_workspace/src/github.com/go-stack/stack/.travis.yml

@@ -0,0 +1,16 @@
+language: go
+sudo: false
+go:
+  - 1.2
+  - 1.3
+  - 1.4
+  - 1.5
+  - 1.6
+  - tip
+
+before_install:
+  - go get github.com/mattn/goveralls
+  - go get golang.org/x/tools/cmd/cover
+
+script:
+  - goveralls -service=travis-ci

+ 13 - 0
Godeps/_workspace/src/github.com/go-stack/stack/LICENSE.md

@@ -0,0 +1,13 @@
+Copyright 2014 Chris Hines
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.

+ 38 - 0
Godeps/_workspace/src/github.com/go-stack/stack/README.md

@@ -0,0 +1,38 @@
+[![GoDoc](https://godoc.org/github.com/go-stack/stack?status.svg)](https://godoc.org/github.com/go-stack/stack)
+[![Go Report Card](https://goreportcard.com/badge/go-stack/stack)](https://goreportcard.com/report/go-stack/stack)
+[![TravisCI](https://travis-ci.org/go-stack/stack.svg?branch=master)](https://travis-ci.org/go-stack/stack)
+[![Coverage Status](https://coveralls.io/repos/github/go-stack/stack/badge.svg?branch=master)](https://coveralls.io/github/go-stack/stack?branch=master)
+
+# stack
+
+Package stack implements utilities to capture, manipulate, and format call
+stacks. It provides a simpler API than package runtime.
+
+The implementation takes care of the minutia and special cases of interpreting
+the program counter (pc) values returned by runtime.Callers.
+
+## Versioning
+
+Package stack publishes releases via [semver](http://semver.org/) compatible Git
+tags prefixed with a single 'v'. The master branch always contains the latest
+release. The develop branch contains unreleased commits.
+
+## Formatting
+
+Package stack's types implement fmt.Formatter, which provides a simple and
+flexible way to declaratively configure formatting when used with logging or
+error tracking packages.
+
+```go
+func DoTheThing() {
+    c := stack.Caller(0)
+    log.Print(c)          // "source.go:10"
+    log.Printf("%+v", c)  // "pkg/path/source.go:10"
+    log.Printf("%n", c)   // "DoTheThing"
+
+    s := stack.Trace().TrimRuntime()
+    log.Print(s)          // "[source.go:15 caller.go:42 main.go:14]"
+}
+```
+
+See the docs for all of the supported formatting options.

+ 349 - 0
Godeps/_workspace/src/github.com/go-stack/stack/stack.go

@@ -0,0 +1,349 @@
+// Package stack implements utilities to capture, manipulate, and format call
+// stacks. It provides a simpler API than package runtime.
+//
+// The implementation takes care of the minutia and special cases of
+// interpreting the program counter (pc) values returned by runtime.Callers.
+//
+// Package stack's types implement fmt.Formatter, which provides a simple and
+// flexible way to declaratively configure formatting when used with logging
+// or error tracking packages.
+package stack
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"runtime"
+	"strconv"
+	"strings"
+)
+
+// Call records a single function invocation from a goroutine stack.
+type Call struct {
+	fn *runtime.Func
+	pc uintptr
+}
+
+// Caller returns a Call from the stack of the current goroutine. The argument
+// skip is the number of stack frames to ascend, with 0 identifying the
+// calling function.
+func Caller(skip int) Call {
+	var pcs [2]uintptr
+	n := runtime.Callers(skip+1, pcs[:])
+
+	var c Call
+
+	if n < 2 {
+		return c
+	}
+
+	c.pc = pcs[1]
+	if runtime.FuncForPC(pcs[0]) != sigpanic {
+		c.pc--
+	}
+	c.fn = runtime.FuncForPC(c.pc)
+	return c
+}
+
+// String implements fmt.Stinger. It is equivalent to fmt.Sprintf("%v", c).
+func (c Call) String() string {
+	return fmt.Sprint(c)
+}
+
+// MarshalText implements encoding.TextMarshaler. It formats the Call the same
+// as fmt.Sprintf("%v", c).
+func (c Call) MarshalText() ([]byte, error) {
+	if c.fn == nil {
+		return nil, ErrNoFunc
+	}
+	buf := bytes.Buffer{}
+	fmt.Fprint(&buf, c)
+	return buf.Bytes(), nil
+}
+
+// ErrNoFunc means that the Call has a nil *runtime.Func. The most likely
+// cause is a Call with the zero value.
+var ErrNoFunc = errors.New("no call stack information")
+
+// Format implements fmt.Formatter with support for the following verbs.
+//
+//    %s    source file
+//    %d    line number
+//    %n    function name
+//    %v    equivalent to %s:%d
+//
+// It accepts the '+' and '#' flags for most of the verbs as follows.
+//
+//    %+s   path of source file relative to the compile time GOPATH
+//    %#s   full path of source file
+//    %+n   import path qualified function name
+//    %+v   equivalent to %+s:%d
+//    %#v   equivalent to %#s:%d
+func (c Call) Format(s fmt.State, verb rune) {
+	if c.fn == nil {
+		fmt.Fprintf(s, "%%!%c(NOFUNC)", verb)
+		return
+	}
+
+	switch verb {
+	case 's', 'v':
+		file, line := c.fn.FileLine(c.pc)
+		switch {
+		case s.Flag('#'):
+			// done
+		case s.Flag('+'):
+			file = file[pkgIndex(file, c.fn.Name()):]
+		default:
+			const sep = "/"
+			if i := strings.LastIndex(file, sep); i != -1 {
+				file = file[i+len(sep):]
+			}
+		}
+		io.WriteString(s, file)
+		if verb == 'v' {
+			buf := [7]byte{':'}
+			s.Write(strconv.AppendInt(buf[:1], int64(line), 10))
+		}
+
+	case 'd':
+		_, line := c.fn.FileLine(c.pc)
+		buf := [6]byte{}
+		s.Write(strconv.AppendInt(buf[:0], int64(line), 10))
+
+	case 'n':
+		name := c.fn.Name()
+		if !s.Flag('+') {
+			const pathSep = "/"
+			if i := strings.LastIndex(name, pathSep); i != -1 {
+				name = name[i+len(pathSep):]
+			}
+			const pkgSep = "."
+			if i := strings.Index(name, pkgSep); i != -1 {
+				name = name[i+len(pkgSep):]
+			}
+		}
+		io.WriteString(s, name)
+	}
+}
+
+// PC returns the program counter for this call frame; multiple frames may
+// have the same PC value.
+func (c Call) PC() uintptr {
+	return c.pc
+}
+
+// name returns the import path qualified name of the function containing the
+// call.
+func (c Call) name() string {
+	if c.fn == nil {
+		return "???"
+	}
+	return c.fn.Name()
+}
+
+func (c Call) file() string {
+	if c.fn == nil {
+		return "???"
+	}
+	file, _ := c.fn.FileLine(c.pc)
+	return file
+}
+
+func (c Call) line() int {
+	if c.fn == nil {
+		return 0
+	}
+	_, line := c.fn.FileLine(c.pc)
+	return line
+}
+
+// CallStack records a sequence of function invocations from a goroutine
+// stack.
+type CallStack []Call
+
+// String implements fmt.Stinger. It is equivalent to fmt.Sprintf("%v", cs).
+func (cs CallStack) String() string {
+	return fmt.Sprint(cs)
+}
+
+var (
+	openBracketBytes  = []byte("[")
+	closeBracketBytes = []byte("]")
+	spaceBytes        = []byte(" ")
+)
+
+// MarshalText implements encoding.TextMarshaler. It formats the CallStack the
+// same as fmt.Sprintf("%v", cs).
+func (cs CallStack) MarshalText() ([]byte, error) {
+	buf := bytes.Buffer{}
+	buf.Write(openBracketBytes)
+	for i, pc := range cs {
+		if pc.fn == nil {
+			return nil, ErrNoFunc
+		}
+		if i > 0 {
+			buf.Write(spaceBytes)
+		}
+		fmt.Fprint(&buf, pc)
+	}
+	buf.Write(closeBracketBytes)
+	return buf.Bytes(), nil
+}
+
+// Format implements fmt.Formatter by printing the CallStack as square brackets
+// ([, ]) surrounding a space separated list of Calls each formatted with the
+// supplied verb and options.
+func (cs CallStack) Format(s fmt.State, verb rune) {
+	s.Write(openBracketBytes)
+	for i, pc := range cs {
+		if i > 0 {
+			s.Write(spaceBytes)
+		}
+		pc.Format(s, verb)
+	}
+	s.Write(closeBracketBytes)
+}
+
+// findSigpanic intentionally executes faulting code to generate a stack trace
+// containing an entry for runtime.sigpanic.
+func findSigpanic() *runtime.Func {
+	var fn *runtime.Func
+	var p *int
+	func() int {
+		defer func() {
+			if p := recover(); p != nil {
+				var pcs [512]uintptr
+				n := runtime.Callers(2, pcs[:])
+				for _, pc := range pcs[:n] {
+					f := runtime.FuncForPC(pc)
+					if f.Name() == "runtime.sigpanic" {
+						fn = f
+						break
+					}
+				}
+			}
+		}()
+		// intentional nil pointer dereference to trigger sigpanic
+		return *p
+	}()
+	return fn
+}
+
+var sigpanic = findSigpanic()
+
+// Trace returns a CallStack for the current goroutine with element 0
+// identifying the calling function.
+func Trace() CallStack {
+	var pcs [512]uintptr
+	n := runtime.Callers(2, pcs[:])
+	cs := make([]Call, n)
+
+	for i, pc := range pcs[:n] {
+		pcFix := pc
+		if i > 0 && cs[i-1].fn != sigpanic {
+			pcFix--
+		}
+		cs[i] = Call{
+			fn: runtime.FuncForPC(pcFix),
+			pc: pcFix,
+		}
+	}
+
+	return cs
+}
+
+// TrimBelow returns a slice of the CallStack with all entries below c
+// removed.
+func (cs CallStack) TrimBelow(c Call) CallStack {
+	for len(cs) > 0 && cs[0].pc != c.pc {
+		cs = cs[1:]
+	}
+	return cs
+}
+
+// TrimAbove returns a slice of the CallStack with all entries above c
+// removed.
+func (cs CallStack) TrimAbove(c Call) CallStack {
+	for len(cs) > 0 && cs[len(cs)-1].pc != c.pc {
+		cs = cs[:len(cs)-1]
+	}
+	return cs
+}
+
+// pkgIndex returns the index that results in file[index:] being the path of
+// file relative to the compile time GOPATH, and file[:index] being the
+// $GOPATH/src/ portion of file. funcName must be the name of a function in
+// file as returned by runtime.Func.Name.
+func pkgIndex(file, funcName string) int {
+	// As of Go 1.6.2 there is no direct way to know the compile time GOPATH
+	// at runtime, but we can infer the number of path segments in the GOPATH.
+	// We note that runtime.Func.Name() returns the function name qualified by
+	// the import path, which does not include the GOPATH. Thus we can trim
+	// segments from the beginning of the file path until the number of path
+	// separators remaining is one more than the number of path separators in
+	// the function name. For example, given:
+	//
+	//    GOPATH     /home/user
+	//    file       /home/user/src/pkg/sub/file.go
+	//    fn.Name()  pkg/sub.Type.Method
+	//
+	// We want to produce:
+	//
+	//    file[:idx] == /home/user/src/
+	//    file[idx:] == pkg/sub/file.go
+	//
+	// From this we can easily see that fn.Name() has one less path separator
+	// than our desired result for file[idx:]. We count separators from the
+	// end of the file path until it finds two more than in the function name
+	// and then move one character forward to preserve the initial path
+	// segment without a leading separator.
+	const sep = "/"
+	i := len(file)
+	for n := strings.Count(funcName, sep) + 2; n > 0; n-- {
+		i = strings.LastIndex(file[:i], sep)
+		if i == -1 {
+			i = -len(sep)
+			break
+		}
+	}
+	// get back to 0 or trim the leading separator
+	return i + len(sep)
+}
+
+var runtimePath string
+
+func init() {
+	var pcs [1]uintptr
+	runtime.Callers(0, pcs[:])
+	fn := runtime.FuncForPC(pcs[0])
+	file, _ := fn.FileLine(pcs[0])
+
+	idx := pkgIndex(file, fn.Name())
+
+	runtimePath = file[:idx]
+	if runtime.GOOS == "windows" {
+		runtimePath = strings.ToLower(runtimePath)
+	}
+}
+
+func inGoroot(c Call) bool {
+	file := c.file()
+	if len(file) == 0 || file[0] == '?' {
+		return true
+	}
+	if runtime.GOOS == "windows" {
+		file = strings.ToLower(file)
+	}
+	return strings.HasPrefix(file, runtimePath) || strings.HasSuffix(file, "/_testmain.go")
+}
+
+// TrimRuntime returns a slice of the CallStack with the topmost entries from
+// the go runtime removed. It considers any calls originating from unknown
+// files, files under GOROOT, or _testmain.go as part of the runtime.
+func (cs CallStack) TrimRuntime() CallStack {
+	for len(cs) > 0 && inGoroot(cs[len(cs)-1]) {
+		cs = cs[:len(cs)-1]
+	}
+	return cs
+}

+ 10 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/.travis.yml

@@ -0,0 +1,10 @@
+language: go
+
+go:
+  - 1.1
+  - 1.2
+  - 1.3
+  - 1.4
+  - 1.5
+  - 1.6
+  - tip

+ 11 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/CONTRIBUTORS

@@ -0,0 +1,11 @@
+Contributors to log15:
+
+- Aaron L 
+- Alan Shreve 
+- Chris Hines 
+- Ciaran Downey 
+- Dmitry Chestnykh 
+- Evan Shaw 
+- Péter Szilágyi 
+- Trevor Gattis 
+- Vincent Vanackere 

+ 13 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/LICENSE

@@ -0,0 +1,13 @@
+Copyright 2014 Alan Shreve
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.

+ 70 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/README.md

@@ -0,0 +1,70 @@
+![obligatory xkcd](http://imgs.xkcd.com/comics/standards.png)
+
+# log15 [![godoc reference](https://godoc.org/github.com/inconshreveable/log15?status.png)](https://godoc.org/github.com/inconshreveable/log15) [![Build Status](https://travis-ci.org/inconshreveable/log15.svg?branch=master)](https://travis-ci.org/inconshreveable/log15)
+
+Package log15 provides an opinionated, simple toolkit for best-practice logging in Go (golang) that is both human and machine readable. It is modeled after the Go standard library's [`io`](http://golang.org/pkg/io/) and [`net/http`](http://golang.org/pkg/net/http/) packages and is an alternative to the standard library's [`log`](http://golang.org/pkg/log/) package. 
+
+## Features
+- A simple, easy-to-understand API
+- Promotes structured logging by encouraging use of key/value pairs
+- Child loggers which inherit and add their own private context
+- Lazy evaluation of expensive operations
+- Simple Handler interface allowing for construction of flexible, custom logging configurations with a tiny API.
+- Color terminal support
+- Built-in support for logging to files, streams, syslog, and the network
+- Support for forking records to multiple handlers, buffering records for output, failing over from failed handler writes, + more
+
+## Versioning
+The API of the master branch of log15 should always be considered unstable. If you want to rely on a stable API,
+you must vendor the library.
+
+## Importing
+
+```go
+import log "github.com/inconshreveable/log15"
+```
+
+## Examples
+
+```go
+// all loggers can have key/value context
+srvlog := log.New("module", "app/server")
+
+// all log messages can have key/value context 
+srvlog.Warn("abnormal conn rate", "rate", curRate, "low", lowRate, "high", highRate)
+
+// child loggers with inherited context
+connlog := srvlog.New("raddr", c.RemoteAddr())
+connlog.Info("connection open")
+
+// lazy evaluation
+connlog.Debug("ping remote", "latency", log.Lazy{pingRemote})
+
+// flexible configuration
+srvlog.SetHandler(log.MultiHandler(
+    log.StreamHandler(os.Stderr, log.LogfmtFormat()),
+    log.LvlFilterHandler(
+        log.LvlError,
+        log.Must.FileHandler("errors.json", log.JsonFormat())))
+```
+
+## Breaking API Changes
+The following commits broke API stability. This reference is intended to help you understand the consequences of updating to a newer version
+of log15.
+
+- 57a084d014d4150152b19e4e531399a7145d1540 - Added a `Get()` method to the `Logger` interface to retrieve the current handler
+- 93404652ee366648fa622b64d1e2b67d75a3094a - `Record` field `Call` changed to `stack.Call` with switch to `github.com/go-stack/stack`
+- a5e7613673c73281f58e15a87d2cf0cf111e8152 - Restored `syslog.Priority` argument to the `SyslogXxx` handler constructors
+
+## FAQ
+
+### The varargs style is brittle and error prone! Can I have type safety please?
+Yes. Use `log.Ctx`:
+
+```go
+srvlog := log.New(log.Ctx{"module": "app/server"})
+srvlog.Warn("abnormal conn rate", log.Ctx{"rate": curRate, "low": lowRate, "high": highRate})
+```
+
+## License
+Apache

+ 333 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/doc.go

@@ -0,0 +1,333 @@
+/*
+Package log15 provides an opinionated, simple toolkit for best-practice logging that is
+both human and machine readable. It is modeled after the standard library's io and net/http
+packages.
+
+This package enforces you to only log key/value pairs. Keys must be strings. Values may be
+any type that you like. The default output format is logfmt, but you may also choose to use
+JSON instead if that suits you. Here's how you log:
+
+    log.Info("page accessed", "path", r.URL.Path, "user_id", user.id)
+
+This will output a line that looks like:
+
+     lvl=info t=2014-05-02T16:07:23-0700 msg="page accessed" path=/org/71/profile user_id=9
+
+Getting Started
+
+To get started, you'll want to import the library:
+
+    import log "github.com/inconshreveable/log15"
+
+
+Now you're ready to start logging:
+
+    func main() {
+        log.Info("Program starting", "args", os.Args())
+    }
+
+
+Convention
+
+Because recording a human-meaningful message is common and good practice, the first argument to every
+logging method is the value to the *implicit* key 'msg'.
+
+Additionally, the level you choose for a message will be automatically added with the key 'lvl', and so
+will the current timestamp with key 't'.
+
+You may supply any additional context as a set of key/value pairs to the logging function. log15 allows
+you to favor terseness, ordering, and speed over safety. This is a reasonable tradeoff for
+logging functions. You don't need to explicitly state keys/values, log15 understands that they alternate
+in the variadic argument list:
+
+    log.Warn("size out of bounds", "low", lowBound, "high", highBound, "val", val)
+
+If you really do favor your type-safety, you may choose to pass a log.Ctx instead:
+
+    log.Warn("size out of bounds", log.Ctx{"low": lowBound, "high": highBound, "val": val})
+
+
+Context loggers
+
+Frequently, you want to add context to a logger so that you can track actions associated with it. An http
+request is a good example. You can easily create new loggers that have context that is automatically included
+with each log line:
+
+    requestlogger := log.New("path", r.URL.Path)
+
+    // later
+    requestlogger.Debug("db txn commit", "duration", txnTimer.Finish())
+
+This will output a log line that includes the path context that is attached to the logger:
+
+    lvl=dbug t=2014-05-02T16:07:23-0700 path=/repo/12/add_hook msg="db txn commit" duration=0.12
+
+
+Handlers
+
+The Handler interface defines where log lines are printed to and how they are formated. Handler is a
+single interface that is inspired by net/http's handler interface:
+
+    type Handler interface {
+        Log(r *Record) error
+    }
+
+
+Handlers can filter records, format them, or dispatch to multiple other Handlers.
+This package implements a number of Handlers for common logging patterns that are
+easily composed to create flexible, custom logging structures.
+
+Here's an example handler that prints logfmt output to Stdout:
+
+    handler := log.StreamHandler(os.Stdout, log.LogfmtFormat())
+
+Here's an example handler that defers to two other handlers. One handler only prints records
+from the rpc package in logfmt to standard out. The other prints records at Error level
+or above in JSON formatted output to the file /var/log/service.json
+
+    handler := log.MultiHandler(
+        log.LvlFilterHandler(log.LvlError, log.Must.FileHandler("/var/log/service.json", log.JsonFormat())),
+        log.MatchFilterHandler("pkg", "app/rpc" log.StdoutHandler())
+    )
+
+Logging File Names and Line Numbers
+
+This package implements three Handlers that add debugging information to the
+context, CallerFileHandler, CallerFuncHandler and CallerStackHandler. Here's
+an example that adds the source file and line number of each logging call to
+the context.
+
+    h := log.CallerFileHandler(log.StdoutHandler())
+    log.Root().SetHandler(h)
+    ...
+    log.Error("open file", "err", err)
+
+This will output a line that looks like:
+
+    lvl=eror t=2014-05-02T16:07:23-0700 msg="open file" err="file not found" caller=data.go:42
+
+Here's an example that logs the call stack rather than just the call site.
+
+    h := log.CallerStackHandler("%+v", log.StdoutHandler())
+    log.Root().SetHandler(h)
+    ...
+    log.Error("open file", "err", err)
+
+This will output a line that looks like:
+
+    lvl=eror t=2014-05-02T16:07:23-0700 msg="open file" err="file not found" stack="[pkg/data.go:42 pkg/cmd/main.go]"
+
+The "%+v" format instructs the handler to include the path of the source file
+relative to the compile time GOPATH. The github.com/go-stack/stack package
+documents the full list of formatting verbs and modifiers available.
+
+Custom Handlers
+
+The Handler interface is so simple that it's also trivial to write your own. Let's create an
+example handler which tries to write to one handler, but if that fails it falls back to
+writing to another handler and includes the error that it encountered when trying to write
+to the primary. This might be useful when trying to log over a network socket, but if that
+fails you want to log those records to a file on disk.
+
+    type BackupHandler struct {
+        Primary Handler
+        Secondary Handler
+    }
+
+    func (h *BackupHandler) Log (r *Record) error {
+        err := h.Primary.Log(r)
+        if err != nil {
+            r.Ctx = append(ctx, "primary_err", err)
+            return h.Secondary.Log(r)
+        }
+        return nil
+    }
+
+This pattern is so useful that a generic version that handles an arbitrary number of Handlers
+is included as part of this library called FailoverHandler.
+
+Logging Expensive Operations
+
+Sometimes, you want to log values that are extremely expensive to compute, but you don't want to pay
+the price of computing them if you haven't turned up your logging level to a high level of detail.
+
+This package provides a simple type to annotate a logging operation that you want to be evaluated
+lazily, just when it is about to be logged, so that it would not be evaluated if an upstream Handler
+filters it out. Just wrap any function which takes no arguments with the log.Lazy type. For example:
+
+    func factorRSAKey() (factors []int) {
+        // return the factors of a very large number
+    }
+
+    log.Debug("factors", log.Lazy{factorRSAKey})
+
+If this message is not logged for any reason (like logging at the Error level), then
+factorRSAKey is never evaluated.
+
+Dynamic context values
+
+The same log.Lazy mechanism can be used to attach context to a logger which you want to be
+evaluated when the message is logged, but not when the logger is created. For example, let's imagine
+a game where you have Player objects:
+
+    type Player struct {
+        name string
+        alive bool
+        log.Logger
+    }
+
+You always want to log a player's name and whether they're alive or dead, so when you create the player
+object, you might do:
+
+    p := &Player{name: name, alive: true}
+    p.Logger = log.New("name", p.name, "alive", p.alive)
+
+Only now, even after a player has died, the logger will still report they are alive because the logging
+context is evaluated when the logger was created. By using the Lazy wrapper, we can defer the evaluation
+of whether the player is alive or not to each log message, so that the log records will reflect the player's
+current state no matter when the log message is written:
+
+    p := &Player{name: name, alive: true}
+    isAlive := func() bool { return p.alive }
+    player.Logger = log.New("name", p.name, "alive", log.Lazy{isAlive})
+
+Terminal Format
+
+If log15 detects that stdout is a terminal, it will configure the default
+handler for it (which is log.StdoutHandler) to use TerminalFormat. This format
+logs records nicely for your terminal, including color-coded output based
+on log level.
+
+Error Handling
+
+Becasuse log15 allows you to step around the type system, there are a few ways you can specify
+invalid arguments to the logging functions. You could, for example, wrap something that is not
+a zero-argument function with log.Lazy or pass a context key that is not a string. Since logging libraries
+are typically the mechanism by which errors are reported, it would be onerous for the logging functions
+to return errors. Instead, log15 handles errors by making these guarantees to you:
+
+- Any log record containing an error will still be printed with the error explained to you as part of the log record.
+
+- Any log record containing an error will include the context key LOG15_ERROR, enabling you to easily
+(and if you like, automatically) detect if any of your logging calls are passing bad values.
+
+Understanding this, you might wonder why the Handler interface can return an error value in its Log method. Handlers
+are encouraged to return errors only if they fail to write their log records out to an external source like if the
+syslog daemon is not responding. This allows the construction of useful handlers which cope with those failures
+like the FailoverHandler.
+
+Library Use
+
+log15 is intended to be useful for library authors as a way to provide configurable logging to
+users of their library. Best practice for use in a library is to always disable all output for your logger
+by default and to provide a public Logger instance that consumers of your library can configure. Like so:
+
+    package yourlib
+
+    import "github.com/inconshreveable/log15"
+
+    var Log = log.New()
+
+    func init() {
+        Log.SetHandler(log.DiscardHandler())
+    }
+
+Users of your library may then enable it if they like:
+
+    import "github.com/inconshreveable/log15"
+    import "example.com/yourlib"
+
+    func main() {
+        handler := // custom handler setup
+        yourlib.Log.SetHandler(handler)
+    }
+
+Best practices attaching logger context
+
+The ability to attach context to a logger is a powerful one. Where should you do it and why?
+I favor embedding a Logger directly into any persistent object in my application and adding
+unique, tracing context keys to it. For instance, imagine I am writing a web browser:
+
+    type Tab struct {
+        url string
+        render *RenderingContext
+        // ...
+
+        Logger
+    }
+
+    func NewTab(url string) *Tab {
+        return &Tab {
+            // ...
+            url: url,
+
+            Logger: log.New("url", url),
+        }
+    }
+
+When a new tab is created, I assign a logger to it with the url of
+the tab as context so it can easily be traced through the logs.
+Now, whenever we perform any operation with the tab, we'll log with its
+embedded logger and it will include the tab title automatically:
+
+    tab.Debug("moved position", "idx", tab.idx)
+
+There's only one problem. What if the tab url changes? We could
+use log.Lazy to make sure the current url is always written, but that
+would mean that we couldn't trace a tab's full lifetime through our
+logs after the user navigate to a new URL.
+
+Instead, think about what values to attach to your loggers the
+same way you think about what to use as a key in a SQL database schema.
+If it's possible to use a natural key that is unique for the lifetime of the
+object, do so. But otherwise, log15's ext package has a handy RandId
+function to let you generate what you might call "surrogate keys"
+They're just random hex identifiers to use for tracing. Back to our
+Tab example, we would prefer to set up our Logger like so:
+
+        import logext "github.com/inconshreveable/log15/ext"
+
+        t := &Tab {
+            // ...
+            url: url,
+        }
+
+        t.Logger = log.New("id", logext.RandId(8), "url", log.Lazy{t.getUrl})
+        return t
+
+Now we'll have a unique traceable identifier even across loading new urls, but
+we'll still be able to see the tab's current url in the log messages.
+
+Must
+
+For all Handler functions which can return an error, there is a version of that
+function which will return no error but panics on failure. They are all available
+on the Must object. For example:
+
+    log.Must.FileHandler("/path", log.JsonFormat)
+    log.Must.NetHandler("tcp", ":1234", log.JsonFormat)
+
+Inspiration and Credit
+
+All of the following excellent projects inspired the design of this library:
+
+code.google.com/p/log4go
+
+github.com/op/go-logging
+
+github.com/technoweenie/grohl
+
+github.com/Sirupsen/logrus
+
+github.com/kr/logfmt
+
+github.com/spacemonkeygo/spacelog
+
+golang's stdlib, notably io and net/http
+
+The Name
+
+https://xkcd.com/927/
+
+*/
+package log15

+ 257 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/format.go

@@ -0,0 +1,257 @@
+package log15
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+const (
+	timeFormat     = "2006-01-02T15:04:05-0700"
+	termTimeFormat = "01-02|15:04:05"
+	floatFormat    = 'f'
+	termMsgJust    = 40
+)
+
+type Format interface {
+	Format(r *Record) []byte
+}
+
+// FormatFunc returns a new Format object which uses
+// the given function to perform record formatting.
+func FormatFunc(f func(*Record) []byte) Format {
+	return formatFunc(f)
+}
+
+type formatFunc func(*Record) []byte
+
+func (f formatFunc) Format(r *Record) []byte {
+	return f(r)
+}
+
+// TerminalFormat formats log records optimized for human readability on
+// a terminal with color-coded level output and terser human friendly timestamp.
+// This format should only be used for interactive programs or while developing.
+//
+//     [TIME] [LEVEL] MESSAGE key=value key=value ...
+//
+// Example:
+//
+//     [May 16 20:58:45] [DBUG] remove route ns=haproxy addr=127.0.0.1:50002
+//
+func TerminalFormat() Format {
+	return FormatFunc(func(r *Record) []byte {
+		var color = 0
+		switch r.Lvl {
+		case LvlCrit:
+			color = 35
+		case LvlError:
+			color = 31
+		case LvlWarn:
+			color = 33
+		case LvlInfo:
+			color = 32
+		case LvlDebug:
+			color = 36
+		}
+
+		b := &bytes.Buffer{}
+		lvl := strings.ToUpper(r.Lvl.String())
+		if color > 0 {
+			fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %s ", color, lvl, r.Time.Format(termTimeFormat), r.Msg)
+		} else {
+			fmt.Fprintf(b, "[%s] [%s] %s ", lvl, r.Time.Format(termTimeFormat), r.Msg)
+		}
+
+		// try to justify the log output for short messages
+		if len(r.Ctx) > 0 && len(r.Msg) < termMsgJust {
+			b.Write(bytes.Repeat([]byte{' '}, termMsgJust-len(r.Msg)))
+		}
+
+		// print the keys logfmt style
+		logfmt(b, r.Ctx, color)
+		return b.Bytes()
+	})
+}
+
+// LogfmtFormat prints records in logfmt format, an easy machine-parseable but human-readable
+// format for key/value pairs.
+//
+// For more details see: http://godoc.org/github.com/kr/logfmt
+//
+func LogfmtFormat() Format {
+	return FormatFunc(func(r *Record) []byte {
+		common := []interface{}{r.KeyNames.Time, r.Time, r.KeyNames.Lvl, r.Lvl, r.KeyNames.Msg, r.Msg}
+		buf := &bytes.Buffer{}
+		logfmt(buf, append(common, r.Ctx...), 0)
+		return buf.Bytes()
+	})
+}
+
+func logfmt(buf *bytes.Buffer, ctx []interface{}, color int) {
+	for i := 0; i < len(ctx); i += 2 {
+		if i != 0 {
+			buf.WriteByte(' ')
+		}
+
+		k, ok := ctx[i].(string)
+		v := formatLogfmtValue(ctx[i+1])
+		if !ok {
+			k, v = errorKey, formatLogfmtValue(k)
+		}
+
+		// XXX: we should probably check that all of your key bytes aren't invalid
+		if color > 0 {
+			fmt.Fprintf(buf, "\x1b[%dm%s\x1b[0m=%s", color, k, v)
+		} else {
+			fmt.Fprintf(buf, "%s=%s", k, v)
+		}
+	}
+
+	buf.WriteByte('\n')
+}
+
+// JsonFormat formats log records as JSON objects separated by newlines.
+// It is the equivalent of JsonFormatEx(false, true).
+func JsonFormat() Format {
+	return JsonFormatEx(false, true)
+}
+
+// JsonFormatEx formats log records as JSON objects. If pretty is true,
+// records will be pretty-printed. If lineSeparated is true, records
+// will be logged with a new line between each record.
+func JsonFormatEx(pretty, lineSeparated bool) Format {
+	jsonMarshal := json.Marshal
+	if pretty {
+		jsonMarshal = func(v interface{}) ([]byte, error) {
+			return json.MarshalIndent(v, "", "    ")
+		}
+	}
+
+	return FormatFunc(func(r *Record) []byte {
+		props := make(map[string]interface{})
+
+		props[r.KeyNames.Time] = r.Time
+		props[r.KeyNames.Lvl] = r.Lvl.String()
+		props[r.KeyNames.Msg] = r.Msg
+
+		for i := 0; i < len(r.Ctx); i += 2 {
+			k, ok := r.Ctx[i].(string)
+			if !ok {
+				props[errorKey] = fmt.Sprintf("%+v is not a string key", r.Ctx[i])
+			}
+			props[k] = formatJsonValue(r.Ctx[i+1])
+		}
+
+		b, err := jsonMarshal(props)
+		if err != nil {
+			b, _ = jsonMarshal(map[string]string{
+				errorKey: err.Error(),
+			})
+			return b
+		}
+
+		if lineSeparated {
+			b = append(b, '\n')
+		}
+
+		return b
+	})
+}
+
+func formatShared(value interface{}) (result interface{}) {
+	defer func() {
+		if err := recover(); err != nil {
+			if v := reflect.ValueOf(value); v.Kind() == reflect.Ptr && v.IsNil() {
+				result = "nil"
+			} else {
+				panic(err)
+			}
+		}
+	}()
+
+	switch v := value.(type) {
+	case time.Time:
+		return v.Format(timeFormat)
+
+	case error:
+		return v.Error()
+
+	case fmt.Stringer:
+		return v.String()
+
+	default:
+		return v
+	}
+}
+
+func formatJsonValue(value interface{}) interface{} {
+	value = formatShared(value)
+	switch value.(type) {
+	case int, int8, int16, int32, int64, float32, float64, uint, uint8, uint16, uint32, uint64, string:
+		return value
+	default:
+		return fmt.Sprintf("%+v", value)
+	}
+}
+
+// formatValue formats a value for serialization
+func formatLogfmtValue(value interface{}) string {
+	if value == nil {
+		return "nil"
+	}
+
+	value = formatShared(value)
+	switch v := value.(type) {
+	case bool:
+		return strconv.FormatBool(v)
+	case float32:
+		return strconv.FormatFloat(float64(v), floatFormat, 3, 64)
+	case float64:
+		return strconv.FormatFloat(v, floatFormat, 3, 64)
+	case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
+		return fmt.Sprintf("%d", value)
+	case string:
+		return escapeString(v)
+	default:
+		return escapeString(fmt.Sprintf("%+v", value))
+	}
+}
+
+func escapeString(s string) string {
+	needQuotes := false
+	e := bytes.Buffer{}
+	e.WriteByte('"')
+	for _, r := range s {
+		if r <= ' ' || r == '=' || r == '"' {
+			needQuotes = true
+		}
+
+		switch r {
+		case '\\', '"':
+			e.WriteByte('\\')
+			e.WriteByte(byte(r))
+		case '\n':
+			e.WriteByte('\\')
+			e.WriteByte('n')
+		case '\r':
+			e.WriteByte('\\')
+			e.WriteByte('r')
+		case '\t':
+			e.WriteByte('\\')
+			e.WriteByte('t')
+		default:
+			e.WriteRune(r)
+		}
+	}
+	e.WriteByte('"')
+	start, stop := 0, e.Len()
+	if !needQuotes {
+		start, stop = 1, stop-1
+	}
+	return string(e.Bytes()[start:stop])
+}

+ 356 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/handler.go

@@ -0,0 +1,356 @@
+package log15
+
+import (
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"reflect"
+	"sync"
+
+	"github.com/go-stack/stack"
+)
+
+// A Logger prints its log records by writing to a Handler.
+// The Handler interface defines where and how log records are written.
+// Handlers are composable, providing you great flexibility in combining
+// them to achieve the logging structure that suits your applications.
+type Handler interface {
+	Log(r *Record) error
+}
+
+// FuncHandler returns a Handler that logs records with the given
+// function.
+func FuncHandler(fn func(r *Record) error) Handler {
+	return funcHandler(fn)
+}
+
+type funcHandler func(r *Record) error
+
+func (h funcHandler) Log(r *Record) error {
+	return h(r)
+}
+
+// StreamHandler writes log records to an io.Writer
+// with the given format. StreamHandler can be used
+// to easily begin writing log records to other
+// outputs.
+//
+// StreamHandler wraps itself with LazyHandler and SyncHandler
+// to evaluate Lazy objects and perform safe concurrent writes.
+func StreamHandler(wr io.Writer, fmtr Format) Handler {
+	h := FuncHandler(func(r *Record) error {
+		_, err := wr.Write(fmtr.Format(r))
+		return err
+	})
+	return LazyHandler(SyncHandler(h))
+}
+
+// SyncHandler can be wrapped around a handler to guarantee that
+// only a single Log operation can proceed at a time. It's necessary
+// for thread-safe concurrent writes.
+func SyncHandler(h Handler) Handler {
+	var mu sync.Mutex
+	return FuncHandler(func(r *Record) error {
+		defer mu.Unlock()
+		mu.Lock()
+		return h.Log(r)
+	})
+}
+
+// FileHandler returns a handler which writes log records to the given file
+// using the given format. If the path
+// already exists, FileHandler will append to the given file. If it does not,
+// FileHandler will create the file with mode 0644.
+func FileHandler(path string, fmtr Format) (Handler, error) {
+	f, err := os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
+	if err != nil {
+		return nil, err
+	}
+	return closingHandler{f, StreamHandler(f, fmtr)}, nil
+}
+
+// NetHandler opens a socket to the given address and writes records
+// over the connection.
+func NetHandler(network, addr string, fmtr Format) (Handler, error) {
+	conn, err := net.Dial(network, addr)
+	if err != nil {
+		return nil, err
+	}
+
+	return closingHandler{conn, StreamHandler(conn, fmtr)}, nil
+}
+
+// XXX: closingHandler is essentially unused at the moment
+// it's meant for a future time when the Handler interface supports
+// a possible Close() operation
+type closingHandler struct {
+	io.WriteCloser
+	Handler
+}
+
+func (h *closingHandler) Close() error {
+	return h.WriteCloser.Close()
+}
+
+// CallerFileHandler returns a Handler that adds the line number and file of
+// the calling function to the context with key "caller".
+func CallerFileHandler(h Handler) Handler {
+	return FuncHandler(func(r *Record) error {
+		r.Ctx = append(r.Ctx, "caller", fmt.Sprint(r.Call))
+		return h.Log(r)
+	})
+}
+
+// CallerFuncHandler returns a Handler that adds the calling function name to
+// the context with key "fn".
+func CallerFuncHandler(h Handler) Handler {
+	return FuncHandler(func(r *Record) error {
+		r.Ctx = append(r.Ctx, "fn", fmt.Sprintf("%+n", r.Call))
+		return h.Log(r)
+	})
+}
+
+// CallerStackHandler returns a Handler that adds a stack trace to the context
+// with key "stack". The stack trace is formatted as a space separated list of
+// call sites inside matching []'s. The most recent call site is listed first.
+// Each call site is formatted according to format. See the documentation of
+// package github.com/go-stack/stack for the list of supported formats.
+func CallerStackHandler(format string, h Handler) Handler {
+	return FuncHandler(func(r *Record) error {
+		s := stack.Trace().TrimBelow(r.Call).TrimRuntime()
+		if len(s) > 0 {
+			r.Ctx = append(r.Ctx, "stack", fmt.Sprintf(format, s))
+		}
+		return h.Log(r)
+	})
+}
+
+// FilterHandler returns a Handler that only writes records to the
+// wrapped Handler if the given function evaluates true. For example,
+// to only log records where the 'err' key is not nil:
+//
+//    logger.SetHandler(FilterHandler(func(r *Record) bool {
+//        for i := 0; i < len(r.Ctx); i += 2 {
+//            if r.Ctx[i] == "err" {
+//                return r.Ctx[i+1] != nil
+//            }
+//        }
+//        return false
+//    }, h))
+//
+func FilterHandler(fn func(r *Record) bool, h Handler) Handler {
+	return FuncHandler(func(r *Record) error {
+		if fn(r) {
+			return h.Log(r)
+		}
+		return nil
+	})
+}
+
+// MatchFilterHandler returns a Handler that only writes records
+// to the wrapped Handler if the given key in the logged
+// context matches the value. For example, to only log records
+// from your ui package:
+//
+//    log.MatchFilterHandler("pkg", "app/ui", log.StdoutHandler)
+//
+func MatchFilterHandler(key string, value interface{}, h Handler) Handler {
+	return FilterHandler(func(r *Record) (pass bool) {
+		switch key {
+		case r.KeyNames.Lvl:
+			return r.Lvl == value
+		case r.KeyNames.Time:
+			return r.Time == value
+		case r.KeyNames.Msg:
+			return r.Msg == value
+		}
+
+		for i := 0; i < len(r.Ctx); i += 2 {
+			if r.Ctx[i] == key {
+				return r.Ctx[i+1] == value
+			}
+		}
+		return false
+	}, h)
+}
+
+// LvlFilterHandler returns a Handler that only writes
+// records which are less than the given verbosity
+// level to the wrapped Handler. For example, to only
+// log Error/Crit records:
+//
+//     log.LvlFilterHandler(log.Error, log.StdoutHandler)
+//
+func LvlFilterHandler(maxLvl Lvl, h Handler) Handler {
+	return FilterHandler(func(r *Record) (pass bool) {
+		return r.Lvl <= maxLvl
+	}, h)
+}
+
+// A MultiHandler dispatches any write to each of its handlers.
+// This is useful for writing different types of log information
+// to different locations. For example, to log to a file and
+// standard error:
+//
+//     log.MultiHandler(
+//         log.Must.FileHandler("/var/log/app.log", log.LogfmtFormat()),
+//         log.StderrHandler)
+//
+func MultiHandler(hs ...Handler) Handler {
+	return FuncHandler(func(r *Record) error {
+		for _, h := range hs {
+			// what to do about failures?
+			h.Log(r)
+		}
+		return nil
+	})
+}
+
+// A FailoverHandler writes all log records to the first handler
+// specified, but will failover and write to the second handler if
+// the first handler has failed, and so on for all handlers specified.
+// For example you might want to log to a network socket, but failover
+// to writing to a file if the network fails, and then to
+// standard out if the file write fails:
+//
+//     log.FailoverHandler(
+//         log.Must.NetHandler("tcp", ":9090", log.JsonFormat()),
+//         log.Must.FileHandler("/var/log/app.log", log.LogfmtFormat()),
+//         log.StdoutHandler)
+//
+// All writes that do not go to the first handler will add context with keys of
+// the form "failover_err_{idx}" which explain the error encountered while
+// trying to write to the handlers before them in the list.
+func FailoverHandler(hs ...Handler) Handler {
+	return FuncHandler(func(r *Record) error {
+		var err error
+		for i, h := range hs {
+			err = h.Log(r)
+			if err == nil {
+				return nil
+			} else {
+				r.Ctx = append(r.Ctx, fmt.Sprintf("failover_err_%d", i), err)
+			}
+		}
+
+		return err
+	})
+}
+
+// ChannelHandler writes all records to the given channel.
+// It blocks if the channel is full. Useful for async processing
+// of log messages, it's used by BufferedHandler.
+func ChannelHandler(recs chan<- *Record) Handler {
+	return FuncHandler(func(r *Record) error {
+		recs <- r
+		return nil
+	})
+}
+
+// BufferedHandler writes all records to a buffered
+// channel of the given size which flushes into the wrapped
+// handler whenever it is available for writing. Since these
+// writes happen asynchronously, all writes to a BufferedHandler
+// never return an error and any errors from the wrapped handler are ignored.
+func BufferedHandler(bufSize int, h Handler) Handler {
+	recs := make(chan *Record, bufSize)
+	go func() {
+		for m := range recs {
+			_ = h.Log(m)
+		}
+	}()
+	return ChannelHandler(recs)
+}
+
+// LazyHandler writes all values to the wrapped handler after evaluating
+// any lazy functions in the record's context. It is already wrapped
+// around StreamHandler and SyslogHandler in this library, you'll only need
+// it if you write your own Handler.
+func LazyHandler(h Handler) Handler {
+	return FuncHandler(func(r *Record) error {
+		// go through the values (odd indices) and reassign
+		// the values of any lazy fn to the result of its execution
+		hadErr := false
+		for i := 1; i < len(r.Ctx); i += 2 {
+			lz, ok := r.Ctx[i].(Lazy)
+			if ok {
+				v, err := evaluateLazy(lz)
+				if err != nil {
+					hadErr = true
+					r.Ctx[i] = err
+				} else {
+					if cs, ok := v.(stack.CallStack); ok {
+						v = cs.TrimBelow(r.Call).TrimRuntime()
+					}
+					r.Ctx[i] = v
+				}
+			}
+		}
+
+		if hadErr {
+			r.Ctx = append(r.Ctx, errorKey, "bad lazy")
+		}
+
+		return h.Log(r)
+	})
+}
+
+func evaluateLazy(lz Lazy) (interface{}, error) {
+	t := reflect.TypeOf(lz.Fn)
+
+	if t.Kind() != reflect.Func {
+		return nil, fmt.Errorf("INVALID_LAZY, not func: %+v", lz.Fn)
+	}
+
+	if t.NumIn() > 0 {
+		return nil, fmt.Errorf("INVALID_LAZY, func takes args: %+v", lz.Fn)
+	}
+
+	if t.NumOut() == 0 {
+		return nil, fmt.Errorf("INVALID_LAZY, no func return val: %+v", lz.Fn)
+	}
+
+	value := reflect.ValueOf(lz.Fn)
+	results := value.Call([]reflect.Value{})
+	if len(results) == 1 {
+		return results[0].Interface(), nil
+	} else {
+		values := make([]interface{}, len(results))
+		for i, v := range results {
+			values[i] = v.Interface()
+		}
+		return values, nil
+	}
+}
+
+// DiscardHandler reports success for all writes but does nothing.
+// It is useful for dynamically disabling logging at runtime via
+// a Logger's SetHandler method.
+func DiscardHandler() Handler {
+	return FuncHandler(func(r *Record) error {
+		return nil
+	})
+}
+
+// The Must object provides the following Handler creation functions
+// which instead of returning an error parameter only return a Handler
+// and panic on failure: FileHandler, NetHandler, SyslogHandler, SyslogNetHandler
+var Must muster
+
+func must(h Handler, err error) Handler {
+	if err != nil {
+		panic(err)
+	}
+	return h
+}
+
+type muster struct{}
+
+func (m muster) FileHandler(path string, fmtr Format) Handler {
+	return must(FileHandler(path, fmtr))
+}
+
+func (m muster) NetHandler(network, addr string, fmtr Format) Handler {
+	return must(NetHandler(network, addr, fmtr))
+}

+ 26 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/handler_go13.go

@@ -0,0 +1,26 @@
+// +build !go1.4
+
+package log15
+
+import (
+	"sync/atomic"
+	"unsafe"
+)
+
+// swapHandler wraps another handler that may be swapped out
+// dynamically at runtime in a thread-safe fashion.
+type swapHandler struct {
+	handler unsafe.Pointer
+}
+
+func (h *swapHandler) Log(r *Record) error {
+	return h.Get().Log(r)
+}
+
+func (h *swapHandler) Get() Handler {
+	return *(*Handler)(atomic.LoadPointer(&h.handler))
+}
+
+func (h *swapHandler) Swap(newHandler Handler) {
+	atomic.StorePointer(&h.handler, unsafe.Pointer(&newHandler))
+}

+ 23 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/handler_go14.go

@@ -0,0 +1,23 @@
+// +build go1.4
+
+package log15
+
+import "sync/atomic"
+
+// swapHandler wraps another handler that may be swapped out
+// dynamically at runtime in a thread-safe fashion.
+type swapHandler struct {
+	handler atomic.Value
+}
+
+func (h *swapHandler) Log(r *Record) error {
+	return (*h.handler.Load().(*Handler)).Log(r)
+}
+
+func (h *swapHandler) Swap(newHandler Handler) {
+	h.handler.Store(&newHandler)
+}
+
+func (h *swapHandler) Get() Handler {
+	return *h.handler.Load().(*Handler)
+}

+ 208 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/logger.go

@@ -0,0 +1,208 @@
+package log15
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/go-stack/stack"
+)
+
+const timeKey = "t"
+const lvlKey = "lvl"
+const msgKey = "msg"
+const errorKey = "LOG15_ERROR"
+
+type Lvl int
+
+const (
+	LvlCrit Lvl = iota
+	LvlError
+	LvlWarn
+	LvlInfo
+	LvlDebug
+)
+
+// Returns the name of a Lvl
+func (l Lvl) String() string {
+	switch l {
+	case LvlDebug:
+		return "dbug"
+	case LvlInfo:
+		return "info"
+	case LvlWarn:
+		return "warn"
+	case LvlError:
+		return "eror"
+	case LvlCrit:
+		return "crit"
+	default:
+		panic("bad level")
+	}
+}
+
+// Returns the appropriate Lvl from a string name.
+// Useful for parsing command line args and configuration files.
+func LvlFromString(lvlString string) (Lvl, error) {
+	switch lvlString {
+	case "debug", "dbug":
+		return LvlDebug, nil
+	case "info":
+		return LvlInfo, nil
+	case "warn":
+		return LvlWarn, nil
+	case "error", "eror":
+		return LvlError, nil
+	case "crit":
+		return LvlCrit, nil
+	default:
+		return LvlDebug, fmt.Errorf("Unknown level: %v", lvlString)
+	}
+}
+
+// A Record is what a Logger asks its handler to write
+type Record struct {
+	Time     time.Time
+	Lvl      Lvl
+	Msg      string
+	Ctx      []interface{}
+	Call     stack.Call
+	KeyNames RecordKeyNames
+}
+
+type RecordKeyNames struct {
+	Time string
+	Msg  string
+	Lvl  string
+}
+
+// A Logger writes key/value pairs to a Handler
+type Logger interface {
+	// New returns a new Logger that has this logger's context plus the given context
+	New(ctx ...interface{}) Logger
+
+	// GetHandler gets the handler associated with the logger.
+	GetHandler() Handler
+
+	// SetHandler updates the logger to write records to the specified handler.
+	SetHandler(h Handler)
+
+	// Log a message at the given level with context key/value pairs
+	Debug(msg string, ctx ...interface{})
+	Info(msg string, ctx ...interface{})
+	Warn(msg string, ctx ...interface{})
+	Error(msg string, ctx ...interface{})
+	Crit(msg string, ctx ...interface{})
+}
+
+type logger struct {
+	ctx []interface{}
+	h   *swapHandler
+}
+
+func (l *logger) write(msg string, lvl Lvl, ctx []interface{}) {
+	l.h.Log(&Record{
+		Time: time.Now(),
+		Lvl:  lvl,
+		Msg:  msg,
+		Ctx:  newContext(l.ctx, ctx),
+		Call: stack.Caller(2),
+		KeyNames: RecordKeyNames{
+			Time: timeKey,
+			Msg:  msgKey,
+			Lvl:  lvlKey,
+		},
+	})
+}
+
+func (l *logger) New(ctx ...interface{}) Logger {
+	child := &logger{newContext(l.ctx, ctx), new(swapHandler)}
+	child.SetHandler(l.h)
+	return child
+}
+
+func newContext(prefix []interface{}, suffix []interface{}) []interface{} {
+	normalizedSuffix := normalize(suffix)
+	newCtx := make([]interface{}, len(prefix)+len(normalizedSuffix))
+	n := copy(newCtx, prefix)
+	copy(newCtx[n:], normalizedSuffix)
+	return newCtx
+}
+
+func (l *logger) Debug(msg string, ctx ...interface{}) {
+	l.write(msg, LvlDebug, ctx)
+}
+
+func (l *logger) Info(msg string, ctx ...interface{}) {
+	l.write(msg, LvlInfo, ctx)
+}
+
+func (l *logger) Warn(msg string, ctx ...interface{}) {
+	l.write(msg, LvlWarn, ctx)
+}
+
+func (l *logger) Error(msg string, ctx ...interface{}) {
+	l.write(msg, LvlError, ctx)
+}
+
+func (l *logger) Crit(msg string, ctx ...interface{}) {
+	l.write(msg, LvlCrit, ctx)
+}
+
+func (l *logger) GetHandler() Handler {
+	return l.h.Get()
+}
+
+func (l *logger) SetHandler(h Handler) {
+	l.h.Swap(h)
+}
+
+func normalize(ctx []interface{}) []interface{} {
+	// if the caller passed a Ctx object, then expand it
+	if len(ctx) == 1 {
+		if ctxMap, ok := ctx[0].(Ctx); ok {
+			ctx = ctxMap.toArray()
+		}
+	}
+
+	// ctx needs to be even because it's a series of key/value pairs
+	// no one wants to check for errors on logging functions,
+	// so instead of erroring on bad input, we'll just make sure
+	// that things are the right length and users can fix bugs
+	// when they see the output looks wrong
+	if len(ctx)%2 != 0 {
+		ctx = append(ctx, nil, errorKey, "Normalized odd number of arguments by adding nil")
+	}
+
+	return ctx
+}
+
+// Lazy allows you to defer calculation of a logged value that is expensive
+// to compute until it is certain that it must be evaluated with the given filters.
+//
+// Lazy may also be used in conjunction with a Logger's New() function
+// to generate a child logger which always reports the current value of changing
+// state.
+//
+// You may wrap any function which takes no arguments to Lazy. It may return any
+// number of values of any type.
+type Lazy struct {
+	Fn interface{}
+}
+
+// Ctx is a map of key/value pairs to pass as context to a log function
+// Use this only if you really need greater safety around the arguments you pass
+// to the logging functions.
+type Ctx map[string]interface{}
+
+func (c Ctx) toArray() []interface{} {
+	arr := make([]interface{}, len(c)*2)
+
+	i := 0
+	for k, v := range c {
+		arr[i] = k
+		arr[i+1] = v
+		i += 2
+	}
+
+	return arr
+}

+ 67 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/root.go

@@ -0,0 +1,67 @@
+package log15
+
+import (
+	"os"
+
+	"github.com/inconshreveable/log15/term"
+	"github.com/mattn/go-colorable"
+)
+
+var (
+	root          *logger
+	StdoutHandler = StreamHandler(os.Stdout, LogfmtFormat())
+	StderrHandler = StreamHandler(os.Stderr, LogfmtFormat())
+)
+
+func init() {
+	if term.IsTty(os.Stdout.Fd()) {
+		StdoutHandler = StreamHandler(colorable.NewColorableStdout(), TerminalFormat())
+	}
+
+	if term.IsTty(os.Stderr.Fd()) {
+		StderrHandler = StreamHandler(colorable.NewColorableStderr(), TerminalFormat())
+	}
+
+	root = &logger{[]interface{}{}, new(swapHandler)}
+	root.SetHandler(StdoutHandler)
+}
+
+// New returns a new logger with the given context.
+// New is a convenient alias for Root().New
+func New(ctx ...interface{}) Logger {
+	return root.New(ctx...)
+}
+
+// Root returns the root logger
+func Root() Logger {
+	return root
+}
+
+// The following functions bypass the exported logger methods (logger.Debug,
+// etc.) to keep the call depth the same for all paths to logger.write so
+// runtime.Caller(2) always refers to the call site in client code.
+
+// Debug is a convenient alias for Root().Debug
+func Debug(msg string, ctx ...interface{}) {
+	root.write(msg, LvlDebug, ctx)
+}
+
+// Info is a convenient alias for Root().Info
+func Info(msg string, ctx ...interface{}) {
+	root.write(msg, LvlInfo, ctx)
+}
+
+// Warn is a convenient alias for Root().Warn
+func Warn(msg string, ctx ...interface{}) {
+	root.write(msg, LvlWarn, ctx)
+}
+
+// Error is a convenient alias for Root().Error
+func Error(msg string, ctx ...interface{}) {
+	root.write(msg, LvlError, ctx)
+}
+
+// Crit is a convenient alias for Root().Crit
+func Crit(msg string, ctx ...interface{}) {
+	root.write(msg, LvlCrit, ctx)
+}

+ 55 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/syslog.go

@@ -0,0 +1,55 @@
+// +build !windows,!plan9
+
+package log15
+
+import (
+	"log/syslog"
+	"strings"
+)
+
+// SyslogHandler opens a connection to the system syslog daemon by calling
+// syslog.New and writes all records to it.
+func SyslogHandler(priority syslog.Priority, tag string, fmtr Format) (Handler, error) {
+	wr, err := syslog.New(priority, tag)
+	return sharedSyslog(fmtr, wr, err)
+}
+
+// SyslogNetHandler opens a connection to a log daemon over the network and writes
+// all log records to it.
+func SyslogNetHandler(net, addr string, priority syslog.Priority, tag string, fmtr Format) (Handler, error) {
+	wr, err := syslog.Dial(net, addr, priority, tag)
+	return sharedSyslog(fmtr, wr, err)
+}
+
+func sharedSyslog(fmtr Format, sysWr *syslog.Writer, err error) (Handler, error) {
+	if err != nil {
+		return nil, err
+	}
+	h := FuncHandler(func(r *Record) error {
+		var syslogFn = sysWr.Info
+		switch r.Lvl {
+		case LvlCrit:
+			syslogFn = sysWr.Crit
+		case LvlError:
+			syslogFn = sysWr.Err
+		case LvlWarn:
+			syslogFn = sysWr.Warning
+		case LvlInfo:
+			syslogFn = sysWr.Info
+		case LvlDebug:
+			syslogFn = sysWr.Debug
+		}
+
+		s := strings.TrimSpace(string(fmtr.Format(r)))
+		return syslogFn(s)
+	})
+	return LazyHandler(&closingHandler{sysWr, h}), nil
+}
+
+func (m muster) SyslogHandler(priority syslog.Priority, tag string, fmtr Format) Handler {
+	return must(SyslogHandler(priority, tag, fmtr))
+}
+
+func (m muster) SyslogNetHandler(net, addr string, priority syslog.Priority, tag string, fmtr Format) Handler {
+	return must(SyslogNetHandler(net, addr, priority, tag, fmtr))
+}

+ 21 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/term/LICENSE

@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.

+ 13 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_appengine.go

@@ -0,0 +1,13 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package term
+
+// IsTty always returns false on AppEngine.
+func IsTty(fd uintptr) bool {
+	return false
+}

+ 12 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_darwin.go

@@ -0,0 +1,12 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package term
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios syscall.Termios

+ 18 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_freebsd.go

@@ -0,0 +1,18 @@
+package term
+
+import (
+	"syscall"
+)
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+// Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin.
+type Termios struct {
+	Iflag  uint32
+	Oflag  uint32
+	Cflag  uint32
+	Lflag  uint32
+	Cc     [20]uint8
+	Ispeed uint32
+	Ospeed uint32
+}

+ 14 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_linux.go

@@ -0,0 +1,14 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package term
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TCGETS
+
+type Termios syscall.Termios

+ 20 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_notwindows.go

@@ -0,0 +1,20 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux,!appengine darwin freebsd openbsd
+
+package term
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// IsTty returns true if the given file descriptor is a terminal.
+func IsTty(fd uintptr) bool {
+	var termios Termios
+	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+	return err == 0
+}

+ 7 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_openbsd.go

@@ -0,0 +1,7 @@
+package term
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios syscall.Termios

+ 26 - 0
Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_windows.go

@@ -0,0 +1,26 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package term
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+var (
+	procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+)
+
+// IsTty returns true if the given file descriptor is a terminal.
+func IsTty(fd uintptr) bool {
+	var st uint32
+	r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
+	return r != 0 && e == 0
+}

+ 0 - 267
Godeps/_workspace/src/github.com/influxdata/influxdb/client/README.md

@@ -1,267 +0,0 @@
-# InfluxDB Client
-
-[![GoDoc](https://godoc.org/github.com/influxdata/influxdb?status.svg)](http://godoc.org/github.com/influxdata/influxdb/client/v2)
-
-## Description
-
-**NOTE:** The Go client library now has a "v2" version, with the old version
-being deprecated. The new version can be imported at
-`import "github.com/influxdata/influxdb/client/v2"`. It is not backwards-compatible.
-
-A Go client library written and maintained by the **InfluxDB** team.
-This package provides convenience functions to read and write time series data.
-It uses the HTTP protocol to communicate with your **InfluxDB** cluster.
-
-
-## Getting Started
-
-### Connecting To Your Database
-
-Connecting to an **InfluxDB** database is straightforward. You will need a host
-name, a port and the cluster user credentials if applicable. The default port is
-8086. You can customize these settings to your specific installation via the
-**InfluxDB** configuration file.
-
-Though not necessary for experimentation, you may want to create a new user
-and authenticate the connection to your database.
-
-For more information please check out the
-[Admin Docs](https://docs.influxdata.com/influxdb/latest/administration/).
-
-For the impatient, you can create a new admin user _bubba_ by firing off the
-[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go).
-
-```shell
-influx
-> create user bubba with password 'bumblebeetuna'
-> grant all privileges to bubba
-```
-
-And now for good measure set the credentials in you shell environment.
-In the example below we will use $INFLUX_USER and $INFLUX_PWD
-
-Now with the administrivia out of the way, let's connect to our database.
-
-NOTE: If you've opted out of creating a user, you can omit Username and Password in
-the configuration below.
-
-```go
-package main
-
-import (
-	"log"
-	"time"
-
-	"github.com/influxdata/influxdb/client/v2"
-)
-
-const (
-	MyDB = "square_holes"
-	username = "bubba"
-	password = "bumblebeetuna"
-)
-
-func main() {
-	// Make client
-	c, err := client.NewHTTPClient(client.HTTPConfig{
-		Addr: "http://localhost:8086",
-		Username: username,
-		Password: password,
-	})
-
-	if err != nil {
-	    log.Fatalln("Error: ", err)
-	}
-
-	// Create a new point batch
-	bp, err := client.NewBatchPoints(client.BatchPointsConfig{
-		Database:  MyDB,
-		Precision: "s",
-	})
-
-	if err != nil {
-	    log.Fatalln("Error: ", err)
-	}
-
-	// Create a point and add to batch
-	tags := map[string]string{"cpu": "cpu-total"}
-	fields := map[string]interface{}{
-		"idle":   10.1,
-		"system": 53.3,
-		"user":   46.6,
-	}
-	pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
-
-	if err != nil {
-	    log.Fatalln("Error: ", err)
-	}
-
-	bp.AddPoint(pt)
-
-	// Write the batch
-	c.Write(bp)
-}
-
-```
-
-### Inserting Data
-
-Time series data aka *points* are written to the database using batch inserts.
-The mechanism is to create one or more points and then create a batch aka
-*batch points* and write these to a given database and series. A series is a
-combination of a measurement (time/values) and a set of tags.
-
-In this sample we will create a batch of a 1,000 points. Each point has a time and
-a single value as well as 2 tags indicating a shape and color. We write these points
-to a database called _square_holes_ using a measurement named _shapes_.
-
-NOTE: You can specify a RetentionPolicy as part of the batch points. If not
-provided InfluxDB will use the database _default_ retention policy.
-
-```go
-func writePoints(clnt client.Client) {
-	sampleSize := 1000
-	rand.Seed(42)
-
-	bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
-		Database:  "systemstats",
-		Precision: "us",
-	})
-
-	for i := 0; i < sampleSize; i++ {
-		regions := []string{"us-west1", "us-west2", "us-west3", "us-east1"}
-		tags := map[string]string{
-			"cpu":    "cpu-total",
-			"host":   fmt.Sprintf("host%d", rand.Intn(1000)),
-			"region": regions[rand.Intn(len(regions))],
-		}
-
-		idle := rand.Float64() * 100.0
-		fields := map[string]interface{}{
-			"idle": idle,
-			"busy": 100.0 - idle,
-		}
-
-		bp.AddPoint(client.NewPoint(
-			"cpu_usage",
-			tags,
-			fields,
-			time.Now(),
-		))
-	}
-
-	err := clnt.Write(bp)
-	if err != nil {
-		log.Fatal(err)
-	}
-}
-```
-
-
-### Querying Data
-
-One nice advantage of using **InfluxDB** the ability to query your data using familiar
-SQL constructs. In this example we can create a convenience function to query the database
-as follows:
-
-```go
-// queryDB convenience function to query the database
-func queryDB(clnt client.Client, cmd string) (res []client.Result, err error) {
-	q := client.Query{
-		Command:  cmd,
-		Database: MyDB,
-	}
-	if response, err := clnt.Query(q); err == nil {
-		if response.Error() != nil {
-			return res, response.Error()
-		}
-		res = response.Results
-	} else {
-		return res, err
-	}
-	return res, nil
-}
-```
-
-#### Creating a Database
-
-```go
-_, err := queryDB(clnt, fmt.Sprintf("CREATE DATABASE %s", MyDB))
-if err != nil {
-	log.Fatal(err)
-}
-```
-
-#### Count Records
-
-```go
-q := fmt.Sprintf("SELECT count(%s) FROM %s", "value", MyMeasurement)
-res, err := queryDB(clnt, q)
-if err != nil {
-	log.Fatal(err)
-}
-count := res[0].Series[0].Values[0][1]
-log.Printf("Found a total of %v records\n", count)
-```
-
-#### Find the last 10 _shapes_ records
-
-```go
-q := fmt.Sprintf("SELECT * FROM %s LIMIT %d", MyMeasurement, 20)
-res, err = queryDB(clnt, q)
-if err != nil {
-	log.Fatal(err)
-}
-
-for i, row := range res[0].Series[0].Values {
-	t, err := time.Parse(time.RFC3339, row[0].(string))
-	if err != nil {
-		log.Fatal(err)
-	}
-	val := row[1].(string)
-	log.Printf("[%2d] %s: %s\n", i, t.Format(time.Stamp), val)
-}
-```
-
-### Using the UDP Client
-
-The **InfluxDB** client also supports writing over UDP.
-
-```go
-func WriteUDP() {
-	// Make client
-	c := client.NewUDPClient("localhost:8089")
-
-	// Create a new point batch
-	bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
-		Precision: "s",
-	})
-
-	// Create a point and add to batch
-	tags := map[string]string{"cpu": "cpu-total"}
-	fields := map[string]interface{}{
-		"idle":   10.1,
-		"system": 53.3,
-		"user":   46.6,
-	}
-	pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
-	if err != nil {
-		panic(err.Error())
-	}
-	bp.AddPoint(pt)
-
-	// Write the batch
-	c.Write(bp)
-}
-```
-
-## Go Docs
-
-Please refer to
-[http://godoc.org/github.com/influxdata/influxdb/client/v2](http://godoc.org/github.com/influxdata/influxdb/client/v2)
-for documentation.
-
-## See Also
-
-You can also examine how the client library is used by the
-[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go).

+ 0 - 789
Godeps/_workspace/src/github.com/influxdata/influxdb/client/influxdb.go

@@ -1,789 +0,0 @@
-package client
-
-import (
-	"bytes"
-	"crypto/tls"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net"
-	"net/http"
-	"net/url"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/influxdata/influxdb/models"
-)
-
-const (
-	// DefaultHost is the default host used to connect to an InfluxDB instance
-	DefaultHost = "localhost"
-
-	// DefaultPort is the default port used to connect to an InfluxDB instance
-	DefaultPort = 8086
-
-	// DefaultTimeout is the default connection timeout used to connect to an InfluxDB instance
-	DefaultTimeout = 0
-)
-
-// Query is used to send a command to the server. Both Command and Database are required.
-type Query struct {
-	Command  string
-	Database string
-
-	// Chunked tells the server to send back chunked responses. This places
-	// less load on the server by sending back chunks of the response rather
-	// than waiting for the entire response all at once.
-	Chunked bool
-
-	// ChunkSize sets the maximum number of rows that will be returned per
-	// chunk. Chunks are either divided based on their series or if they hit
-	// the chunk size limit.
-	//
-	// Chunked must be set to true for this option to be used.
-	ChunkSize int
-}
-
-// ParseConnectionString will parse a string to create a valid connection URL
-func ParseConnectionString(path string, ssl bool) (url.URL, error) {
-	var host string
-	var port int
-
-	h, p, err := net.SplitHostPort(path)
-	if err != nil {
-		if path == "" {
-			host = DefaultHost
-		} else {
-			host = path
-		}
-		// If they didn't specify a port, always use the default port
-		port = DefaultPort
-	} else {
-		host = h
-		port, err = strconv.Atoi(p)
-		if err != nil {
-			return url.URL{}, fmt.Errorf("invalid port number %q: %s\n", path, err)
-		}
-	}
-
-	u := url.URL{
-		Scheme: "http",
-	}
-	if ssl {
-		u.Scheme = "https"
-	}
-
-	u.Host = net.JoinHostPort(host, strconv.Itoa(port))
-
-	return u, nil
-}
-
-// Config is used to specify what server to connect to.
-// URL: The URL of the server connecting to.
-// Username/Password are optional. They will be passed via basic auth if provided.
-// UserAgent: If not provided, will default "InfluxDBClient",
-// Timeout: If not provided, will default to 0 (no timeout)
-type Config struct {
-	URL       url.URL
-	Username  string
-	Password  string
-	UserAgent string
-	Timeout   time.Duration
-	Precision string
-	UnsafeSsl bool
-}
-
-// NewConfig will create a config to be used in connecting to the client
-func NewConfig() Config {
-	return Config{
-		Timeout: DefaultTimeout,
-	}
-}
-
-// Client is used to make calls to the server.
-type Client struct {
-	url        url.URL
-	username   string
-	password   string
-	httpClient *http.Client
-	userAgent  string
-	precision  string
-}
-
-const (
-	// ConsistencyOne requires at least one data node acknowledged a write.
-	ConsistencyOne = "one"
-
-	// ConsistencyAll requires all data nodes to acknowledge a write.
-	ConsistencyAll = "all"
-
-	// ConsistencyQuorum requires a quorum of data nodes to acknowledge a write.
-	ConsistencyQuorum = "quorum"
-
-	// ConsistencyAny allows for hinted hand off, potentially no write happened yet.
-	ConsistencyAny = "any"
-)
-
-// NewClient will instantiate and return a connected client to issue commands to the server.
-func NewClient(c Config) (*Client, error) {
-	tlsConfig := &tls.Config{
-		InsecureSkipVerify: c.UnsafeSsl,
-	}
-
-	tr := &http.Transport{
-		TLSClientConfig: tlsConfig,
-	}
-
-	client := Client{
-		url:        c.URL,
-		username:   c.Username,
-		password:   c.Password,
-		httpClient: &http.Client{Timeout: c.Timeout, Transport: tr},
-		userAgent:  c.UserAgent,
-		precision:  c.Precision,
-	}
-	if client.userAgent == "" {
-		client.userAgent = "InfluxDBClient"
-	}
-	return &client, nil
-}
-
-// SetAuth will update the username and passwords
-func (c *Client) SetAuth(u, p string) {
-	c.username = u
-	c.password = p
-}
-
-// SetPrecision will update the precision
-func (c *Client) SetPrecision(precision string) {
-	c.precision = precision
-}
-
-// Query sends a command to the server and returns the Response
-func (c *Client) Query(q Query) (*Response, error) {
-	u := c.url
-
-	u.Path = "query"
-	values := u.Query()
-	values.Set("q", q.Command)
-	values.Set("db", q.Database)
-	if q.Chunked {
-		values.Set("chunked", "true")
-		if q.ChunkSize > 0 {
-			values.Set("chunk_size", strconv.Itoa(q.ChunkSize))
-		}
-	}
-	if c.precision != "" {
-		values.Set("epoch", c.precision)
-	}
-	u.RawQuery = values.Encode()
-
-	req, err := http.NewRequest("POST", u.String(), nil)
-	if err != nil {
-		return nil, err
-	}
-	req.Header.Set("User-Agent", c.userAgent)
-	if c.username != "" {
-		req.SetBasicAuth(c.username, c.password)
-	}
-
-	resp, err := c.httpClient.Do(req)
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	var response Response
-	if q.Chunked {
-		cr := NewChunkedResponse(resp.Body)
-		for {
-			r, err := cr.NextResponse()
-			if err != nil {
-				// If we got an error while decoding the response, send that back.
-				return nil, err
-			}
-
-			if r == nil {
-				break
-			}
-
-			response.Results = append(response.Results, r.Results...)
-			if r.Err != nil {
-				response.Err = r.Err
-				break
-			}
-		}
-	} else {
-		dec := json.NewDecoder(resp.Body)
-		dec.UseNumber()
-		if err := dec.Decode(&response); err != nil {
-			// Ignore EOF errors if we got an invalid status code.
-			if !(err == io.EOF && resp.StatusCode != http.StatusOK) {
-				return nil, err
-			}
-		}
-	}
-
-	// If we don't have an error in our json response, and didn't get StatusOK,
-	// then send back an error.
-	if resp.StatusCode != http.StatusOK && response.Error() == nil {
-		return &response, fmt.Errorf("received status code %d from server", resp.StatusCode)
-	}
-	return &response, nil
-}
-
-// Write takes BatchPoints and allows for writing of multiple points with defaults
-// If successful, error is nil and Response is nil
-// If an error occurs, Response may contain additional information if populated.
-func (c *Client) Write(bp BatchPoints) (*Response, error) {
-	u := c.url
-	u.Path = "write"
-
-	var b bytes.Buffer
-	for _, p := range bp.Points {
-		err := checkPointTypes(p)
-		if err != nil {
-			return nil, err
-		}
-		if p.Raw != "" {
-			if _, err := b.WriteString(p.Raw); err != nil {
-				return nil, err
-			}
-		} else {
-			for k, v := range bp.Tags {
-				if p.Tags == nil {
-					p.Tags = make(map[string]string, len(bp.Tags))
-				}
-				p.Tags[k] = v
-			}
-
-			if _, err := b.WriteString(p.MarshalString()); err != nil {
-				return nil, err
-			}
-		}
-
-		if err := b.WriteByte('\n'); err != nil {
-			return nil, err
-		}
-	}
-
-	req, err := http.NewRequest("POST", u.String(), &b)
-	if err != nil {
-		return nil, err
-	}
-	req.Header.Set("Content-Type", "")
-	req.Header.Set("User-Agent", c.userAgent)
-	if c.username != "" {
-		req.SetBasicAuth(c.username, c.password)
-	}
-
-	precision := bp.Precision
-	if precision == "" {
-		precision = c.precision
-	}
-
-	params := req.URL.Query()
-	params.Set("db", bp.Database)
-	params.Set("rp", bp.RetentionPolicy)
-	params.Set("precision", precision)
-	params.Set("consistency", bp.WriteConsistency)
-	req.URL.RawQuery = params.Encode()
-
-	resp, err := c.httpClient.Do(req)
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	var response Response
-	body, err := ioutil.ReadAll(resp.Body)
-	if err != nil {
-		return nil, err
-	}
-
-	if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
-		var err = fmt.Errorf(string(body))
-		response.Err = err
-		return &response, err
-	}
-
-	return nil, nil
-}
-
-// WriteLineProtocol takes a string with line returns to delimit each write
-// If successful, error is nil and Response is nil
-// If an error occurs, Response may contain additional information if populated.
-func (c *Client) WriteLineProtocol(data, database, retentionPolicy, precision, writeConsistency string) (*Response, error) {
-	u := c.url
-	u.Path = "write"
-
-	r := strings.NewReader(data)
-
-	req, err := http.NewRequest("POST", u.String(), r)
-	if err != nil {
-		return nil, err
-	}
-	req.Header.Set("Content-Type", "")
-	req.Header.Set("User-Agent", c.userAgent)
-	if c.username != "" {
-		req.SetBasicAuth(c.username, c.password)
-	}
-	params := req.URL.Query()
-	params.Set("db", database)
-	params.Set("rp", retentionPolicy)
-	params.Set("precision", precision)
-	params.Set("consistency", writeConsistency)
-	req.URL.RawQuery = params.Encode()
-
-	resp, err := c.httpClient.Do(req)
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	var response Response
-	body, err := ioutil.ReadAll(resp.Body)
-	if err != nil {
-		return nil, err
-	}
-
-	if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
-		err := fmt.Errorf(string(body))
-		response.Err = err
-		return &response, err
-	}
-
-	return nil, nil
-}
-
-// Ping will check to see if the server is up
-// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred.
-func (c *Client) Ping() (time.Duration, string, error) {
-	now := time.Now()
-	u := c.url
-	u.Path = "ping"
-
-	req, err := http.NewRequest("GET", u.String(), nil)
-	if err != nil {
-		return 0, "", err
-	}
-	req.Header.Set("User-Agent", c.userAgent)
-	if c.username != "" {
-		req.SetBasicAuth(c.username, c.password)
-	}
-
-	resp, err := c.httpClient.Do(req)
-	if err != nil {
-		return 0, "", err
-	}
-	defer resp.Body.Close()
-
-	version := resp.Header.Get("X-Influxdb-Version")
-	return time.Since(now), version, nil
-}
-
-// Structs
-
-// Message represents a user message.
-type Message struct {
-	Level string `json:"level,omitempty"`
-	Text  string `json:"text,omitempty"`
-}
-
-// Result represents a resultset returned from a single statement.
-type Result struct {
-	Series   []models.Row
-	Messages []*Message
-	Err      error
-}
-
-// MarshalJSON encodes the result into JSON.
-func (r *Result) MarshalJSON() ([]byte, error) {
-	// Define a struct that outputs "error" as a string.
-	var o struct {
-		Series   []models.Row `json:"series,omitempty"`
-		Messages []*Message   `json:"messages,omitempty"`
-		Err      string       `json:"error,omitempty"`
-	}
-
-	// Copy fields to output struct.
-	o.Series = r.Series
-	o.Messages = r.Messages
-	if r.Err != nil {
-		o.Err = r.Err.Error()
-	}
-
-	return json.Marshal(&o)
-}
-
-// UnmarshalJSON decodes the data into the Result struct
-func (r *Result) UnmarshalJSON(b []byte) error {
-	var o struct {
-		Series   []models.Row `json:"series,omitempty"`
-		Messages []*Message   `json:"messages,omitempty"`
-		Err      string       `json:"error,omitempty"`
-	}
-
-	dec := json.NewDecoder(bytes.NewBuffer(b))
-	dec.UseNumber()
-	err := dec.Decode(&o)
-	if err != nil {
-		return err
-	}
-	r.Series = o.Series
-	r.Messages = o.Messages
-	if o.Err != "" {
-		r.Err = errors.New(o.Err)
-	}
-	return nil
-}
-
-// Response represents a list of statement results.
-type Response struct {
-	Results []Result
-	Err     error
-}
-
-// MarshalJSON encodes the response into JSON.
-func (r *Response) MarshalJSON() ([]byte, error) {
-	// Define a struct that outputs "error" as a string.
-	var o struct {
-		Results []Result `json:"results,omitempty"`
-		Err     string   `json:"error,omitempty"`
-	}
-
-	// Copy fields to output struct.
-	o.Results = r.Results
-	if r.Err != nil {
-		o.Err = r.Err.Error()
-	}
-
-	return json.Marshal(&o)
-}
-
-// UnmarshalJSON decodes the data into the Response struct
-func (r *Response) UnmarshalJSON(b []byte) error {
-	var o struct {
-		Results []Result `json:"results,omitempty"`
-		Err     string   `json:"error,omitempty"`
-	}
-
-	dec := json.NewDecoder(bytes.NewBuffer(b))
-	dec.UseNumber()
-	err := dec.Decode(&o)
-	if err != nil {
-		return err
-	}
-	r.Results = o.Results
-	if o.Err != "" {
-		r.Err = errors.New(o.Err)
-	}
-	return nil
-}
-
-// Error returns the first error from any statement.
-// Returns nil if no errors occurred on any statements.
-func (r *Response) Error() error {
-	if r.Err != nil {
-		return r.Err
-	}
-	for _, result := range r.Results {
-		if result.Err != nil {
-			return result.Err
-		}
-	}
-	return nil
-}
-
-// ChunkedResponse represents a response from the server that
-// uses chunking to stream the output.
-type ChunkedResponse struct {
-	dec *json.Decoder
-}
-
-// NewChunkedResponse reads a stream and produces responses from the stream.
-func NewChunkedResponse(r io.Reader) *ChunkedResponse {
-	dec := json.NewDecoder(r)
-	dec.UseNumber()
-	return &ChunkedResponse{dec: dec}
-}
-
-// NextResponse reads the next line of the stream and returns a response.
-func (r *ChunkedResponse) NextResponse() (*Response, error) {
-	var response Response
-	if err := r.dec.Decode(&response); err != nil {
-		if err == io.EOF {
-			return nil, nil
-		}
-		return nil, err
-	}
-	return &response, nil
-}
-
-// Point defines the fields that will be written to the database
-// Measurement, Time, and Fields are required
-// Precision can be specified if the time is in epoch format (integer).
-// Valid values for Precision are n, u, ms, s, m, and h
-type Point struct {
-	Measurement string
-	Tags        map[string]string
-	Time        time.Time
-	Fields      map[string]interface{}
-	Precision   string
-	Raw         string
-}
-
-// MarshalJSON will format the time in RFC3339Nano
-// Precision is also ignored as it is only used for writing, not reading
-// Or another way to say it is we always send back in nanosecond precision
-func (p *Point) MarshalJSON() ([]byte, error) {
-	point := struct {
-		Measurement string                 `json:"measurement,omitempty"`
-		Tags        map[string]string      `json:"tags,omitempty"`
-		Time        string                 `json:"time,omitempty"`
-		Fields      map[string]interface{} `json:"fields,omitempty"`
-		Precision   string                 `json:"precision,omitempty"`
-	}{
-		Measurement: p.Measurement,
-		Tags:        p.Tags,
-		Fields:      p.Fields,
-		Precision:   p.Precision,
-	}
-	// Let it omit empty if it's really zero
-	if !p.Time.IsZero() {
-		point.Time = p.Time.UTC().Format(time.RFC3339Nano)
-	}
-	return json.Marshal(&point)
-}
-
-// MarshalString renders string representation of a Point with specified
-// precision. The default precision is nanoseconds.
-func (p *Point) MarshalString() string {
-	pt, err := models.NewPoint(p.Measurement, p.Tags, p.Fields, p.Time)
-	if err != nil {
-		return "# ERROR: " + err.Error() + " " + p.Measurement
-	}
-	if p.Precision == "" || p.Precision == "ns" || p.Precision == "n" {
-		return pt.String()
-	}
-	return pt.PrecisionString(p.Precision)
-}
-
-// UnmarshalJSON decodes the data into the Point struct
-func (p *Point) UnmarshalJSON(b []byte) error {
-	var normal struct {
-		Measurement string                 `json:"measurement"`
-		Tags        map[string]string      `json:"tags"`
-		Time        time.Time              `json:"time"`
-		Precision   string                 `json:"precision"`
-		Fields      map[string]interface{} `json:"fields"`
-	}
-	var epoch struct {
-		Measurement string                 `json:"measurement"`
-		Tags        map[string]string      `json:"tags"`
-		Time        *int64                 `json:"time"`
-		Precision   string                 `json:"precision"`
-		Fields      map[string]interface{} `json:"fields"`
-	}
-
-	if err := func() error {
-		var err error
-		dec := json.NewDecoder(bytes.NewBuffer(b))
-		dec.UseNumber()
-		if err = dec.Decode(&epoch); err != nil {
-			return err
-		}
-		// Convert from epoch to time.Time, but only if Time
-		// was actually set.
-		var ts time.Time
-		if epoch.Time != nil {
-			ts, err = EpochToTime(*epoch.Time, epoch.Precision)
-			if err != nil {
-				return err
-			}
-		}
-		p.Measurement = epoch.Measurement
-		p.Tags = epoch.Tags
-		p.Time = ts
-		p.Precision = epoch.Precision
-		p.Fields = normalizeFields(epoch.Fields)
-		return nil
-	}(); err == nil {
-		return nil
-	}
-
-	dec := json.NewDecoder(bytes.NewBuffer(b))
-	dec.UseNumber()
-	if err := dec.Decode(&normal); err != nil {
-		return err
-	}
-	normal.Time = SetPrecision(normal.Time, normal.Precision)
-	p.Measurement = normal.Measurement
-	p.Tags = normal.Tags
-	p.Time = normal.Time
-	p.Precision = normal.Precision
-	p.Fields = normalizeFields(normal.Fields)
-
-	return nil
-}
-
-// Remove any notion of json.Number
-func normalizeFields(fields map[string]interface{}) map[string]interface{} {
-	newFields := map[string]interface{}{}
-
-	for k, v := range fields {
-		switch v := v.(type) {
-		case json.Number:
-			jv, e := v.Float64()
-			if e != nil {
-				panic(fmt.Sprintf("unable to convert json.Number to float64: %s", e))
-			}
-			newFields[k] = jv
-		default:
-			newFields[k] = v
-		}
-	}
-	return newFields
-}
-
-// BatchPoints is used to send batched data in a single write.
-// Database and Points are required
-// If no retention policy is specified, it will use the databases default retention policy.
-// If tags are specified, they will be "merged" with all points. If a point already has that tag, it will be ignored.
-// If time is specified, it will be applied to any point with an empty time.
-// Precision can be specified if the time is in epoch format (integer).
-// Valid values for Precision are n, u, ms, s, m, and h
-type BatchPoints struct {
-	Points           []Point           `json:"points,omitempty"`
-	Database         string            `json:"database,omitempty"`
-	RetentionPolicy  string            `json:"retentionPolicy,omitempty"`
-	Tags             map[string]string `json:"tags,omitempty"`
-	Time             time.Time         `json:"time,omitempty"`
-	Precision        string            `json:"precision,omitempty"`
-	WriteConsistency string            `json:"-"`
-}
-
-// UnmarshalJSON decodes the data into the BatchPoints struct
-func (bp *BatchPoints) UnmarshalJSON(b []byte) error {
-	var normal struct {
-		Points          []Point           `json:"points"`
-		Database        string            `json:"database"`
-		RetentionPolicy string            `json:"retentionPolicy"`
-		Tags            map[string]string `json:"tags"`
-		Time            time.Time         `json:"time"`
-		Precision       string            `json:"precision"`
-	}
-	var epoch struct {
-		Points          []Point           `json:"points"`
-		Database        string            `json:"database"`
-		RetentionPolicy string            `json:"retentionPolicy"`
-		Tags            map[string]string `json:"tags"`
-		Time            *int64            `json:"time"`
-		Precision       string            `json:"precision"`
-	}
-
-	if err := func() error {
-		var err error
-		if err = json.Unmarshal(b, &epoch); err != nil {
-			return err
-		}
-		// Convert from epoch to time.Time
-		var ts time.Time
-		if epoch.Time != nil {
-			ts, err = EpochToTime(*epoch.Time, epoch.Precision)
-			if err != nil {
-				return err
-			}
-		}
-		bp.Points = epoch.Points
-		bp.Database = epoch.Database
-		bp.RetentionPolicy = epoch.RetentionPolicy
-		bp.Tags = epoch.Tags
-		bp.Time = ts
-		bp.Precision = epoch.Precision
-		return nil
-	}(); err == nil {
-		return nil
-	}
-
-	if err := json.Unmarshal(b, &normal); err != nil {
-		return err
-	}
-	normal.Time = SetPrecision(normal.Time, normal.Precision)
-	bp.Points = normal.Points
-	bp.Database = normal.Database
-	bp.RetentionPolicy = normal.RetentionPolicy
-	bp.Tags = normal.Tags
-	bp.Time = normal.Time
-	bp.Precision = normal.Precision
-
-	return nil
-}
-
-// utility functions
-
-// Addr provides the current url as a string of the server the client is connected to.
-func (c *Client) Addr() string {
-	return c.url.String()
-}
-
-// checkPointTypes ensures no unsupported types are submitted to influxdb, returning error if they are found.
-func checkPointTypes(p Point) error {
-	for _, v := range p.Fields {
-		switch v.(type) {
-		case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, float32, float64, bool, string, nil:
-			return nil
-		default:
-			return fmt.Errorf("unsupported point type: %T", v)
-		}
-	}
-	return nil
-}
-
-// helper functions
-
-// EpochToTime takes a unix epoch time and uses precision to return back a time.Time
-func EpochToTime(epoch int64, precision string) (time.Time, error) {
-	if precision == "" {
-		precision = "s"
-	}
-	var t time.Time
-	switch precision {
-	case "h":
-		t = time.Unix(0, epoch*int64(time.Hour))
-	case "m":
-		t = time.Unix(0, epoch*int64(time.Minute))
-	case "s":
-		t = time.Unix(0, epoch*int64(time.Second))
-	case "ms":
-		t = time.Unix(0, epoch*int64(time.Millisecond))
-	case "u":
-		t = time.Unix(0, epoch*int64(time.Microsecond))
-	case "n":
-		t = time.Unix(0, epoch)
-	default:
-		return time.Time{}, fmt.Errorf("Unknown precision %q", precision)
-	}
-	return t, nil
-}
-
-// SetPrecision will round a time to the specified precision
-func SetPrecision(t time.Time, precision string) time.Time {
-	switch precision {
-	case "n":
-	case "u":
-		return t.Round(time.Microsecond)
-	case "ms":
-		return t.Round(time.Millisecond)
-	case "s":
-		return t.Round(time.Second)
-	case "m":
-		return t.Round(time.Minute)
-	case "h":
-		return t.Round(time.Hour)
-	}
-	return t
-}

+ 0 - 46
Godeps/_workspace/src/github.com/influxdata/influxdb/models/consistency.go

@@ -1,46 +0,0 @@
-package models
-
-import (
-	"errors"
-	"strings"
-)
-
-// ConsistencyLevel represent a required replication criteria before a write can
-// be returned as successful
-type ConsistencyLevel int
-
-const (
-	// ConsistencyLevelAny allows for hinted hand off, potentially no write happened yet
-	ConsistencyLevelAny ConsistencyLevel = iota
-
-	// ConsistencyLevelOne requires at least one data node acknowledged a write
-	ConsistencyLevelOne
-
-	// ConsistencyLevelQuorum requires a quorum of data nodes to acknowledge a write
-	ConsistencyLevelQuorum
-
-	// ConsistencyLevelAll requires all data nodes to acknowledge a write
-	ConsistencyLevelAll
-)
-
-var (
-	// ErrInvalidConsistencyLevel is returned when parsing the string version
-	// of a consistency level.
-	ErrInvalidConsistencyLevel = errors.New("invalid consistency level")
-)
-
-// ParseConsistencyLevel converts a consistency level string to the corresponding ConsistencyLevel const
-func ParseConsistencyLevel(level string) (ConsistencyLevel, error) {
-	switch strings.ToLower(level) {
-	case "any":
-		return ConsistencyLevelAny, nil
-	case "one":
-		return ConsistencyLevelOne, nil
-	case "quorum":
-		return ConsistencyLevelQuorum, nil
-	case "all":
-		return ConsistencyLevelAll, nil
-	default:
-		return 0, ErrInvalidConsistencyLevel
-	}
-}

+ 0 - 1576
Godeps/_workspace/src/github.com/influxdata/influxdb/models/points.go

@@ -1,1576 +0,0 @@
-package models
-
-import (
-	"bytes"
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"hash/fnv"
-	"math"
-	"sort"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/influxdata/influxdb/pkg/escape"
-)
-
var (
	// measurementEscapeCodes maps each byte that must be backslash-escaped
	// in a measurement name to its escaped form.
	measurementEscapeCodes = map[byte][]byte{
		',': []byte(`\,`),
		' ': []byte(`\ `),
	}

	// tagEscapeCodes maps each byte that must be backslash-escaped in a
	// tag key or tag value to its escaped form.
	tagEscapeCodes = map[byte][]byte{
		',': []byte(`\,`),
		' ': []byte(`\ `),
		'=': []byte(`\=`),
	}

	// ErrPointMustHaveAField is returned when a point is created or decoded
	// without at least one field.
	ErrPointMustHaveAField = errors.New("point without fields is unsupported")

	// ErrInvalidNumber is returned when a numeric field value cannot be parsed.
	ErrInvalidNumber = errors.New("invalid number")

	// ErrMaxKeyLengthExceeded is returned when a series key is longer than
	// MaxKeyLength.
	ErrMaxKeyLengthExceeded = errors.New("max key length exceeded")
)

const (
	// MaxKeyLength is the maximum allowed length, in bytes, of a series key
	// (measurement plus sorted tag set).
	MaxKeyLength = 65535
)
-
// Point defines the values that will be written to the database
type Point interface {
	// Name returns the measurement name of the point.
	Name() string
	// SetName updates the measurement name of the point.
	SetName(string)

	// Tags returns the tag set of the point.
	Tags() Tags
	// AddTag adds or replaces a single tag value.
	AddTag(key, value string)
	// SetTags replaces the entire tag set.
	SetTags(tags Tags)

	// Fields returns the parsed field map of the point.
	Fields() Fields

	// Time returns the timestamp of the point.
	Time() time.Time
	// SetTime updates the timestamp of the point.
	SetTime(t time.Time)
	// UnixNano returns the timestamp as nanoseconds since the Unix epoch.
	UnixNano() int64

	// HashID returns a hash of the series key (measurement plus tags).
	HashID() uint64
	// Key returns the escaped series key (measurement plus sorted tags).
	Key() []byte

	// Data returns the opaque binary-encoded field data, if previously set.
	Data() []byte
	// SetData stores opaque binary-encoded field data on the point.
	SetData(buf []byte)

	// String returns a string representation of the point, if there is a
	// timestamp associated with the point then it will be specified with the default
	// precision of nanoseconds
	String() string

	// Bytes returns a []byte representation of the point similar to string.
	MarshalBinary() ([]byte, error)

	// PrecisionString returns a string representation of the point, if there
	// is a timestamp associated with the point then it will be specified in the
	// given unit
	PrecisionString(precision string) string

	// RoundedString returns a string representation of the point, if there
	// is a timestamp associated with the point, then it will be rounded to the
	// given duration
	RoundedString(d time.Duration) string
}

// Points represents a sortable list of points by timestamp.
type Points []Point

// Len implements sort.Interface.
func (a Points) Len() int { return len(a) }

// Less orders points by ascending timestamp.
func (a Points) Less(i, j int) bool { return a[i].Time().Before(a[j].Time()) }

// Swap implements sort.Interface.
func (a Points) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// point is the default implementation of Point.
type point struct {
	time time.Time

	// text encoding of measurement and tags
	// key must always be stored sorted by tags, if the original line was not sorted,
	// we need to resort it
	key []byte

	// text encoding of field data
	fields []byte

	// text encoding of timestamp
	ts []byte

	// binary encoded field data
	data []byte

	// cached version of parsed fields from data
	cachedFields map[string]interface{}

	// cached version of parsed name from key
	cachedName string
}

const (
	// the number of characters for the largest possible int64 (9223372036854775807)
	maxInt64Digits = 19

	// the number of characters for the smallest possible int64 (-9223372036854775808)
	minInt64Digits = 20

	// the number of characters required for the largest float64 before a range check
	// would occur during parsing
	maxFloat64Digits = 25

	// the number of characters required for smallest float64 before a range check occur
	// would occur during parsing
	minFloat64Digits = 27
)
-
// ParsePoints returns a slice of Points from a text representation of a point
// with each point separated by newlines.  If any points fail to parse, a non-nil error
// will be returned in addition to the points that parsed successfully.
// Timestamps are interpreted at nanosecond precision; lines without a
// timestamp default to the current UTC time.
func ParsePoints(buf []byte) ([]Point, error) {
	return ParsePointsWithPrecision(buf, time.Now().UTC(), "n")
}

// ParsePointsString is identical to ParsePoints but accepts a string
// buffer.
func ParsePointsString(buf string) ([]Point, error) {
	return ParsePoints([]byte(buf))
}
-
-// ParseKey returns the measurement name and tags from a point.
-func ParseKey(buf string) (string, Tags, error) {
-	// Ignore the error because scanMeasurement returns "missing fields" which we ignore
-	// when just parsing a key
-	state, i, _ := scanMeasurement([]byte(buf), 0)
-
-	var tags Tags
-	if state == tagKeyState {
-		tags = parseTags([]byte(buf))
-		// scanMeasurement returns the location of the comma if there are tags, strip that off
-		return string(buf[:i-1]), tags, nil
-	}
-	return string(buf[:i]), tags, nil
-}
-
-// ParsePointsWithPrecision is similar to ParsePoints, but allows the
-// caller to provide a precision for time.
-func ParsePointsWithPrecision(buf []byte, defaultTime time.Time, precision string) ([]Point, error) {
-	points := []Point{}
-	var (
-		pos    int
-		block  []byte
-		failed []string
-	)
-	for {
-		pos, block = scanLine(buf, pos)
-		pos++
-
-		if len(block) == 0 {
-			break
-		}
-
-		// lines which start with '#' are comments
-		start := skipWhitespace(block, 0)
-
-		// If line is all whitespace, just skip it
-		if start >= len(block) {
-			continue
-		}
-
-		if block[start] == '#' {
-			continue
-		}
-
-		// strip the newline if one is present
-		if block[len(block)-1] == '\n' {
-			block = block[:len(block)-1]
-		}
-
-		pt, err := parsePoint(block[start:len(block)], defaultTime, precision)
-		if err != nil {
-			failed = append(failed, fmt.Sprintf("unable to parse '%s': %v", string(block[start:len(block)]), err))
-		} else {
-			points = append(points, pt)
-		}
-
-		if pos >= len(buf) {
-			break
-		}
-
-	}
-	if len(failed) > 0 {
-		return points, fmt.Errorf("%s", strings.Join(failed, "\n"))
-	}
-	return points, nil
-
-}
-
// parsePoint parses one line of line protocol:
// measurement[,tag=value...] field=value[,...] [timestamp].
// When the line carries no timestamp, defaultTime (truncated to the
// given precision) is used instead.
func parsePoint(buf []byte, defaultTime time.Time, precision string) (Point, error) {
	// scan the first block which is measurement[,tag1=value1,tag2=value=2...]
	pos, key, err := scanKey(buf, 0)
	if err != nil {
		return nil, err
	}

	// measurement name is required
	if len(key) == 0 {
		return nil, fmt.Errorf("missing measurement")
	}

	if len(key) > MaxKeyLength {
		return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength)
	}

	// scan the second block is which is field1=value1[,field2=value2,...]
	pos, fields, err := scanFields(buf, pos)
	if err != nil {
		return nil, err
	}

	// at least one field is required
	if len(fields) == 0 {
		return nil, fmt.Errorf("missing fields")
	}

	// scan the last block which is an optional integer timestamp
	pos, ts, err := scanTime(buf, pos)

	if err != nil {
		return nil, err
	}

	pt := &point{
		key:    key,
		fields: fields,
		ts:     ts,
	}

	if len(ts) == 0 {
		// no timestamp on the line: fall back to the caller-supplied time
		pt.time = defaultTime
		pt.SetPrecision(precision)
	} else {
		// the inner ts (int64) shadows the outer ts ([]byte) on purpose
		ts, err := strconv.ParseInt(string(ts), 10, 64)
		if err != nil {
			return nil, err
		}
		// scale the raw integer by the precision unit into a time.Time
		pt.time, err = SafeCalcTime(ts, precision)
		if err != nil {
			return nil, err
		}
	}
	return pt, nil
}
-
// GetPrecisionMultiplier returns the number of nanoseconds in one unit
// of the given precision ("u", "ms", "s", "m", "h").  Any other value,
// including "n", yields 1 (nanoseconds).
func GetPrecisionMultiplier(precision string) int64 {
	switch precision {
	case "u":
		return int64(time.Microsecond)
	case "ms":
		return int64(time.Millisecond)
	case "s":
		return int64(time.Second)
	case "m":
		return int64(time.Minute)
	case "h":
		return int64(time.Hour)
	default:
		return int64(time.Nanosecond)
	}
}
-
// scanKey scans buf starting at i for the measurement and tag portion of the point.
// It returns the ending position and the byte slice of key within buf.  If there
// are tags, they will be sorted if they are not already.
func scanKey(buf []byte, i int) (int, []byte, error) {
	start := skipWhitespace(buf, i)

	i = start

	// Determines whether the tags are sorted; assume they are.
	sorted := true

	// indices holds the indexes within buf of the start of each tag.  For example,
	// a buf of 'cpu,host=a,region=b,zone=c' would have indices slice of [4,11,20]
	// which indicates that the first tag starts at buf[4], seconds at buf[11], and
	// last at buf[20]
	indices := make([]int, 100)

	// tracks how many commas we've seen so we know how many values are indices.
	// Since indices is an arbitrarily large slice,
	// we need to know how many values in the buffer are in use.
	commas := 0

	// First scan the Point's measurement.
	state, i, err := scanMeasurement(buf, i)
	if err != nil {
		return i, buf[start:i], err
	}

	// Optionally scan tags if needed.
	if state == tagKeyState {
		i, commas, indices, err = scanTags(buf, i, indices)
		if err != nil {
			return i, buf[start:i], err
		}
	}

	// Now we know where the key region is within buf, and the locations of tags, we
	// need to determine if duplicate tags exist and if the tags are sorted.  This iterates
	// 1/2 of the list comparing each end with each other, walking towards the center from
	// both sides.
	for j := 0; j < commas/2; j++ {
		// get the left and right tags
		_, left := scanTo(buf[indices[j]:indices[j+1]-1], 0, '=')
		_, right := scanTo(buf[indices[commas-j-1]:indices[commas-j]-1], 0, '=')

		// If the tags are equal, then there are duplicate tags, and we should abort
		if bytes.Equal(left, right) {
			return i, buf[start:i], fmt.Errorf("duplicate tags")
		}

		// If left is greater than right, the tags are not sorted.  We must continue
		// since their could be duplicate tags still.
		if bytes.Compare(left, right) > 0 {
			sorted = false
		}
	}

	// If the tags are not sorted, then sort them.  This sort is inline and
	// uses the tag indices we created earlier.  The actual buffer is not sorted, the
	// indices are using the buffer for value comparison.  After the indices are sorted,
	// the buffer is reconstructed from the sorted indices.
	if !sorted && commas > 0 {
		// Get the measurement name for later
		measurement := buf[start : indices[0]-1]

		// Sort the indices
		indices := indices[:commas]
		insertionSort(0, commas, buf, indices)

		// Create a new key using the measurement and sorted indices
		b := make([]byte, len(buf[start:i]))
		pos := copy(b, measurement)
		for _, i := range indices {
			b[pos] = ','
			pos++
			// copy each tag (up to an unescaped comma or space) into place
			_, v := scanToSpaceOr(buf, i, ',')
			pos += copy(b[pos:], v)
		}

		return i, b, nil
	}

	return i, buf[start:i], nil
}
-
// The following constants allow us to specify which state to move to
// next, when scanning sections of a Point.
const (
	// tagKeyState: a tag key is expected next.
	tagKeyState = iota
	// tagValueState: a tag value is expected next.
	tagValueState
	// fieldsState: the fields section is expected next.
	fieldsState
)
-
-// scanMeasurement examines the measurement part of a Point, returning
-// the next state to move to, and the current location in the buffer.
-func scanMeasurement(buf []byte, i int) (int, int, error) {
-	// Check first byte of measurement, anything except a comma is fine.
-	// It can't be a space, since whitespace is stripped prior to this
-	// function call.
-	if buf[i] == ',' {
-		return -1, i, fmt.Errorf("missing measurement")
-	}
-
-	for {
-		i++
-		if i >= len(buf) {
-			// cpu
-			return -1, i, fmt.Errorf("missing fields")
-		}
-
-		if buf[i-1] == '\\' {
-			// Skip character (it's escaped).
-			continue
-		}
-
-		// Unescaped comma; move onto scanning the tags.
-		if buf[i] == ',' {
-			return tagKeyState, i + 1, nil
-		}
-
-		// Unescaped space; move onto scanning the fields.
-		if buf[i] == ' ' {
-			// cpu value=1.0
-			return fieldsState, i, nil
-		}
-	}
-}
-
-// scanTags examines all the tags in a Point, keeping track of and
-// returning the updated indices slice, number of commas and location
-// in buf where to start examining the Point fields.
-func scanTags(buf []byte, i int, indices []int) (int, int, []int, error) {
-	var (
-		err    error
-		commas int
-		state  = tagKeyState
-	)
-
-	for {
-		switch state {
-		case tagKeyState:
-			// Grow our indices slice if we have too many tags.
-			if commas >= len(indices) {
-				newIndics := make([]int, cap(indices)*2)
-				copy(newIndics, indices)
-				indices = newIndics
-			}
-			indices[commas] = i
-			commas++
-
-			i, err = scanTagsKey(buf, i)
-			state = tagValueState // tag value always follows a tag key
-		case tagValueState:
-			state, i, err = scanTagsValue(buf, i)
-		case fieldsState:
-			indices[commas] = i + 1
-			return i, commas, indices, nil
-		}
-
-		if err != nil {
-			return i, commas, indices, err
-		}
-	}
-}
-
// scanTagsKey scans a single tag key starting at buf[i] and returns the
// index just past the unescaped '=' that terminates it.
func scanTagsKey(buf []byte, i int) (int, error) {
	// The key may not be empty or begin with a delimiter,
	// e.g. "cpu," followed by nothing, ' ', ',' or '='.
	if i >= len(buf) || buf[i] == ' ' || buf[i] == ',' || buf[i] == '=' {
		return i, fmt.Errorf("missing tag key")
	}

	// Walk the key until an unescaped '=' (start of the tag value) or an
	// error: end of input, or an unescaped space/comma.
	for i++; ; i++ {
		switch {
		case i >= len(buf) || ((buf[i] == ' ' || buf[i] == ',') && buf[i-1] != '\\'):
			// e.g. "cpu,tag" followed by nothing, ' ' or ','
			return i, fmt.Errorf("missing tag value")
		case buf[i] == '=' && buf[i-1] != '\\':
			// e.g. "cpu,tag=" — position just past the '='
			return i + 1, nil
		}
	}
}
-
-// scanTagsValue scans each character in a tag value.
-func scanTagsValue(buf []byte, i int) (int, int, error) {
-	// Tag value cannot be empty.
-	if i >= len(buf) || buf[i] == ',' || buf[i] == ' ' {
-		// cpu,tag={',', ' '}
-		return -1, i, fmt.Errorf("missing tag value")
-	}
-
-	// Examine each character in the tag value until we hit an unescaped
-	// comma (move onto next tag key), an unescaped space (move onto
-	// fields), or we error out.
-	for {
-		i++
-		if i >= len(buf) {
-			// cpu,tag=value
-			return -1, i, fmt.Errorf("missing fields")
-		}
-
-		// An unescaped equals sign is an invalid tag value.
-		if buf[i] == '=' && buf[i-1] != '\\' {
-			// cpu,tag={'=', 'fo=o'}
-			return -1, i, fmt.Errorf("invalid tag format")
-		}
-
-		if buf[i] == ',' && buf[i-1] != '\\' {
-			// cpu,tag=foo,
-			return tagKeyState, i + 1, nil
-		}
-
-		// cpu,tag=foo value=1.0
-		// cpu, tag=foo\= value=1.0
-		if buf[i] == ' ' && buf[i-1] != '\\' {
-			return fieldsState, i, nil
-		}
-	}
-}
-
// insertionSort sorts indices[l:r] in place so that the tags they point
// at within buf are ordered by tag key.  Only the index slice moves;
// buf itself is never modified.
func insertionSort(l, r int, buf []byte, indices []int) {
	for i := l + 1; i < r; i++ {
		for j := i; j > l && less(buf, indices, j, j-1); j-- {
			indices[j], indices[j-1] = indices[j-1], indices[j]
		}
	}
}

// less reports whether the tag key starting at indices[i] sorts before
// the tag key starting at indices[j].
func less(buf []byte, indices []int, i, j int) bool {
	// This grabs the tag names for i & j, it ignores the values
	_, a := scanTo(buf, indices[i], '=')
	_, b := scanTo(buf, indices[j], '=')
	return bytes.Compare(a, b) < 0
}
-
-func isFieldEscapeChar(b byte) bool {
-	for c := range escape.Codes {
-		if c == b {
-			return true
-		}
-	}
-	return false
-}
-
// scanFields scans buf, starting at i for the fields section of a point.  It returns
// the ending position and the byte slice of the fields within buf.
// Quote state only matters inside field values; the equals/commas
// counters decide whether a '"' belongs to a value or a key.
func scanFields(buf []byte, i int) (int, []byte, error) {
	start := skipWhitespace(buf, i)
	i = start
	quoted := false

	// tracks how many '=' we've seen
	equals := 0

	// tracks how many commas we've seen
	commas := 0

	for {
		// reached the end of buf?
		if i >= len(buf) {
			break
		}

		// escaped characters?
		if buf[i] == '\\' && i+1 < len(buf) {
			i += 2
			continue
		}

		// If the value is quoted, scan until we get to the end quote
		// Only quote values in the field value since quotes are not significant
		// in the field key
		if buf[i] == '"' && equals > commas {
			quoted = !quoted
			i++
			continue
		}

		// If we see an =, ensure that there is at least on char before and after it
		if buf[i] == '=' && !quoted {
			equals++

			// check for "... =123" but allow "a\ =123"
			if buf[i-1] == ' ' && buf[i-2] != '\\' {
				return i, buf[start:i], fmt.Errorf("missing field key")
			}

			// check for "...a=123,=456" but allow "a=123,a\,=456"
			if buf[i-1] == ',' && buf[i-2] != '\\' {
				return i, buf[start:i], fmt.Errorf("missing field key")
			}

			// check for "... value="
			if i+1 >= len(buf) {
				return i, buf[start:i], fmt.Errorf("missing field value")
			}

			// check for "... value=,value2=..."
			if buf[i+1] == ',' || buf[i+1] == ' ' {
				return i, buf[start:i], fmt.Errorf("missing field value")
			}

			// numeric values (including 'N'/'n' so NaN is rejected with a
			// precise error inside scanNumber)
			if isNumeric(buf[i+1]) || buf[i+1] == '-' || buf[i+1] == 'N' || buf[i+1] == 'n' {
				var err error
				i, err = scanNumber(buf, i+1)
				if err != nil {
					return i, buf[start:i], err
				}
				continue
			}
			// If next byte is not a double-quote, the value must be a boolean
			if buf[i+1] != '"' {
				var err error
				i, _, err = scanBoolean(buf, i+1)
				if err != nil {
					return i, buf[start:i], err
				}
				continue
			}
		}

		if buf[i] == ',' && !quoted {
			commas++
		}

		// reached end of block?
		if buf[i] == ' ' && !quoted {
			break
		}
		i++
	}

	if quoted {
		return i, buf[start:i], fmt.Errorf("unbalanced quotes")
	}

	// check that all field sections had key and values (e.g. prevent "a=1,b"
	if equals == 0 || commas != equals-1 {
		return i, buf[start:i], fmt.Errorf("invalid field format")
	}

	return i, buf[start:i], nil
}
-
-// scanTime scans buf, starting at i for the time section of a point.  It returns
-// the ending position and the byte slice of the fields within buf and error if the
-// timestamp is not in the correct numeric format
-func scanTime(buf []byte, i int) (int, []byte, error) {
-	start := skipWhitespace(buf, i)
-	i = start
-	for {
-		// reached the end of buf?
-		if i >= len(buf) {
-			break
-		}
-
-		// Timestamps should be integers, make sure they are so we don't need to actually
-		// parse the timestamp until needed
-		if buf[i] < '0' || buf[i] > '9' {
-			// Handle negative timestamps
-			if i == start && buf[i] == '-' {
-				i++
-				continue
-			}
-			return i, buf[start:i], fmt.Errorf("bad timestamp")
-		}
-
-		// reached end of block?
-		if buf[i] == '\n' {
-			break
-		}
-		i++
-	}
-	return i, buf[start:i], nil
-}
-
// isNumeric reports whether b can appear in the body of a number:
// a decimal digit or a decimal point.
func isNumeric(b byte) bool {
	switch {
	case b >= '0' && b <= '9':
		return true
	case b == '.':
		return true
	default:
		return false
	}
}
-
-// scanNumber returns the end position within buf, start at i after
-// scanning over buf for an integer, or float.  It returns an
-// error if a invalid number is scanned.
-func scanNumber(buf []byte, i int) (int, error) {
-	start := i
-	var isInt bool
-
-	// Is negative number?
-	if i < len(buf) && buf[i] == '-' {
-		i++
-		// There must be more characters now, as just '-' is illegal.
-		if i == len(buf) {
-			return i, ErrInvalidNumber
-		}
-	}
-
-	// how many decimal points we've see
-	decimal := false
-
-	// indicates the number is float in scientific notation
-	scientific := false
-
-	for {
-		if i >= len(buf) {
-			break
-		}
-
-		if buf[i] == ',' || buf[i] == ' ' {
-			break
-		}
-
-		if buf[i] == 'i' && i > start && !isInt {
-			isInt = true
-			i++
-			continue
-		}
-
-		if buf[i] == '.' {
-			// Can't have more than 1 decimal (e.g. 1.1.1 should fail)
-			if decimal {
-				return i, ErrInvalidNumber
-			}
-			decimal = true
-		}
-
-		// `e` is valid for floats but not as the first char
-		if i > start && (buf[i] == 'e' || buf[i] == 'E') {
-			scientific = true
-			i++
-			continue
-		}
-
-		// + and - are only valid at this point if they follow an e (scientific notation)
-		if (buf[i] == '+' || buf[i] == '-') && (buf[i-1] == 'e' || buf[i-1] == 'E') {
-			i++
-			continue
-		}
-
-		// NaN is an unsupported value
-		if i+2 < len(buf) && (buf[i] == 'N' || buf[i] == 'n') {
-			return i, ErrInvalidNumber
-		}
-
-		if !isNumeric(buf[i]) {
-			return i, ErrInvalidNumber
-		}
-		i++
-	}
-
-	if isInt && (decimal || scientific) {
-		return i, ErrInvalidNumber
-	}
-
-	numericDigits := i - start
-	if isInt {
-		numericDigits--
-	}
-	if decimal {
-		numericDigits--
-	}
-	if buf[start] == '-' {
-		numericDigits--
-	}
-
-	if numericDigits == 0 {
-		return i, ErrInvalidNumber
-	}
-
-	// It's more common that numbers will be within min/max range for their type but we need to prevent
-	// out or range numbers from being parsed successfully.  This uses some simple heuristics to decide
-	// if we should parse the number to the actual type.  It does not do it all the time because it incurs
-	// extra allocations and we end up converting the type again when writing points to disk.
-	if isInt {
-		// Make sure the last char is an 'i' for integers (e.g. 9i10 is not valid)
-		if buf[i-1] != 'i' {
-			return i, ErrInvalidNumber
-		}
-		// Parse the int to check bounds the number of digits could be larger than the max range
-		// We subtract 1 from the index to remove the `i` from our tests
-		if len(buf[start:i-1]) >= maxInt64Digits || len(buf[start:i-1]) >= minInt64Digits {
-			if _, err := strconv.ParseInt(string(buf[start:i-1]), 10, 64); err != nil {
-				return i, fmt.Errorf("unable to parse integer %s: %s", buf[start:i-1], err)
-			}
-		}
-	} else {
-		// Parse the float to check bounds if it's scientific or the number of digits could be larger than the max range
-		if scientific || len(buf[start:i]) >= maxFloat64Digits || len(buf[start:i]) >= minFloat64Digits {
-			if _, err := strconv.ParseFloat(string(buf[start:i]), 10); err != nil {
-				return i, fmt.Errorf("invalid float")
-			}
-		}
-	}
-
-	return i, nil
-}
-
// scanBoolean returns the end position within buf, starting at i, after
// scanning over a boolean field value.  Valid booleans are t, T, f, F,
// true, True, TRUE, false, False, FALSE; anything else is an error.
func scanBoolean(buf []byte, i int) (int, []byte, error) {
	start := i

	if i < len(buf) && buf[i] != 't' && buf[i] != 'f' && buf[i] != 'T' && buf[i] != 'F' {
		return i, buf[start:i], fmt.Errorf("invalid boolean")
	}

	// Consume bytes up to the next field separator or end of buffer.
	for i++; i < len(buf) && buf[i] != ',' && buf[i] != ' '; i++ {
	}

	tok := buf[start:i]

	// A single-character boolean (t, T, f, F) is always valid.
	if len(tok) == 1 {
		return i, tok, nil
	}

	// Multi-character forms must match one of the spelled-out variants
	// exactly; the length checks mirror the required word lengths
	// (4 for true-variants, 5 for false-variants).
	var valid bool
	switch buf[start] {
	case 't':
		valid = len(tok) == 4 && bytes.Equal(tok, []byte("true"))
	case 'f':
		valid = len(tok) == 5 && bytes.Equal(tok, []byte("false"))
	case 'T':
		valid = len(tok) == 4 && (bytes.Equal(tok, []byte("TRUE")) || bytes.Equal(tok, []byte("True")))
	case 'F':
		valid = len(tok) == 5 && (bytes.Equal(tok, []byte("FALSE")) || bytes.Equal(tok, []byte("False")))
	}

	if !valid {
		return i, tok, fmt.Errorf("invalid boolean")
	}

	return i, tok, nil
}
-
-// skipWhitespace returns the end position within buf, starting at i after
-// scanning over spaces in tags
-func skipWhitespace(buf []byte, i int) int {
-	for i < len(buf) {
-		if buf[i] != ' ' && buf[i] != '\t' && buf[i] != 0 {
-			break
-		}
-		i++
-	}
-	return i
-}
-
// scanLine returns the end position in buf and the next line found
// within buf, starting at i.  Newlines inside quoted field values do
// not terminate the line, so quote state is tracked (mirroring the
// equals/commas bookkeeping in scanFields) once the fields section
// begins.
func scanLine(buf []byte, i int) (int, []byte) {
	start := i
	quoted := false
	fields := false

	// counts of '=' and ',' seen so far; a '"' only counts as a quote
	// when it sits in a field value (equals > commas)
	equals, commas := 0, 0

	for i < len(buf) {
		b := buf[i]

		// skip past escaped characters
		if b == '\\' {
			i += 2
			continue
		}

		if b == ' ' {
			fields = true
		}

		if fields {
			switch {
			case !quoted && b == '=':
				equals++
				i++
				continue
			case !quoted && b == ',':
				commas++
				i++
				continue
			case b == '"' && equals > commas:
				quoted = !quoted
				i++
				continue
			}
		}

		if b == '\n' && !quoted {
			break
		}

		i++
	}

	return i, buf[start:i]
}
-
// scanTo returns the end position in buf and the consecutive block of
// bytes from i up to (but not including) the first unescaped occurrence
// of stop.  If stop never occurs, the block runs to the end of buf.
func scanTo(buf []byte, i int, stop byte) (int, []byte) {
	start := i
	for i < len(buf) {
		// an unescaped stop byte ends the block; buf[i-1] is only
		// consulted when i > 0
		if buf[i] == stop && (i == 0 || buf[i-1] != '\\') {
			break
		}
		i++
	}
	return i, buf[start:i]
}
-
// scanToSpaceOr returns the end position in buf and the next
// consecutive block of bytes, starting from i and ending at the first
// unescaped stop byte or space, whichever comes first.
//
// Fixes two defects in the original: the doc comment named the wrong
// function ("scanTo"), and the escape check read buf[i-1] before the
// bounds check, so an input whose last byte is '\\' walked i past
// len(buf) and panicked on the next buf[i-1] access.
func scanToSpaceOr(buf []byte, i int, stop byte) (int, []byte) {
	start := i
	if buf[i] == stop || buf[i] == ' ' {
		return i, buf[start:i]
	}

	for {
		i++

		// reached the end of buf?  Checked before inspecting buf[i-1]
		// so a trailing backslash cannot index past the buffer.
		if i >= len(buf) {
			return i, buf[start:i]
		}

		if buf[i-1] == '\\' {
			// previous byte escapes this one
			continue
		}

		// reached end of block?
		if buf[i] == stop || buf[i] == ' ' {
			return i, buf[start:i]
		}
	}
}
-
// scanTagValue returns the end position in buf and the tag value
// starting at i, which runs up to the first unescaped comma or the end
// of the buffer.  Callers pass i >= 1 (just past a '='), so buf[i-1]
// is always in range.
func scanTagValue(buf []byte, i int) (int, []byte) {
	start := i
	for ; i < len(buf); i++ {
		if buf[i] == ',' && buf[i-1] != '\\' {
			break
		}
	}
	return i, buf[start:i]
}
-
// scanFieldValue returns the end position in buf and the field value
// starting at i.  The value ends at the first comma outside a quoted
// string; within quotes, only '\"' acts as an escape.
func scanFieldValue(buf []byte, i int) (int, []byte) {
	start := i
	quoted := false

	for i < len(buf) {
		switch {
		case buf[i] == '\\' && i+1 < len(buf) && buf[i+1] == '"':
			// An escaped double-quote does not toggle quote state.
			i += 2
		case buf[i] == '"':
			// Quoted value? (e.g. a string field)
			quoted = !quoted
			i++
		case buf[i] == ',' && !quoted:
			return i, buf[start:i]
		default:
			i++
		}
	}
	return i, buf[start:i]
}
-
// escapeMeasurement replaces each special byte in a measurement name
// (see measurementEscapeCodes) with its backslash-escaped form.
func escapeMeasurement(in []byte) []byte {
	for b, esc := range measurementEscapeCodes {
		in = bytes.Replace(in, []byte{b}, esc, -1)
	}
	return in
}

// unescapeMeasurement reverses escapeMeasurement.
func unescapeMeasurement(in []byte) []byte {
	for b, esc := range measurementEscapeCodes {
		in = bytes.Replace(in, esc, []byte{b}, -1)
	}
	return in
}

// escapeTag replaces each special byte in a tag key or value
// (see tagEscapeCodes) with its backslash-escaped form.
func escapeTag(in []byte) []byte {
	for b, esc := range tagEscapeCodes {
		// skip the allocation in bytes.Replace when the byte is absent
		if bytes.IndexByte(in, b) != -1 {
			in = bytes.Replace(in, []byte{b}, esc, -1)
		}
	}
	return in
}

// unescapeTag reverses escapeTag.
func unescapeTag(in []byte) []byte {
	for b, esc := range tagEscapeCodes {
		if bytes.IndexByte(in, b) != -1 {
			in = bytes.Replace(in, esc, []byte{b}, -1)
		}
	}
	return in
}
-
// escapeStringField returns a copy of in with every double quote and
// backslash replaced by its backslash-escaped form.
func escapeStringField(in string) string {
	var out []byte
	for i := 0; i < len(in); i++ {
		switch in[i] {
		case '\\':
			// escape backslashes
			out = append(out, '\\', '\\')
		case '"':
			// escape double-quotes
			out = append(out, '\\', '"')
		default:
			out = append(out, in[i])
		}
	}
	return string(out)
}
-
-// unescapeStringField returns a copy of in with any escaped double-quotes
-// or backslashes unescaped
-func unescapeStringField(in string) string {
-	if strings.IndexByte(in, '\\') == -1 {
-		return in
-	}
-
-	var out []byte
-	i := 0
-	for {
-		if i >= len(in) {
-			break
-		}
-		// unescape backslashes
-		if in[i] == '\\' && i+1 < len(in) && in[i+1] == '\\' {
-			out = append(out, '\\')
-			i += 2
-			continue
-		}
-		// unescape double-quotes
-		if in[i] == '\\' && i+1 < len(in) && in[i+1] == '"' {
-			out = append(out, '"')
-			i += 2
-			continue
-		}
-		out = append(out, in[i])
-		i++
-
-	}
-	return string(out)
-}
-
// NewPoint returns a new point with the given measurement name, tags, fields and timestamp.  If
// an unsupported field value (NaN) or out of range time is passed, this function returns an error.
func NewPoint(name string, tags Tags, fields Fields, time time.Time) (Point, error) {
	// at least one field is required
	if len(fields) == 0 {
		return nil, ErrPointMustHaveAField
	}
	// a zero time is allowed (the point prints without a timestamp);
	// any other time must be within the valid range
	if !time.IsZero() {
		if err := CheckTime(time); err != nil {
			return nil, err
		}
	}

	for key, value := range fields {
		// NaN is only rejected for float64 values; other field types
		// pass through unchecked here
		if fv, ok := value.(float64); ok {
			// Ensure the caller validates and handles invalid field values
			if math.IsNaN(fv) {
				return nil, fmt.Errorf("NaN is an unsupported value for field %s", key)
			}
		}
		if len(key) == 0 {
			return nil, fmt.Errorf("all fields must have non-empty names")
		}
	}

	key := MakeKey([]byte(name), tags)
	if len(key) > MaxKeyLength {
		return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength)
	}

	return &point{
		key:    key,
		time:   time,
		fields: fields.MarshalBinary(),
	}, nil
}
-
// NewPointFromBytes returns a new Point from a marshalled Point.
func NewPointFromBytes(b []byte) (Point, error) {
	p := &point{}
	if err := p.UnmarshalBinary(b); err != nil {
		return nil, err
	}
	// a decoded point must carry at least one field, same as NewPoint
	if len(p.Fields()) == 0 {
		return nil, ErrPointMustHaveAField
	}
	return p, nil
}

// MustNewPoint returns a new point with the given measurement name, tags, fields and timestamp.  If
// an unsupported field value (NaN) is passed, this function panics.
func MustNewPoint(name string, tags Tags, fields Fields, time time.Time) Point {
	pt, err := NewPoint(name, tags, fields, time)
	if err != nil {
		panic(err.Error())
	}
	return pt
}
-
// Data returns the opaque binary-encoded field data, if previously set.
func (p *point) Data() []byte {
	return p.data
}

// SetData stores opaque binary-encoded field data on the point.
func (p *point) SetData(b []byte) {
	p.data = b
}

// Key returns the escaped series key (measurement plus sorted tags).
func (p *point) Key() []byte {
	return p.key
}

// name returns the still-escaped measurement portion of the key
// (everything before the first unescaped comma).
func (p *point) name() []byte {
	_, name := scanTo(p.key, 0, ',')
	return name
}

// Name return the measurement name for the point
func (p *point) Name() string {
	// memoized; an empty cachedName means "not computed yet"
	if p.cachedName != "" {
		return p.cachedName
	}
	p.cachedName = string(escape.Unescape(p.name()))
	return p.cachedName
}

// SetName updates the measurement name for the point
func (p *point) SetName(name string) {
	// invalidate the cached name before rebuilding the key
	p.cachedName = ""
	p.key = MakeKey([]byte(name), p.Tags())
}

// Time return the timestamp for the point
func (p *point) Time() time.Time {
	return p.time
}

// SetTime updates the timestamp for the point
func (p *point) SetTime(t time.Time) {
	p.time = t
}

// Tags returns the tag set for the point
func (p *point) Tags() Tags {
	// tags are re-parsed from the key on every call; they are not cached
	return parseTags(p.key)
}
-
// parseTags extracts the tag set from an escaped series key
// (measurement followed by comma-separated key=value pairs).
func parseTags(buf []byte) Tags {
	tags := map[string]string{}

	if len(buf) != 0 {
		pos, name := scanTo(buf, 0, ',')

		// it's an empty key, so there are no tags
		if len(name) == 0 {
			return tags
		}

		i := pos + 1
		var key, value []byte
		for {
			if i >= len(buf) {
				break
			}
			i, key = scanTo(buf, i, '=')
			i, value = scanTagValue(buf, i+1)

			// NOTE(review): on an empty value this continues without the
			// i++ below, so the separator is not consumed and the next
			// scanTo may fold the stray comma into the following key
			// (e.g. "m,a=,b=c" yields key ",b").  scanTo still advances,
			// so there is no infinite loop — but verify the intended
			// handling of empty tag values before changing this.
			if len(value) == 0 {
				continue
			}

			tags[string(unescapeTag(key))] = string(unescapeTag(value))

			i++
		}
	}
	return tags
}
-
// MakeKey creates a key for a set of tags.
func MakeKey(name []byte, tags Tags) []byte {
	// unescape the name and then re-escape it to avoid double escaping.
	// The key should always be stored in escaped form.
	return append(escapeMeasurement(unescapeMeasurement(name)), tags.HashKey()...)
}

// SetTags replaces the tags for the point
func (p *point) SetTags(tags Tags) {
	// rebuild the escaped key from the current name and the new tags
	p.key = MakeKey([]byte(p.Name()), tags)
}

// AddTag adds or replaces a tag value for a point
func (p *point) AddTag(key, value string) {
	tags := p.Tags()
	tags[key] = value
	p.key = MakeKey([]byte(p.Name()), tags)
}

// Fields returns the fields for the point
func (p *point) Fields() Fields {
	// fields are decoded lazily from the text encoding and memoized
	if p.cachedFields != nil {
		return p.cachedFields
	}
	p.cachedFields = p.unmarshalBinary()
	return p.cachedFields
}
-
// SetPrecision will round a time to the specified precision.
// Note: this method truncates toward zero (time.Truncate), unlike the
// package-level SetPrecision which uses time.Round; "n" and unknown
// units leave the time unchanged.
func (p *point) SetPrecision(precision string) {
	switch precision {
	case "n":
		// nanoseconds: nothing to do
	case "u":
		p.SetTime(p.Time().Truncate(time.Microsecond))
	case "ms":
		p.SetTime(p.Time().Truncate(time.Millisecond))
	case "s":
		p.SetTime(p.Time().Truncate(time.Second))
	case "m":
		p.SetTime(p.Time().Truncate(time.Minute))
	case "h":
		p.SetTime(p.Time().Truncate(time.Hour))
	}
}

// String returns the point in line-protocol form; the timestamp is
// appended in nanoseconds unless it is the zero time.
func (p *point) String() string {
	if p.Time().IsZero() {
		return string(p.Key()) + " " + string(p.fields)
	}
	return string(p.Key()) + " " + string(p.fields) + " " + strconv.FormatInt(p.UnixNano(), 10)
}
-
// MarshalBinary encodes the point as: big-endian uint32 key length,
// key bytes, big-endian uint32 field length, field bytes, then the
// binary-encoded timestamp (time.Time.MarshalBinary).
func (p *point) MarshalBinary() ([]byte, error) {
	tb, err := p.time.MarshalBinary()
	if err != nil {
		return nil, err
	}

	// 8 extra bytes cover the two uint32 length prefixes
	b := make([]byte, 8+len(p.key)+len(p.fields)+len(tb))
	i := 0

	binary.BigEndian.PutUint32(b[i:], uint32(len(p.key)))
	i += 4

	i += copy(b[i:], p.key)

	binary.BigEndian.PutUint32(b[i:i+4], uint32(len(p.fields)))
	i += 4

	i += copy(b[i:], p.fields)

	copy(b[i:], tb)
	return b, nil
}
-
-func (p *point) UnmarshalBinary(b []byte) error {
-	var i int
-	keyLen := int(binary.BigEndian.Uint32(b[:4]))
-	i += int(4)
-
-	p.key = b[i : i+keyLen]
-	i += keyLen
-
-	fieldLen := int(binary.BigEndian.Uint32(b[i : i+4]))
-	i += int(4)
-
-	p.fields = b[i : i+fieldLen]
-	i += fieldLen
-
-	p.time = time.Now()
-	p.time.UnmarshalBinary(b[i:])
-	return nil
-}
-
-func (p *point) PrecisionString(precision string) string {
-	if p.Time().IsZero() {
-		return fmt.Sprintf("%s %s", p.Key(), string(p.fields))
-	}
-	return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields),
-		p.UnixNano()/GetPrecisionMultiplier(precision))
-}
-
-func (p *point) RoundedString(d time.Duration) string {
-	if p.Time().IsZero() {
-		return fmt.Sprintf("%s %s", p.Key(), string(p.fields))
-	}
-	return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields),
-		p.time.Round(d).UnixNano())
-}
-
-func (p *point) unmarshalBinary() Fields {
-	return newFieldsFromBinary(p.fields)
-}
-
-func (p *point) HashID() uint64 {
-	h := fnv.New64a()
-	h.Write(p.key)
-	sum := h.Sum64()
-	return sum
-}
-
-func (p *point) UnixNano() int64 {
-	return p.Time().UnixNano()
-}
-
-// Tags represents a mapping between a Point's tag names and their
-// values.
-type Tags map[string]string
-
-// HashKey hashes all of a tag's keys.
-func (t Tags) HashKey() []byte {
-	// Empty maps marshal to empty bytes.
-	if len(t) == 0 {
-		return nil
-	}
-
-	escaped := Tags{}
-	for k, v := range t {
-		ek := escapeTag([]byte(k))
-		ev := escapeTag([]byte(v))
-
-		if len(ev) > 0 {
-			escaped[string(ek)] = string(ev)
-		}
-	}
-
-	// Extract keys and determine final size.
-	sz := len(escaped) + (len(escaped) * 2) // separators
-	keys := make([]string, len(escaped)+1)
-	i := 0
-	for k, v := range escaped {
-		keys[i] = k
-		i++
-		sz += len(k) + len(v)
-	}
-	keys = keys[:i]
-	sort.Strings(keys)
-	// Generate marshaled bytes.
-	b := make([]byte, sz)
-	buf := b
-	idx := 0
-	for _, k := range keys {
-		buf[idx] = ','
-		idx++
-		copy(buf[idx:idx+len(k)], k)
-		idx += len(k)
-		buf[idx] = '='
-		idx++
-		v := escaped[k]
-		copy(buf[idx:idx+len(v)], v)
-		idx += len(v)
-	}
-	return b[:idx]
-}
-
-// Fields represents a mapping between a Point's field names and their
-// values.
-type Fields map[string]interface{}
-
-func parseNumber(val []byte) (interface{}, error) {
-	if val[len(val)-1] == 'i' {
-		val = val[:len(val)-1]
-		return strconv.ParseInt(string(val), 10, 64)
-	}
-	for i := 0; i < len(val); i++ {
-		// If there is a decimal or an N (NaN), I (Inf), parse as float
-		if val[i] == '.' || val[i] == 'N' || val[i] == 'n' || val[i] == 'I' || val[i] == 'i' || val[i] == 'e' {
-			return strconv.ParseFloat(string(val), 64)
-		}
-		if val[i] < '0' && val[i] > '9' {
-			return string(val), nil
-		}
-	}
-	return strconv.ParseFloat(string(val), 64)
-}
-
-func newFieldsFromBinary(buf []byte) Fields {
-	fields := make(Fields, 8)
-	var (
-		i              int
-		name, valueBuf []byte
-		value          interface{}
-		err            error
-	)
-	for i < len(buf) {
-
-		i, name = scanTo(buf, i, '=')
-		name = escape.Unescape(name)
-
-		i, valueBuf = scanFieldValue(buf, i+1)
-		if len(name) > 0 {
-			if len(valueBuf) == 0 {
-				fields[string(name)] = nil
-				continue
-			}
-
-			// If the first char is a double-quote, then unmarshal as string
-			if valueBuf[0] == '"' {
-				value = unescapeStringField(string(valueBuf[1 : len(valueBuf)-1]))
-				// Check for numeric characters and special NaN or Inf
-			} else if (valueBuf[0] >= '0' && valueBuf[0] <= '9') || valueBuf[0] == '-' || valueBuf[0] == '.' ||
-				valueBuf[0] == 'N' || valueBuf[0] == 'n' || // NaN
-				valueBuf[0] == 'I' || valueBuf[0] == 'i' { // Inf
-
-				value, err = parseNumber(valueBuf)
-				if err != nil {
-					panic(fmt.Sprintf("unable to parse number value '%v': %v", string(valueBuf), err))
-				}
-
-				// Otherwise parse it as bool
-			} else {
-				value, err = strconv.ParseBool(string(valueBuf))
-				if err != nil {
-					panic(fmt.Sprintf("unable to parse bool value '%v': %v\n", string(valueBuf), err))
-				}
-			}
-			fields[string(name)] = value
-		}
-		i++
-	}
-	return fields
-}
-
-// MarshalBinary encodes all the fields to their proper type and returns the binary
-// represenation
-// NOTE: uint64 is specifically not supported due to potential overflow when we decode
-// again later to an int64
-func (p Fields) MarshalBinary() []byte {
-	b := []byte{}
-	keys := make([]string, len(p))
-	i := 0
-	for k := range p {
-		keys[i] = k
-		i++
-	}
-	sort.Strings(keys)
-
-	for _, k := range keys {
-		v := p[k]
-		b = append(b, []byte(escape.String(k))...)
-		b = append(b, '=')
-		switch t := v.(type) {
-		case int:
-			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
-			b = append(b, 'i')
-		case int8:
-			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
-			b = append(b, 'i')
-		case int16:
-			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
-			b = append(b, 'i')
-		case int32:
-			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
-			b = append(b, 'i')
-		case int64:
-			b = append(b, []byte(strconv.FormatInt(t, 10))...)
-			b = append(b, 'i')
-		case uint:
-			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
-			b = append(b, 'i')
-		case uint8:
-			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
-			b = append(b, 'i')
-		case uint16:
-			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
-			b = append(b, 'i')
-		case uint32:
-			b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
-			b = append(b, 'i')
-		case float32:
-			val := []byte(strconv.FormatFloat(float64(t), 'f', -1, 32))
-			b = append(b, val...)
-		case float64:
-			val := []byte(strconv.FormatFloat(t, 'f', -1, 64))
-			b = append(b, val...)
-		case bool:
-			b = append(b, []byte(strconv.FormatBool(t))...)
-		case []byte:
-			b = append(b, t...)
-		case string:
-			b = append(b, '"')
-			b = append(b, []byte(escapeStringField(t))...)
-			b = append(b, '"')
-		case nil:
-			// skip
-		default:
-			// Can't determine the type, so convert to string
-			b = append(b, '"')
-			b = append(b, []byte(escapeStringField(fmt.Sprintf("%v", v)))...)
-			b = append(b, '"')
-
-		}
-		b = append(b, ',')
-	}
-	if len(b) > 0 {
-		return b[0 : len(b)-1]
-	}
-	return b
-}
-
-type indexedSlice struct {
-	indices []int
-	b       []byte
-}
-
-func (s *indexedSlice) Less(i, j int) bool {
-	_, a := scanTo(s.b, s.indices[i], '=')
-	_, b := scanTo(s.b, s.indices[j], '=')
-	return bytes.Compare(a, b) < 0
-}
-
-func (s *indexedSlice) Swap(i, j int) {
-	s.indices[i], s.indices[j] = s.indices[j], s.indices[i]
-}
-
-func (s *indexedSlice) Len() int {
-	return len(s.indices)
-}

+ 0 - 60
Godeps/_workspace/src/github.com/influxdata/influxdb/models/rows.go

@@ -1,60 +0,0 @@
-package models
-
-import (
-	"hash/fnv"
-	"sort"
-)
-
-// Row represents a single row returned from the execution of a statement.
-type Row struct {
-	Name    string            `json:"name,omitempty"`
-	Tags    map[string]string `json:"tags,omitempty"`
-	Columns []string          `json:"columns,omitempty"`
-	Values  [][]interface{}   `json:"values,omitempty"`
-	Err     error             `json:"err,omitempty"`
-}
-
-// SameSeries returns true if r contains values for the same series as o.
-func (r *Row) SameSeries(o *Row) bool {
-	return r.tagsHash() == o.tagsHash() && r.Name == o.Name
-}
-
-// tagsHash returns a hash of tag key/value pairs.
-func (r *Row) tagsHash() uint64 {
-	h := fnv.New64a()
-	keys := r.tagsKeys()
-	for _, k := range keys {
-		h.Write([]byte(k))
-		h.Write([]byte(r.Tags[k]))
-	}
-	return h.Sum64()
-}
-
-// tagKeys returns a sorted list of tag keys.
-func (r *Row) tagsKeys() []string {
-	a := make([]string, 0, len(r.Tags))
-	for k := range r.Tags {
-		a = append(a, k)
-	}
-	sort.Strings(a)
-	return a
-}
-
-// Rows represents a collection of rows. Rows implements sort.Interface.
-type Rows []*Row
-
-func (p Rows) Len() int { return len(p) }
-
-func (p Rows) Less(i, j int) bool {
-	// Sort by name first.
-	if p[i].Name != p[j].Name {
-		return p[i].Name < p[j].Name
-	}
-
-	// Sort by tag set hash. Tags don't have a meaningful sort order so we
-	// just compute a hash and sort by that instead. This allows the tests
-	// to receive rows in a predictable order every time.
-	return p[i].tagsHash() < p[j].tagsHash()
-}
-
-func (p Rows) Swap(i, j int) { p[i], p[j] = p[j], p[i] }

+ 0 - 51
Godeps/_workspace/src/github.com/influxdata/influxdb/models/time.go

@@ -1,51 +0,0 @@
-package models
-
-// Helper time methods since parsing time can easily overflow and we only support a
-// specific time range.
-
-import (
-	"fmt"
-	"math"
-	"time"
-)
-
-var (
-	// MaxNanoTime is the maximum time that can be represented via int64 nanoseconds since the epoch.
-	MaxNanoTime = time.Unix(0, math.MaxInt64).UTC()
-	// MinNanoTime is the minumum time that can be represented via int64 nanoseconds since the epoch.
-	MinNanoTime = time.Unix(0, math.MinInt64).UTC()
-
-	// ErrTimeOutOfRange gets returned when time is out of the representable range using int64 nanoseconds since the epoch.
-	ErrTimeOutOfRange = fmt.Errorf("time outside range %s - %s", MinNanoTime, MaxNanoTime)
-)
-
-// SafeCalcTime safely calculates the time given. Will return error if the time is outside the
-// supported range.
-func SafeCalcTime(timestamp int64, precision string) (time.Time, error) {
-	mult := GetPrecisionMultiplier(precision)
-	if t, ok := safeSignedMult(timestamp, mult); ok {
-		return time.Unix(0, t).UTC(), nil
-	}
-
-	return time.Time{}, ErrTimeOutOfRange
-}
-
-// CheckTime checks that a time is within the safe range.
-func CheckTime(t time.Time) error {
-	if t.Before(MinNanoTime) || t.After(MaxNanoTime) {
-		return ErrTimeOutOfRange
-	}
-	return nil
-}
-
-// Perform the multiplication and check to make sure it didn't overflow.
-func safeSignedMult(a, b int64) (int64, bool) {
-	if a == 0 || b == 0 || a == 1 || b == 1 {
-		return a * b, true
-	}
-	if a == math.MinInt64 || b == math.MaxInt64 {
-		return 0, false
-	}
-	c := a * b
-	return c, c/b == a
-}

+ 0 - 53
Godeps/_workspace/src/github.com/influxdata/influxdb/pkg/escape/bytes.go

@@ -1,53 +0,0 @@
-package escape
-
-import "bytes"
-
-func Bytes(in []byte) []byte {
-	for b, esc := range Codes {
-		in = bytes.Replace(in, []byte{b}, esc, -1)
-	}
-	return in
-}
-
-func Unescape(in []byte) []byte {
-	if len(in) == 0 {
-		return nil
-	}
-
-	if bytes.IndexByte(in, '\\') == -1 {
-		return in
-	}
-
-	i := 0
-	inLen := len(in)
-	var out []byte
-
-	for {
-		if i >= inLen {
-			break
-		}
-		if in[i] == '\\' && i+1 < inLen {
-			switch in[i+1] {
-			case ',':
-				out = append(out, ',')
-				i += 2
-				continue
-			case '"':
-				out = append(out, '"')
-				i += 2
-				continue
-			case ' ':
-				out = append(out, ' ')
-				i += 2
-				continue
-			case '=':
-				out = append(out, '=')
-				i += 2
-				continue
-			}
-		}
-		out = append(out, in[i])
-		i += 1
-	}
-	return out
-}

+ 0 - 34
Godeps/_workspace/src/github.com/influxdata/influxdb/pkg/escape/strings.go

@@ -1,34 +0,0 @@
-package escape
-
-import "strings"
-
-var (
-	Codes = map[byte][]byte{
-		',': []byte(`\,`),
-		'"': []byte(`\"`),
-		' ': []byte(`\ `),
-		'=': []byte(`\=`),
-	}
-
-	codesStr = map[string]string{}
-)
-
-func init() {
-	for k, v := range Codes {
-		codesStr[string(k)] = string(v)
-	}
-}
-
-func UnescapeString(in string) string {
-	for b, esc := range codesStr {
-		in = strings.Replace(in, esc, b, -1)
-	}
-	return in
-}
-
-func String(in string) string {
-	for b, esc := range codesStr {
-		in = strings.Replace(in, b, esc, -1)
-	}
-	return in
-}

+ 2 - 5
conf/defaults.ini

@@ -248,12 +248,9 @@ templates_pattern = emails/*.html
 #################################### Logging ##########################
 [log]
 # Either "console", "file", "syslog". Default is console and  file
-# Use comma to separate multiple modes, e.g. "console, file"
+# Use space to separate multiple modes, e.g. "console file"
 mode = console, file
 
-# Buffer length of channel, keep it as it is if you don't know what it is.
-buffer_len = 10000
-
 # Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "Info"
 level = Info
 
@@ -273,7 +270,7 @@ log_rotate = true
 max_lines = 1000000
 
 # Max size shift of single file, default is 28 means 1 << 28, 256MB
-max_lines_shift = 28
+max_size_shift = 28
 
 # Segment log daily, default is true
 daily_rotate = true

+ 1 - 4
conf/sample.ini

@@ -233,9 +233,6 @@ check_for_updates = true
 # Use comma to separate multiple modes, e.g. "console, file"
 ;mode = console, file
 
-# Buffer length of channel, keep it as it is if you don't know what it is.
-;buffer_len = 10000
-
 # Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "Info"
 ;level = Info
 
@@ -253,7 +250,7 @@ check_for_updates = true
 ;max_lines = 1000000
 
 # Max size shift of single file, default is 28 means 1 << 28, 256MB
-;max_lines_shift = 28
+;max_size_shift = 28
 
 # Segment log daily, default is true
 ;daily_rotate = true

+ 3 - 0
pkg/api/api.go

@@ -240,6 +240,9 @@ func Register(r *macaron.Macaron) {
 		// metrics
 		r.Get("/metrics", wrap(GetInternalMetrics))
 
+		// error test
+		r.Get("/metrics/error", wrap(GenerateError))
+
 	}, reqSignedIn)
 
 	// admin api

+ 6 - 0
pkg/api/metrics.go

@@ -87,3 +87,9 @@ func GetInternalMetrics(c *middleware.Context) Response {
 		},
 	}
 }
+
+// Generates an index-out-of-range error
+func GenerateError(c *middleware.Context) Response {
+	var array []string
+	return Json(200, array[20])
+}

+ 5 - 3
pkg/cmd/grafana-server/main.go

@@ -39,7 +39,6 @@ func init() {
 }
 
 func main() {
-
 	v := flag.Bool("v", false, "prints current version and exits")
 	flag.Parse()
 	if *v {
@@ -48,6 +47,9 @@ func main() {
 	}
 
 	buildstampInt64, _ := strconv.ParseInt(buildstamp, 10, 64)
+	if buildstampInt64 == 0 {
+		buildstampInt64 = time.Now().Unix()
+	}
 
 	setting.BuildVersion = version
 	setting.BuildCommit = commit
@@ -85,8 +87,8 @@ func initRuntime() {
 		log.Fatal(3, err.Error())
 	}
 
-	log.Info("Starting Grafana")
-	log.Info("Version: %v, Commit: %v, Build date: %v", setting.BuildVersion, setting.BuildCommit, time.Unix(setting.BuildStamp, 0))
+	logger := log.New("main")
+	logger.Info("Starting Grafana", "version", version, "commit", commit, "compiled", time.Unix(setting.BuildStamp, 0))
 
 	setting.LogConfigurationInfo()
 

+ 11 - 5
pkg/cmd/grafana-server/web.go

@@ -6,6 +6,7 @@ package main
 import (
 	"fmt"
 	"net/http"
+	"os"
 	"path"
 
 	"gopkg.in/macaron.v1"
@@ -18,12 +19,14 @@ import (
 	"github.com/grafana/grafana/pkg/setting"
 )
 
+var logger log.Logger
+
 func newMacaron() *macaron.Macaron {
 	macaron.Env = setting.Env
 	m := macaron.New()
 
 	m.Use(middleware.Logger())
-	m.Use(macaron.Recovery())
+	m.Use(middleware.Recovery())
 
 	if setting.EnableGzip {
 		m.Use(middleware.Gziper())
@@ -31,7 +34,7 @@ func newMacaron() *macaron.Macaron {
 
 	for _, route := range plugins.StaticRoutes {
 		pluginRoute := path.Join("/public/plugins/", route.PluginId)
-		log.Debug("Plugins: Adding route %s -> %s", pluginRoute, route.Directory)
+		logger.Debug("Plugins: Adding route", "route", pluginRoute, "dir", route.Directory)
 		mapStatic(m, route.Directory, "", pluginRoute)
 	}
 
@@ -76,23 +79,26 @@ func mapStatic(m *macaron.Macaron, rootDir string, dir string, prefix string) {
 }
 
 func StartServer() {
+	logger = log.New("server")
 
 	var err error
 	m := newMacaron()
 	api.Register(m)
 
 	listenAddr := fmt.Sprintf("%s:%s", setting.HttpAddr, setting.HttpPort)
-	log.Info("Listen: %v://%s%s", setting.Protocol, listenAddr, setting.AppSubUrl)
+	logger.Info("Server Listening", "address", listenAddr, "protocol", setting.Protocol, "subUrl", setting.AppSubUrl)
 	switch setting.Protocol {
 	case setting.HTTP:
 		err = http.ListenAndServe(listenAddr, m)
 	case setting.HTTPS:
 		err = http.ListenAndServeTLS(listenAddr, setting.CertFile, setting.KeyFile, m)
 	default:
-		log.Fatal(4, "Invalid protocol: %s", setting.Protocol)
+		logger.Error("Invalid protocol", "protocol", setting.Protocol)
+		os.Exit(1)
 	}
 
 	if err != nil {
-		log.Fatal(4, "Fail to start server: %v", err)
+		logger.Error("Fail to start server", "error", err)
+		os.Exit(1)
 	}
 }

+ 0 - 157
pkg/log/console.go

@@ -1,157 +0,0 @@
-// Copyright 2014 The Gogs Authors. All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-
-package log
-
-import (
-	"encoding/json"
-	"fmt"
-	"log"
-	"os"
-	"runtime"
-)
-
-type Brush func(string) string
-
-func NewBrush(color string) Brush {
-	pre := "\033["
-	reset := "\033[0m"
-	return func(text string) string {
-		return pre + color + "m" + text + reset
-	}
-}
-
-var (
-	Red    = NewBrush("1;31")
-	Purple = NewBrush("1;35")
-	Yellow = NewBrush("1;33")
-	Green  = NewBrush("1;32")
-	Blue   = NewBrush("1;34")
-	Cyan   = NewBrush("1;36")
-
-	colors = []Brush{
-		Cyan,   // Trace      cyan
-		Blue,   // Debug      blue
-		Green,  // Info       green
-		Yellow, // Warn       yellow
-		Red,    // Error      red
-		Purple, // Critical   purple
-		Red,    // Fatal      red
-	}
-	consoleWriter = &ConsoleWriter{lg: log.New(os.Stdout, "", 0),
-		Level: TRACE}
-)
-
-// ConsoleWriter implements LoggerInterface and writes messages to terminal.
-type ConsoleWriter struct {
-	lg         *log.Logger
-	Level      LogLevel `json:"level"`
-	Formatting bool     `json:"formatting"`
-}
-
-// create ConsoleWriter returning as LoggerInterface.
-func NewConsole() LoggerInterface {
-	return &ConsoleWriter{
-		lg:         log.New(os.Stderr, "", log.Ldate|log.Ltime),
-		Level:      TRACE,
-		Formatting: true,
-	}
-}
-
-func (cw *ConsoleWriter) Init(config string) error {
-	return json.Unmarshal([]byte(config), cw)
-}
-
-func (cw *ConsoleWriter) WriteMsg(msg string, skip int, level LogLevel) error {
-	if cw.Level > level {
-		return nil
-	}
-	if runtime.GOOS == "windows" || !cw.Formatting {
-		cw.lg.Println(msg)
-	} else {
-		cw.lg.Println(colors[level](msg))
-	}
-	return nil
-}
-
-func (_ *ConsoleWriter) Flush() {
-
-}
-
-func (_ *ConsoleWriter) Destroy() {
-}
-
-func printConsole(level LogLevel, msg string) {
-	consoleWriter.WriteMsg(msg, 0, level)
-}
-
-func printfConsole(level LogLevel, format string, v ...interface{}) {
-	consoleWriter.WriteMsg(fmt.Sprintf(format, v...), 0, level)
-}
-
-// ConsoleTrace prints to stdout using TRACE colors
-func ConsoleTrace(s string) {
-	printConsole(TRACE, s)
-}
-
-// ConsoleTracef prints a formatted string to stdout using TRACE colors
-func ConsoleTracef(format string, v ...interface{}) {
-	printfConsole(TRACE, format, v...)
-}
-
-// ConsoleDebug prints to stdout using DEBUG colors
-func ConsoleDebug(s string) {
-	printConsole(DEBUG, s)
-}
-
-// ConsoleDebugf prints a formatted string to stdout using DEBUG colors
-func ConsoleDebugf(format string, v ...interface{}) {
-	printfConsole(DEBUG, format, v...)
-}
-
-// ConsoleInfo prints to stdout using INFO colors
-func ConsoleInfo(s string) {
-	printConsole(INFO, s)
-}
-
-// ConsoleInfof prints a formatted string to stdout using INFO colors
-func ConsoleInfof(format string, v ...interface{}) {
-	printfConsole(INFO, format, v...)
-}
-
-// ConsoleWarn prints to stdout using WARN colors
-func ConsoleWarn(s string) {
-	printConsole(WARN, s)
-}
-
-// ConsoleWarnf prints a formatted string to stdout using WARN colors
-func ConsoleWarnf(format string, v ...interface{}) {
-	printfConsole(WARN, format, v...)
-}
-
-// ConsoleError prints to stdout using ERROR colors
-func ConsoleError(s string) {
-	printConsole(ERROR, s)
-}
-
-// ConsoleErrorf prints a formatted string to stdout using ERROR colors
-func ConsoleErrorf(format string, v ...interface{}) {
-	printfConsole(ERROR, format, v...)
-}
-
-// ConsoleFatal prints to stdout using FATAL colors
-func ConsoleFatal(s string) {
-	printConsole(FATAL, s)
-	os.Exit(1)
-}
-
-// ConsoleFatalf prints a formatted string to stdout using FATAL colors
-func ConsoleFatalf(format string, v ...interface{}) {
-	printfConsole(FATAL, format, v...)
-	os.Exit(1)
-}
-
-func init() {
-	Register("console", NewConsole)
-}

+ 21 - 48
pkg/log/file.go

@@ -5,43 +5,39 @@
 package log
 
 import (
-	"encoding/json"
 	"errors"
 	"fmt"
 	"io/ioutil"
-	"log"
 	"os"
 	"path/filepath"
 	"strings"
 	"sync"
 	"time"
+
+	"github.com/inconshreveable/log15"
 )
 
 // FileLogWriter implements LoggerInterface.
 // It writes messages by lines limit, file size limit, or time frequency.
 type FileLogWriter struct {
-	*log.Logger
 	mw *MuxWriter
-	// The opened file
-	Filename string `json:"filename"`
 
-	Maxlines          int `json:"maxlines"`
+	Format            log15.Format
+	Filename          string
+	Maxlines          int
 	maxlines_curlines int
 
 	// Rotate at size
-	Maxsize         int `json:"maxsize"`
+	Maxsize         int
 	maxsize_cursize int
 
 	// Rotate daily
-	Daily          bool  `json:"daily"`
-	Maxdays        int64 `json:"maxdays"`
+	Daily          bool
+	Maxdays        int64
 	daily_opendate int
 
-	Rotate bool `json:"rotate"`
-
-	startLock sync.Mutex // Only one log can write to the file
-
-	Level LogLevel `json:"level"`
+	Rotate    bool
+	startLock sync.Mutex
 }
 
 // an *os.File writer with locker.
@@ -66,37 +62,29 @@ func (l *MuxWriter) SetFd(fd *os.File) {
 }
 
 // create a FileLogWriter returning as LoggerInterface.
-func NewFileWriter() LoggerInterface {
+func NewFileWriter() *FileLogWriter {
 	w := &FileLogWriter{
 		Filename: "",
+		Format:   log15.LogfmtFormat(),
 		Maxlines: 1000000,
 		Maxsize:  1 << 28, //256 MB
 		Daily:    true,
 		Maxdays:  7,
 		Rotate:   true,
-		Level:    TRACE,
 	}
 	// use MuxWriter instead direct use os.File for lock write when rotate
 	w.mw = new(MuxWriter)
-	// set MuxWriter as Logger's io.Writer
-	w.Logger = log.New(w.mw, "", log.Ldate|log.Ltime)
 	return w
 }
 
-// Init file logger with json config.
-// config like:
-//	{
-//	"filename":"log/gogs.log",
-//	"maxlines":10000,
-//	"maxsize":1<<30,
-//	"daily":true,
-//	"maxdays":15,
-//	"rotate":true
-//	}
-func (w *FileLogWriter) Init(config string) error {
-	if err := json.Unmarshal([]byte(config), w); err != nil {
-		return err
-	}
+func (w *FileLogWriter) Log(r *log15.Record) error {
+	data := w.Format.Format(r)
+	w.docheck(len(data))
+	_, err := w.mw.Write(data)
+	return err
+}
+
+func (w *FileLogWriter) Init() error {
 	if len(w.Filename) == 0 {
 		return errors.New("config must have filename")
 	}
@@ -131,17 +119,6 @@ func (w *FileLogWriter) docheck(size int) {
 	w.maxsize_cursize += size
 }
 
-// write logger message into file.
-func (w *FileLogWriter) WriteMsg(msg string, skip int, level LogLevel) error {
-	if level < w.Level {
-		return nil
-	}
-	n := 24 + len(msg) // 24 stand for the length "2013/06/23 21:00:22 [T] "
-	w.docheck(n)
-	w.Logger.Println(msg)
-	return nil
-}
-
 func (w *FileLogWriter) createLogFile() (*os.File, error) {
 	// Open the log file
 	return os.OpenFile(w.Filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
@@ -227,7 +204,7 @@ func (w *FileLogWriter) deleteOldLog() {
 }
 
 // destroy file logger, close file writer.
-func (w *FileLogWriter) Destroy() {
+func (w *FileLogWriter) Close() {
 	w.mw.fd.Close()
 }
 
@@ -237,7 +214,3 @@ func (w *FileLogWriter) Destroy() {
 func (w *FileLogWriter) Flush() {
 	w.mw.fd.Sync()
 }
-
-func init() {
-	Register("file", NewFileWriter)
-}

+ 5 - 0
pkg/log/handlers.go

@@ -0,0 +1,5 @@
+package log
+
+type DisposableHandler interface {
+	Close()
+}

+ 31 - 0
pkg/log/interface.go

@@ -0,0 +1,31 @@
+package log
+
+import "github.com/inconshreveable/log15"
+
+type Lvl int
+
+const (
+	LvlCrit Lvl = iota
+	LvlError
+	LvlWarn
+	LvlInfo
+	LvlDebug
+)
+
+type Logger interface {
+	// New returns a new Logger that has this logger's context plus the given context
+	New(ctx ...interface{}) log15.Logger
+
+	// GetHandler gets the handler associated with the logger.
+	GetHandler() log15.Handler
+
+	// SetHandler updates the logger to write records to the specified handler.
+	SetHandler(h log15.Handler)
+
+	// Log a message at the given level with context key/value pairs
+	Debug(msg string, ctx ...interface{})
+	Info(msg string, ctx ...interface{})
+	Warn(msg string, ctx ...interface{})
+	Error(msg string, ctx ...interface{})
+	Crit(msg string, ctx ...interface{})
+}

+ 132 - 255
pkg/log/log.go

@@ -8,324 +8,201 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
-	"runtime"
 	"strings"
-	"sync"
-)
 
-var (
-	loggers []*Logger
+	"gopkg.in/ini.v1"
+
+	"github.com/inconshreveable/log15"
 )
 
-func NewLogger(bufLen int64, mode, config string) {
-	logger := newLogger(bufLen)
+var Root log15.Logger
+var loggersToClose []DisposableHandler
 
-	isExist := false
-	for _, l := range loggers {
-		if l.adapter == mode {
-			isExist = true
-			l = logger
-		}
-	}
-	if !isExist {
-		loggers = append(loggers, logger)
-	}
-	if err := logger.SetLogger(mode, config); err != nil {
-		Fatal(1, "Fail to set logger(%s): %v", mode, err)
-	}
+func init() {
+	loggersToClose = make([]DisposableHandler, 0)
+	Root = log15.Root()
 }
 
-// this helps you work around the performance annoyance mentioned in
-// https://github.com/grafana/grafana/issues/4055
-// until we refactor this library completely
-func Level(level LogLevel) {
-	for i := range loggers {
-		loggers[i].level = level
-	}
+func New(logger string, ctx ...interface{}) Logger {
+	params := append([]interface{}{"logger", logger}, ctx...)
+	return Root.New(params...)
 }
 
 func Trace(format string, v ...interface{}) {
-	for _, logger := range loggers {
-		logger.Trace(format, v...)
-	}
+	Root.Debug(fmt.Sprintf(format, v))
 }
 
 func Debug(format string, v ...interface{}) {
-	for _, logger := range loggers {
-		logger.Debug(format, v...)
-	}
+	Root.Debug(fmt.Sprintf(format, v))
+}
+
+func Debug2(message string, v ...interface{}) {
+	Root.Debug(message, v...)
 }
 
 func Info(format string, v ...interface{}) {
-	for _, logger := range loggers {
-		logger.Info(format, v...)
-	}
+	Root.Info(fmt.Sprintf(format, v))
+}
+
+func Info2(message string, v ...interface{}) {
+	Root.Info(message, v...)
 }
 
 func Warn(format string, v ...interface{}) {
-	for _, logger := range loggers {
-		logger.Warn(format, v...)
-	}
+	Root.Warn(fmt.Sprintf(format, v))
+}
+
+func Warn2(message string, v ...interface{}) {
+	Root.Warn(message, v...)
 }
 
 func Error(skip int, format string, v ...interface{}) {
-	for _, logger := range loggers {
-		logger.Error(skip, format, v...)
-	}
+	Root.Error(fmt.Sprintf(format, v))
+}
+
+func Error2(message string, v ...interface{}) {
+	Root.Error(message, v...)
 }
 
 func Critical(skip int, format string, v ...interface{}) {
-	for _, logger := range loggers {
-		logger.Critical(skip, format, v...)
-	}
+	Root.Crit(fmt.Sprintf(format, v))
 }
 
 func Fatal(skip int, format string, v ...interface{}) {
-	Error(skip, format, v...)
-	for _, l := range loggers {
-		l.Close()
-	}
+	Root.Crit(fmt.Sprintf(format, v))
+	Close()
 	os.Exit(1)
 }
 
 func Close() {
-	for _, l := range loggers {
-		l.Close()
-		// delete the logger.
-		l = nil
+	for _, logger := range loggersToClose {
+		logger.Close()
 	}
-	// clear the loggers slice.
-	loggers = nil
+	loggersToClose = make([]DisposableHandler, 0)
 }
 
-// .___        __                 _____
-// |   | _____/  |_  ____________/ ____\____    ____  ____
-// |   |/    \   __\/ __ \_  __ \   __\\__  \ _/ ___\/ __ \
-// |   |   |  \  | \  ___/|  | \/|  |   / __ \\  \__\  ___/
-// |___|___|  /__|  \___  >__|   |__|  (____  /\___  >___  >
-//          \/          \/                  \/     \/    \/
-
-type LogLevel int
-
-const (
-	TRACE LogLevel = iota
-	DEBUG
-	INFO
-	WARN
-	ERROR
-	CRITICAL
-	FATAL
-)
-
-// LoggerInterface represents behaviors of a logger provider.
-type LoggerInterface interface {
-	Init(config string) error
-	WriteMsg(msg string, skip int, level LogLevel) error
-	Destroy()
-	Flush()
+var logLevels = map[string]log15.Lvl{
+	"Trace":    log15.LvlDebug,
+	"Debug":    log15.LvlDebug,
+	"Info":     log15.LvlInfo,
+	"Warn":     log15.LvlWarn,
+	"Error":    log15.LvlError,
+	"Critical": log15.LvlCrit,
 }
 
-type loggerType func() LoggerInterface
+func getLogLevelFromConfig(key string, defaultName string, cfg *ini.File) (string, log15.Lvl) {
+	levelName := cfg.Section(key).Key("level").In(defaultName, []string{"Trace", "Debug", "Info", "Warn", "Error", "Critical"})
+	level := getLogLevelFromString(levelName)
+	return levelName, level
+}
 
-var adapters = make(map[string]loggerType)
+func getLogLevelFromString(levelName string) log15.Lvl {
+	level, ok := logLevels[levelName]
 
-// Register registers given logger provider to adapters.
-func Register(name string, log loggerType) {
-	if log == nil {
-		panic("log: register provider is nil")
+	if !ok {
+		Root.Error("Unknown log level", "level", levelName)
+		return log15.LvlError
 	}
-	if _, dup := adapters[name]; dup {
-		panic("log: register called twice for provider \"" + name + "\"")
-	}
-	adapters[name] = log
-}
 
-type logMsg struct {
-	skip  int
-	level LogLevel
-	msg   string
+	return level
 }
 
-// Logger is default logger in beego application.
-// it can contain several providers and log message into all providers.
-type Logger struct {
-	adapter string
-	lock    sync.Mutex
-	level   LogLevel
-	msg     chan *logMsg
-	outputs map[string]LoggerInterface
-	quit    chan bool
-}
+func getFilters(filterStrArray []string) map[string]log15.Lvl {
+	filterMap := make(map[string]log15.Lvl)
 
-// newLogger initializes and returns a new logger.
-func newLogger(buffer int64) *Logger {
-	l := &Logger{
-		msg:     make(chan *logMsg, buffer),
-		outputs: make(map[string]LoggerInterface),
-		quit:    make(chan bool),
+	for _, filterStr := range filterStrArray {
+		parts := strings.Split(filterStr, ":")
+		filterMap[parts[0]] = getLogLevelFromString(parts[1])
 	}
-	go l.StartLogger()
-	return l
-}
 
-// SetLogger sets new logger instanse with given logger adapter and config.
-func (l *Logger) SetLogger(adapter string, config string) error {
-	l.lock.Lock()
-	defer l.lock.Unlock()
-	if log, ok := adapters[adapter]; ok {
-		lg := log()
-		if err := lg.Init(config); err != nil {
-			return err
-		}
-		l.outputs[adapter] = lg
-		l.adapter = adapter
-	} else {
-		panic("log: unknown adapter \"" + adapter + "\" (forgotten register?)")
-	}
-	return nil
+	return filterMap
 }
 
-// DelLogger removes a logger adapter instance.
-func (l *Logger) DelLogger(adapter string) error {
-	l.lock.Lock()
-	defer l.lock.Unlock()
-	if lg, ok := l.outputs[adapter]; ok {
-		lg.Destroy()
-		delete(l.outputs, adapter)
-	} else {
-		panic("log: unknown adapter \"" + adapter + "\" (forgotten register?)")
-	}
-	return nil
-}
+func ReadLoggingConfig(modes []string, logsPath string, cfg *ini.File) {
+	Close()
 
-func (l *Logger) writerMsg(skip int, level LogLevel, msg string) error {
-	lm := &logMsg{
-		skip:  skip,
-		level: level,
-	}
+	defaultLevelName, _ := getLogLevelFromConfig("log", "Info", cfg)
+	defaultFilters := getFilters(cfg.Section("log").Key("filters").Strings(" "))
 
-	// Only error information needs locate position for debugging.
-	if lm.level >= ERROR {
-		pc, file, line, ok := runtime.Caller(skip)
-		if ok {
-			// Get caller function name.
-			fn := runtime.FuncForPC(pc)
-			var fnName string
-			if fn == nil {
-				fnName = "?()"
-			} else {
-				fnName = strings.TrimLeft(filepath.Ext(fn.Name()), ".") + "()"
-			}
+	handlers := make([]log15.Handler, 0)
 
-			lm.msg = fmt.Sprintf("[%s:%d %s] %s", filepath.Base(file), line, fnName, msg)
-		} else {
-			lm.msg = msg
+	for _, mode := range modes {
+		mode = strings.TrimSpace(mode)
+		sec, err := cfg.GetSection("log." + mode)
+		if err != nil {
+			Root.Error("Unknown log mode", "mode", mode)
+			continue
 		}
-	} else {
-		lm.msg = msg
-	}
-	l.msg <- lm
-	return nil
-}
 
-// StartLogger starts logger chan reading.
-func (l *Logger) StartLogger() {
-	for {
-		select {
-		case bm := <-l.msg:
-			for _, l := range l.outputs {
-				if err := l.WriteMsg(bm.msg, bm.skip, bm.level); err != nil {
-					fmt.Println("ERROR, unable to WriteMsg:", err)
-				}
+		// Log level.
+		_, level := getLogLevelFromConfig("log."+mode, defaultLevelName, cfg)
+		modeFilters := getFilters(sec.Key("filters").Strings(" "))
+
+		var handler log15.Handler
+
+		// Generate log configuration.
+		switch mode {
+		case "console":
+			handler = log15.StdoutHandler
+		case "file":
+			fileName := sec.Key("file_name").MustString(filepath.Join(logsPath, "grafana.log"))
+			os.MkdirAll(filepath.Dir(fileName), os.ModePerm)
+			fileHandler := NewFileWriter()
+			fileHandler.Filename = fileName
+			fileHandler.Rotate = sec.Key("log_rotate").MustBool(true)
+			fileHandler.Maxlines = sec.Key("max_lines").MustInt(1000000)
+			fileHandler.Maxsize = 1 << uint(sec.Key("max_size_shift").MustInt(28))
+			fileHandler.Daily = sec.Key("daily_rotate").MustBool(true)
+			fileHandler.Maxdays = sec.Key("max_days").MustInt64(7)
+			fileHandler.Init()
+
+			loggersToClose = append(loggersToClose, fileHandler)
+			handler = fileHandler
+		case "syslog":
+			sysLogHandler := NewSyslog()
+			sysLogHandler.Network = sec.Key("network").MustString("")
+			sysLogHandler.Address = sec.Key("address").MustString("")
+			sysLogHandler.Facility = sec.Key("facility").MustString("local7")
+			sysLogHandler.Tag = sec.Key("tag").MustString("")
+
+			if err := sysLogHandler.Init(); err != nil {
+				Root.Error("Failed to init syslog log handler", "error", err)
+				os.Exit(1)
 			}
-		case <-l.quit:
-			return
-		}
-	}
-}
 
-// Flush flushs all chan data.
-func (l *Logger) Flush() {
-	for _, l := range l.outputs {
-		l.Flush()
-	}
-}
+			loggersToClose = append(loggersToClose, sysLogHandler)
+			handler = sysLogHandler
+		}
 
-// Close closes logger, flush all chan data and destroy all adapter instances.
-func (l *Logger) Close() {
-	l.quit <- true
-	for {
-		if len(l.msg) > 0 {
-			bm := <-l.msg
-			for _, l := range l.outputs {
-				if err := l.WriteMsg(bm.msg, bm.skip, bm.level); err != nil {
-					fmt.Println("ERROR, unable to WriteMsg:", err)
-				}
+		for key, value := range defaultFilters {
+			if _, exist := modeFilters[key]; !exist {
+				modeFilters[key] = value
 			}
-		} else {
-			break
 		}
-	}
-	for _, l := range l.outputs {
-		l.Flush()
-		l.Destroy()
-	}
-}
-
-func (l *Logger) Trace(format string, v ...interface{}) {
-	if l.level > TRACE {
-		return
-	}
-	msg := fmt.Sprintf("[T] "+format, v...)
-	l.writerMsg(0, TRACE, msg)
-}
-
-func (l *Logger) Debug(format string, v ...interface{}) {
-	if l.level > DEBUG {
-		return
-	}
-	msg := fmt.Sprintf("[D] "+format, v...)
-	l.writerMsg(0, DEBUG, msg)
-}
 
-func (l *Logger) Info(format string, v ...interface{}) {
-	if l.level > INFO {
-		return
+		handler = LogFilterHandler(level, modeFilters, handler)
+		handlers = append(handlers, handler)
 	}
-	msg := fmt.Sprintf("[I] "+format, v...)
-	l.writerMsg(0, INFO, msg)
-}
 
-func (l *Logger) Warn(format string, v ...interface{}) {
-	if l.level > WARN {
-		return
-	}
-	msg := fmt.Sprintf("[W] "+format, v...)
-	l.writerMsg(0, WARN, msg)
+	Root.SetHandler(log15.MultiHandler(handlers...))
 }
 
-func (l *Logger) Error(skip int, format string, v ...interface{}) {
-	if l.level > ERROR {
-		return
-	}
-	msg := fmt.Sprintf("[E] "+format, v...)
-	l.writerMsg(skip, ERROR, msg)
-}
+func LogFilterHandler(maxLevel log15.Lvl, filters map[string]log15.Lvl, h log15.Handler) log15.Handler {
+	return log15.FilterHandler(func(r *log15.Record) (pass bool) {
 
-func (l *Logger) Critical(skip int, format string, v ...interface{}) {
-	if l.level > CRITICAL {
-		return
-	}
-	msg := fmt.Sprintf("[C] "+format, v...)
-	l.writerMsg(skip, CRITICAL, msg)
-}
+		if len(filters) > 0 {
+			for i := 0; i < len(r.Ctx); i += 2 {
+				key := r.Ctx[i].(string)
+				if key == "logger" {
+					loggerName, strOk := r.Ctx[i+1].(string)
+					if strOk {
+						if filterLevel, ok := filters[loggerName]; ok {
+							return r.Lvl <= filterLevel
+						}
+					}
+				}
+			}
+		}
 
-func (l *Logger) Fatal(skip int, format string, v ...interface{}) {
-	if l.level > FATAL {
-		return
-	}
-	msg := fmt.Sprintf("[F] "+format, v...)
-	l.writerMsg(skip, FATAL, msg)
-	l.Close()
-	os.Exit(1)
+		return r.Lvl <= maxLevel
+	}, h)
 }

+ 23 - 29
pkg/log/syslog.go

@@ -3,28 +3,28 @@
 package log
 
 import (
-	"encoding/json"
 	"errors"
 	"log/syslog"
+
+	"github.com/inconshreveable/log15"
 )
 
-type SyslogWriter struct {
+type SysLogHandler struct {
 	syslog   *syslog.Writer
-	Network  string `json:"network"`
-	Address  string `json:"address"`
-	Facility string `json:"facility"`
-	Tag      string `json:"tag"`
-}
-
-func NewSyslog() LoggerInterface {
-	return new(SyslogWriter)
+	Network  string
+	Address  string
+	Facility string
+	Tag      string
+	Format   log15.Format
 }
 
-func (sw *SyslogWriter) Init(config string) error {
-	if err := json.Unmarshal([]byte(config), sw); err != nil {
-		return err
+func NewSyslog() *SysLogHandler {
+	return &SysLogHandler{
+		Format: log15.LogfmtFormat(),
 	}
+}
 
+func (sw *SysLogHandler) Init() error {
 	prio, err := parseFacility(sw.Facility)
 	if err != nil {
 		return err
@@ -39,22 +39,22 @@ func (sw *SyslogWriter) Init(config string) error {
 	return nil
 }
 
-func (sw *SyslogWriter) WriteMsg(msg string, skip int, level LogLevel) error {
+func (sw *SysLogHandler) Log(r *log15.Record) error {
 	var err error
 
-	switch level {
-	case TRACE, DEBUG:
+	msg := string(sw.Format.Format(r))
+
+	switch r.Lvl {
+	case log15.LvlDebug:
 		err = sw.syslog.Debug(msg)
-	case INFO:
+	case log15.LvlInfo:
 		err = sw.syslog.Info(msg)
-	case WARN:
+	case log15.LvlWarn:
 		err = sw.syslog.Warning(msg)
-	case ERROR:
+	case log15.LvlError:
 		err = sw.syslog.Err(msg)
-	case CRITICAL:
+	case log15.LvlCrit:
 		err = sw.syslog.Crit(msg)
-	case FATAL:
-		err = sw.syslog.Alert(msg)
 	default:
 		err = errors.New("invalid syslog level")
 	}
@@ -62,12 +62,10 @@ func (sw *SyslogWriter) WriteMsg(msg string, skip int, level LogLevel) error {
 	return err
 }
 
-func (sw *SyslogWriter) Destroy() {
+func (sw *SysLogHandler) Close() {
 	sw.syslog.Close()
 }
 
-func (sw *SyslogWriter) Flush() {}
-
 var facilities = map[string]syslog.Priority{
 	"user":   syslog.LOG_USER,
 	"daemon": syslog.LOG_DAEMON,
@@ -89,7 +87,3 @@ func parseFacility(facility string) (syslog.Priority, error) {
 
 	return prio, nil
 }
-
-func init() {
-	Register("syslog", NewSyslog)
-}

+ 11 - 5
pkg/login/settings.go

@@ -2,6 +2,7 @@ package login
 
 import (
 	"fmt"
+	"os"
 
 	"github.com/BurntSushi/toml"
 	"github.com/grafana/grafana/pkg/log"
@@ -49,21 +50,24 @@ type LdapGroupToOrgRole struct {
 }
 
 var ldapCfg LdapConfig
+var ldapLogger log.Logger = log.New("ldap")
 
 func loadLdapConfig() {
 	if !setting.LdapEnabled {
 		return
 	}
 
-	log.Info("Login: Ldap enabled, reading config file: %s", setting.LdapConfigFile)
+	ldapLogger.Info("Ldap enabled, reading config file", "file", setting.LdapConfigFile)
 
 	_, err := toml.DecodeFile(setting.LdapConfigFile, &ldapCfg)
 	if err != nil {
-		log.Fatal(3, "Failed to load ldap config file: %s", err)
+		ldapLogger.Crit("Failed to load ldap config file", "error", err)
+		os.Exit(1)
 	}
 
 	if len(ldapCfg.Servers) == 0 {
-		log.Fatal(3, "ldap enabled but no ldap servers defined in config file: %s", setting.LdapConfigFile)
+		ldapLogger.Crit("ldap enabled but no ldap servers defined in config file", "file", setting.LdapConfigFile)
+		os.Exit(1)
 	}
 
 	// set default org id
@@ -83,11 +87,13 @@ func assertNotEmptyCfg(val interface{}, propName string) {
 	switch v := val.(type) {
 	case string:
 		if v == "" {
-			log.Fatal(3, "LDAP config file is missing option: %s", propName)
+			ldapLogger.Crit("LDAP config file is missing option", "option", propName)
+			os.Exit(1)
 		}
 	case []string:
 		if len(v) == 0 {
-			log.Fatal(3, "LDAP config file is missing option: %s", propName)
+			ldapLogger.Crit("LDAP config file is missing option", "option", propName)
+			os.Exit(1)
 		}
 	default:
 		fmt.Println("unknown")

+ 5 - 3
pkg/metrics/publish.go

@@ -14,6 +14,8 @@ import (
 	"github.com/grafana/grafana/pkg/setting"
 )
 
+var metricsLogger log.Logger = log.New("metrics")
+
 func Init() {
 	settings := readSettings()
 	initMetricVars(settings)
@@ -54,7 +56,7 @@ func sendUsageStats() {
 		return
 	}
 
-	log.Trace("Sending anonymous usage stats to stats.grafana.org")
+	metricsLogger.Debug("Sending anonymous usage stats to stats.grafana.org")
 
 	version := strings.Replace(setting.BuildVersion, ".", "_", -1)
 
@@ -66,7 +68,7 @@ func sendUsageStats() {
 
 	statsQuery := m.GetSystemStatsQuery{}
 	if err := bus.Dispatch(&statsQuery); err != nil {
-		log.Error(3, "Failed to get system stats", err)
+		metricsLogger.Error("Failed to get system stats", "error", err)
 		return
 	}
 
@@ -80,7 +82,7 @@ func sendUsageStats() {
 
 	dsStats := m.GetDataSourceStatsQuery{}
 	if err := bus.Dispatch(&dsStats); err != nil {
-		log.Error(3, "Failed to get datasource stats", err)
+		metricsLogger.Error("Failed to get datasource stats", "error", err)
 		return
 	}
 

+ 4 - 7
pkg/metrics/settings.go

@@ -1,9 +1,6 @@
 package metrics
 
-import (
-	"github.com/grafana/grafana/pkg/log"
-	"github.com/grafana/grafana/pkg/setting"
-)
+import "github.com/grafana/grafana/pkg/setting"
 
 type MetricPublisher interface {
 	Publish(metrics []Metric)
@@ -24,7 +21,7 @@ func readSettings() *MetricSettings {
 
 	var section, err = setting.Cfg.GetSection("metrics")
 	if err != nil {
-		log.Fatal(3, "Unable to find metrics config section")
+		metricsLogger.Crit("Unable to find metrics config section", "error", err)
 		return nil
 	}
 
@@ -36,9 +33,9 @@ func readSettings() *MetricSettings {
 	}
 
 	if graphitePublisher, err := CreateGraphitePublisher(); err != nil {
-		log.Error(3, "Metrics: Failed to init Graphite metric publisher", err)
+		metricsLogger.Error("Failed to init Graphite metric publisher", "error", err)
 	} else if graphitePublisher != nil {
-		log.Info("Metrics: Graphite publisher initialized")
+		metricsLogger.Info("Metrics publisher initialized", "type", "graphite")
 		settings.Publishers = append(settings.Publishers, graphitePublisher)
 	}
 

+ 6 - 16
pkg/middleware/logger.go

@@ -16,11 +16,9 @@
 package middleware
 
 import (
-	"fmt"
 	"net/http"
 	"time"
 
-	"github.com/grafana/grafana/pkg/log"
 	"github.com/grafana/grafana/pkg/metrics"
 	"github.com/grafana/grafana/pkg/setting"
 	"gopkg.in/macaron.v1"
@@ -31,34 +29,26 @@ func Logger() macaron.Handler {
 		start := time.Now()
 		c.Data["perfmon.start"] = start
 
-		uname := c.GetCookie(setting.CookieUserName)
-		if len(uname) == 0 {
-			uname = "-"
-		}
-
 		rw := res.(macaron.ResponseWriter)
 		c.Next()
 
 		timeTakenMs := time.Since(start) / time.Millisecond
-		content := fmt.Sprintf("Completed %s %s \"%s %s %s\" %v %s %d bytes in %dms", c.RemoteAddr(), uname, req.Method, req.URL.Path, req.Proto, rw.Status(), http.StatusText(rw.Status()), rw.Size(), timeTakenMs)
 
 		if timer, ok := c.Data["perfmon.timer"]; ok {
 			timerTyped := timer.(metrics.Timer)
 			timerTyped.Update(timeTakenMs)
 		}
 
-		switch rw.Status() {
-		case 200, 304:
-			content = fmt.Sprintf("%s", content)
+		status := rw.Status()
+		if status == 200 || status == 304 {
 			if !setting.RouterLogging {
 				return
 			}
-		case 404:
-			content = fmt.Sprintf("%s", content)
-		case 500:
-			content = fmt.Sprintf("%s", content)
 		}
 
-		log.Info(content)
+		if ctx, ok := c.Data["ctx"]; ok {
+			ctxTyped := ctx.(*Context)
+			ctxTyped.Logger.Info("Request Completed", "method", req.Method, "path", req.URL.Path, "status", status, "remote_addr", c.RemoteAddr(), "time_ms", timeTakenMs, "size", rw.Size())
+		}
 	}
 }

+ 5 - 0
pkg/middleware/middleware.go

@@ -23,6 +23,7 @@ type Context struct {
 
 	IsSignedIn     bool
 	AllowAnonymous bool
+	Logger         log.Logger
 }
 
 func GetContextHandler() macaron.Handler {
@@ -33,6 +34,7 @@ func GetContextHandler() macaron.Handler {
 			Session:        GetSession(),
 			IsSignedIn:     false,
 			AllowAnonymous: false,
+			Logger:         log.New("context"),
 		}
 
 		// the order in which these are tested are important
@@ -48,6 +50,9 @@ func GetContextHandler() macaron.Handler {
 			initContextWithAnonymousUser(ctx) {
 		}
 
+		ctx.Logger = log.New("context", "userId", ctx.UserId, "orgId", ctx.OrgId, "uname", ctx.Login)
+		ctx.Data["ctx"] = ctx
+
 		c.Map(ctx)
 	}
 }

+ 174 - 0
pkg/middleware/recovery.go

@@ -0,0 +1,174 @@
+// Copyright 2013 Martini Authors
+// Copyright 2014 The Macaron Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package middleware
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"runtime"
+
+	"gopkg.in/macaron.v1"
+
+	"github.com/go-macaron/inject"
+	"github.com/grafana/grafana/pkg/log"
+	"github.com/grafana/grafana/pkg/setting"
+)
+
+const (
+	panicHtml = `<html>
+<head><title>PANIC: %s</title>
+<meta charset="utf-8" />
+<style type="text/css">
+html, body {
+	font-family: "Roboto", sans-serif;
+	color: #333333;
+	background-color: #ea5343;
+	margin: 0px;
+}
+h1 {
+	color: #d04526;
+	background-color: #ffffff;
+	padding: 20px;
+	border-bottom: 1px dashed #2b3848;
+}
+pre {
+	margin: 20px;
+	padding: 20px;
+	border: 2px solid #2b3848;
+	background-color: #ffffff;
+	white-space: pre-wrap;       /* css-3 */
+	white-space: -moz-pre-wrap;  /* Mozilla, since 1999 */
+	white-space: -pre-wrap;      /* Opera 4-6 */
+	white-space: -o-pre-wrap;    /* Opera 7 */
+	word-wrap: break-word;       /* Internet Explorer 5.5+ */
+}
+</style>
+</head><body>
+<h1>PANIC</h1>
+<pre style="font-weight: bold;">%s</pre>
+<pre>%s</pre>
+</body>
+</html>`
+)
+
+var (
+	dunno     = []byte("???")
+	centerDot = []byte("·")
+	dot       = []byte(".")
+	slash     = []byte("/")
+)
+
+// stack returns a nicely formatted stack frame, skipping skip frames
+func stack(skip int) []byte {
+	buf := new(bytes.Buffer) // the returned data
+	// As we loop, we open files and read them. These variables record the currently
+	// loaded file.
+	var lines [][]byte
+	var lastFile string
+	for i := skip; ; i++ { // Skip the expected number of frames
+		pc, file, line, ok := runtime.Caller(i)
+		if !ok {
+			break
+		}
+		// Print this much at least.  If we can't find the source, it won't show.
+		fmt.Fprintf(buf, "%s:%d (0x%x)\n", file, line, pc)
+		if file != lastFile {
+			data, err := ioutil.ReadFile(file)
+			if err != nil {
+				continue
+			}
+			lines = bytes.Split(data, []byte{'\n'})
+			lastFile = file
+		}
+		fmt.Fprintf(buf, "\t%s: %s\n", function(pc), source(lines, line))
+	}
+	return buf.Bytes()
+}
+
+// source returns a space-trimmed slice of the n'th line.
+func source(lines [][]byte, n int) []byte {
+	n-- // in stack trace, lines are 1-indexed but our array is 0-indexed
+	if n < 0 || n >= len(lines) {
+		return dunno
+	}
+	return bytes.TrimSpace(lines[n])
+}
+
+// function returns, if possible, the name of the function containing the PC.
+func function(pc uintptr) []byte {
+	fn := runtime.FuncForPC(pc)
+	if fn == nil {
+		return dunno
+	}
+	name := []byte(fn.Name())
+	// The name includes the path name to the package, which is unnecessary
+	// since the file name is already included.  Plus, it has center dots.
+	// That is, we see
+	//	runtime/debug.*T·ptrmethod
+	// and want
+	//	*T.ptrmethod
+	// Also the package path might contains dot (e.g. code.google.com/...),
+	// so first eliminate the path prefix
+	if lastslash := bytes.LastIndex(name, slash); lastslash >= 0 {
+		name = name[lastslash+1:]
+	}
+	if period := bytes.Index(name, dot); period >= 0 {
+		name = name[period+1:]
+	}
+	name = bytes.Replace(name, centerDot, dot, -1)
+	return name
+}
+
+// Recovery returns a middleware that recovers from any panics and writes a 500 if there was one.
+// While Martini is in development mode, Recovery will also output the panic as HTML.
+func Recovery() macaron.Handler {
+	return func(c *macaron.Context) {
+		defer func() {
+			if err := recover(); err != nil {
+				stack := stack(3)
+
+				panicLogger := log.Root
+				// try to get request logger
+				if ctx, ok := c.Data["ctx"]; ok {
+					ctxTyped := ctx.(*Context)
+					panicLogger = ctxTyped.Logger
+				}
+
+				panicLogger.Error("Request error", "error", err, "stack", string(stack))
+
+				// Lookup the current responsewriter
+				val := c.GetVal(inject.InterfaceOf((*http.ResponseWriter)(nil)))
+				res := val.Interface().(http.ResponseWriter)
+
+				// respond with panic message while in development mode
+				var body []byte
+				if setting.Env == setting.DEV {
+					res.Header().Set("Content-Type", "text/html")
+					body = []byte(fmt.Sprintf(panicHtml, err, err, stack))
+				}
+
+				res.WriteHeader(http.StatusInternalServerError)
+				if nil != body {
+					res.Write(body)
+				}
+			}
+		}()
+
+		c.Next()
+	}
+}

+ 1 - 2
pkg/plugins/models.go

@@ -6,7 +6,6 @@ import (
 	"fmt"
 	"strings"
 
-	"github.com/grafana/grafana/pkg/log"
 	m "github.com/grafana/grafana/pkg/models"
 	"github.com/grafana/grafana/pkg/setting"
 )
@@ -58,7 +57,7 @@ func (pb *PluginBase) registerPlugin(pluginDir string) error {
 	}
 
 	if !strings.HasPrefix(pluginDir, setting.StaticRootPath) {
-		log.Info("Plugins: Registering plugin %v", pb.Name)
+		plog.Info("Registering plugin", "name", pb.Name)
 	}
 
 	if len(pb.Dependencies.Plugins) == 0 {

+ 7 - 4
pkg/plugins/plugins.go

@@ -25,6 +25,7 @@ var (
 
 	GrafanaLatestVersion string
 	GrafanaHasUpdate     bool
+	plog                 log.Logger
 )
 
 type PluginScanner struct {
@@ -33,6 +34,8 @@ type PluginScanner struct {
 }
 
 func Init() error {
+	plog = log.New("plugins")
+
 	DataSources = make(map[string]*DataSourcePlugin)
 	StaticRoutes = make([]*PluginStaticRoute, 0)
 	Panels = make(map[string]*PanelPlugin)
@@ -44,16 +47,16 @@ func Init() error {
 		"app":        AppPlugin{},
 	}
 
-	log.Info("Plugins: Scan starting")
+	plog.Info("Starting plugin search")
 	scan(path.Join(setting.StaticRootPath, "app/plugins"))
 
 	// check if plugins dir exists
 	if _, err := os.Stat(setting.PluginsPath); os.IsNotExist(err) {
-		log.Warn("Plugins: Plugin dir %v does not exist", setting.PluginsPath)
+		plog.Warn("Plugin dir does not exist", "dir", setting.PluginsPath)
 		if err = os.MkdirAll(setting.PluginsPath, os.ModePerm); err != nil {
-			log.Warn("Plugins: Failed to create plugin dir: %v, error: %v", setting.PluginsPath, err)
+			plog.Warn("Failed to create plugin dir", "dir", setting.PluginsPath, "error", err)
 		} else {
-			log.Info("Plugins: Plugin dir %v created", setting.PluginsPath)
+			plog.Info("Plugin dir created", "dir", setting.PluginsPath)
 			scan(setting.PluginsPath)
 		}
 	} else {

+ 2 - 1
pkg/services/sqlstore/migrations/migrations_test.go

@@ -6,6 +6,7 @@ import (
 	"github.com/go-xorm/xorm"
 	. "github.com/grafana/grafana/pkg/services/sqlstore/migrator"
 	"github.com/grafana/grafana/pkg/services/sqlstore/sqlutil"
+	"github.com/inconshreveable/log15"
 
 	. "github.com/smartystreets/goconvey/convey"
 )
@@ -28,7 +29,7 @@ func TestMigrations(t *testing.T) {
 			sqlutil.CleanDB(x)
 
 			mg := NewMigrator(x)
-			//mg.LogLevel = log.DEBUG
+			mg.Logger.SetHandler(log15.DiscardHandler())
 			AddMigrations(mg)
 
 			err = mg.Start()

+ 9 - 18
pkg/services/sqlstore/migrator/migrator.go

@@ -11,11 +11,10 @@ import (
 )
 
 type Migrator struct {
-	LogLevel log.LogLevel
-
 	x          *xorm.Engine
 	dialect    Dialect
 	migrations []Migration
+	Logger     log.Logger
 }
 
 type MigrationLog struct {
@@ -30,7 +29,7 @@ type MigrationLog struct {
 func NewMigrator(engine *xorm.Engine) *Migrator {
 	mg := &Migrator{}
 	mg.x = engine
-	mg.LogLevel = log.WARN
+	mg.Logger = log.New("migrator")
 	mg.migrations = make([]Migration, 0)
 	mg.dialect = NewDialect(mg.x.DriverName())
 	return mg
@@ -69,9 +68,7 @@ func (mg *Migrator) GetMigrationLog() (map[string]MigrationLog, error) {
 }
 
 func (mg *Migrator) Start() error {
-	if mg.LogLevel <= log.INFO {
-		log.Info("Migrator: Starting DB migration")
-	}
+	mg.Logger.Info("Starting DB migration")
 
 	logMap, err := mg.GetMigrationLog()
 	if err != nil {
@@ -81,9 +78,7 @@ func (mg *Migrator) Start() error {
 	for _, m := range mg.migrations {
 		_, exists := logMap[m.Id()]
 		if exists {
-			if mg.LogLevel <= log.DEBUG {
-				log.Debug("Migrator: Skipping migration: %v, Already executed", m.Id())
-			}
+			mg.Logger.Debug("Skipping migration: Already executed", "id", m.Id())
 			continue
 		}
 
@@ -95,12 +90,10 @@ func (mg *Migrator) Start() error {
 			Timestamp:   time.Now(),
 		}
 
-		if mg.LogLevel <= log.DEBUG {
-			log.Debug("Migrator: Executing SQL: \n %v \n", sql)
-		}
+		mg.Logger.Debug("Executing", "sql", sql)
 
 		if err := mg.exec(m); err != nil {
-			log.Error(3, "Migrator: error: \n%s:\n%s", err, sql)
+			mg.Logger.Error("Exec failed", "error", err, "sql", sql)
 			record.Error = err.Error()
 			mg.x.Insert(&record)
 			return err
@@ -114,9 +107,7 @@ func (mg *Migrator) Start() error {
 }
 
 func (mg *Migrator) exec(m Migration) error {
-	if mg.LogLevel <= log.INFO {
-		log.Info("Migrator: exec migration id: %v", m.Id())
-	}
+	mg.Logger.Info("Executing migration", "id", m.Id())
 
 	err := mg.inTransaction(func(sess *xorm.Session) error {
 
@@ -125,14 +116,14 @@ func (mg *Migrator) exec(m Migration) error {
 			sql, args := condition.Sql(mg.dialect)
 			results, err := sess.Query(sql, args...)
 			if err != nil || len(results) == 0 {
-				log.Info("Migrator: skipping migration id: %v, condition not fulfilled", m.Id())
+				mg.Logger.Info("Skipping migration condition not fulfilled", "id", m.Id())
 				return sess.Rollback()
 			}
 		}
 
 		_, err := sess.Exec(m.Sql(mg.dialect))
 		if err != nil {
-			log.Error(3, "Migrator: exec FAILED migration id: %v, err: %v", m.Id(), err)
+			mg.Logger.Error("Executing migration failed", "id", m.Id(), "error", err)
 			return err
 		}
 		return nil

+ 7 - 18
pkg/services/sqlstore/sqlstore.go

@@ -40,8 +40,8 @@ var (
 	}
 
 	mysqlConfig MySQLConfig
-
-	UseSQLite3 bool
+	UseSQLite3  bool
+	sqlog       log.Logger = log.New("sqlstore")
 )
 
 func EnsureAdminUser() {
@@ -74,13 +74,15 @@ func NewEngine() {
 	x, err := getEngine()
 
 	if err != nil {
-		log.Fatal(3, "Sqlstore: Fail to connect to database: %v", err)
+		sqlog.Crit("Fail to connect to database", "error", err)
+		os.Exit(1)
 	}
 
 	err = SetEngine(x, setting.Env == setting.DEV)
 
 	if err != nil {
-		log.Fatal(3, "fail to initialize orm engine: %v", err)
+		sqlog.Crit("Fail to initialize orm engine", "error", err)
+		os.Exit(1)
 	}
 }
 
@@ -89,24 +91,12 @@ func SetEngine(engine *xorm.Engine, enableLog bool) (err error) {
 	dialect = migrator.NewDialect(x.DriverName())
 
 	migrator := migrator.NewMigrator(x)
-	migrator.LogLevel = log.INFO
 	migrations.AddMigrations(migrator)
 
 	if err := migrator.Start(); err != nil {
 		return fmt.Errorf("Sqlstore::Migration failed err: %v\n", err)
 	}
 
-	if enableLog {
-		logPath := path.Join(setting.LogsPath, "xorm.log")
-		os.MkdirAll(path.Dir(logPath), os.ModePerm)
-
-		f, err := os.Create(logPath)
-		if err != nil {
-			return fmt.Errorf("sqlstore.init(fail to create xorm.log): %v", err)
-		}
-		x.Logger = xorm.NewSimpleLogger(f)
-	}
-
 	return nil
 }
 
@@ -158,8 +148,7 @@ func getEngine() (*xorm.Engine, error) {
 		return nil, fmt.Errorf("Unknown database type: %s", DbCfg.Type)
 	}
 
-	log.Info("Database: %v", DbCfg.Type)
-
+	sqlog.Info("Initializing DB", "dbtype", DbCfg.Type)
 	return xorm.NewEngine(DbCfg.Type, cnnstr)
 }
 

+ 23 - 116
pkg/setting/setting.go

@@ -5,7 +5,6 @@ package setting
 
 import (
 	"bytes"
-	"encoding/json"
 	"fmt"
 	"net/url"
 	"os"
@@ -139,6 +138,9 @@ var (
 
 	// QUOTA
 	Quota QuotaSettings
+
+	// logger
+	logger log.Logger
 )
 
 type CommandLineArgs struct {
@@ -149,7 +151,7 @@ type CommandLineArgs struct {
 
 func init() {
 	IsWindows = runtime.GOOS == "windows"
-	log.NewLogger(0, "console", `{"level": 0, "formatting":true}`)
+	logger = log.New("settings")
 }
 
 func parseAppUrlAndSubUrl(section *ini.Section) (string, string) {
@@ -335,7 +337,7 @@ func loadConfiguration(args *CommandLineArgs) {
 
 	// init logging before specific config so we can log errors from here on
 	DataPath = makeAbsolute(Cfg.Section("paths").Key("data").String(), HomePath)
-	initLogging(args)
+	initLogging()
 
 	// load specified config file
 	loadSpecifedConfigFile(args.Config)
@@ -351,7 +353,7 @@ func loadConfiguration(args *CommandLineArgs) {
 
 	// update data path and logging config
 	DataPath = makeAbsolute(Cfg.Section("paths").Key("data").String(), HomePath)
-	initLogging(args)
+	initLogging()
 }
 
 func pathExists(path string) bool {
@@ -543,134 +545,39 @@ func readSessionConfig() {
 	}
 }
 
-var logLevels = map[string]int{
-	"Trace":    0,
-	"Debug":    1,
-	"Info":     2,
-	"Warn":     3,
-	"Error":    4,
-	"Critical": 5,
-}
-
-func getLogLevel(key string, defaultName string) (string, int) {
-	levelName := Cfg.Section(key).Key("level").In(defaultName, []string{"Trace", "Debug", "Info", "Warn", "Error", "Critical"})
-
-	level, ok := logLevels[levelName]
-	if !ok {
-		log.Fatal(4, "Unknown log level: %s", levelName)
-	}
-
-	return levelName, level
-}
-
-func initLogging(args *CommandLineArgs) {
-	//close any existing log handlers.
-	log.Close()
-	// Get and check log mode.
+func initLogging() {
+	// split on comma
 	LogModes = strings.Split(Cfg.Section("log").Key("mode").MustString("console"), ",")
-	LogsPath = makeAbsolute(Cfg.Section("paths").Key("logs").String(), HomePath)
-
-	defaultLevelName, _ := getLogLevel("log", "Info")
-
-	LogConfigs = make([]util.DynMap, len(LogModes))
-
-	for i, mode := range LogModes {
-
-		mode = strings.TrimSpace(mode)
-		sec, err := Cfg.GetSection("log." + mode)
-		if err != nil {
-			log.Fatal(4, "Unknown log mode: %s", mode)
-		}
-
-		// Log level.
-		_, level := getLogLevel("log."+mode, defaultLevelName)
-
-		// Generate log configuration.
-		switch mode {
-		case "console":
-			formatting := sec.Key("formatting").MustBool(true)
-			LogConfigs[i] = util.DynMap{
-				"level":      level,
-				"formatting": formatting,
-			}
-		case "file":
-			logPath := sec.Key("file_name").MustString(filepath.Join(LogsPath, "grafana.log"))
-			os.MkdirAll(filepath.Dir(logPath), os.ModePerm)
-			LogConfigs[i] = util.DynMap{
-				"level":    level,
-				"filename": logPath,
-				"rotate":   sec.Key("log_rotate").MustBool(true),
-				"maxlines": sec.Key("max_lines").MustInt(1000000),
-				"maxsize":  1 << uint(sec.Key("max_size_shift").MustInt(28)),
-				"daily":    sec.Key("daily_rotate").MustBool(true),
-				"maxdays":  sec.Key("max_days").MustInt(7),
-			}
-		case "conn":
-			LogConfigs[i] = util.DynMap{
-				"level":          level,
-				"reconnectOnMsg": sec.Key("reconnect_on_msg").MustBool(),
-				"reconnect":      sec.Key("reconnect").MustBool(),
-				"net":            sec.Key("protocol").In("tcp", []string{"tcp", "unix", "udp"}),
-				"addr":           sec.Key("addr").MustString(":7020"),
-			}
-		case "smtp":
-			LogConfigs[i] = util.DynMap{
-				"level":     level,
-				"user":      sec.Key("user").MustString("example@example.com"),
-				"passwd":    sec.Key("passwd").MustString("******"),
-				"host":      sec.Key("host").MustString("127.0.0.1:25"),
-				"receivers": sec.Key("receivers").MustString("[]"),
-				"subject":   sec.Key("subject").MustString("Diagnostic message from serve"),
-			}
-		case "database":
-			LogConfigs[i] = util.DynMap{
-				"level":  level,
-				"driver": sec.Key("driver").String(),
-				"conn":   sec.Key("conn").String(),
-			}
-		case "syslog":
-			LogConfigs[i] = util.DynMap{
-				"level":    level,
-				"network":  sec.Key("network").MustString(""),
-				"address":  sec.Key("address").MustString(""),
-				"facility": sec.Key("facility").MustString("local7"),
-				"tag":      sec.Key("tag").MustString(""),
-			}
-		}
-
-		cfgJsonBytes, _ := json.Marshal(LogConfigs[i])
-		log.NewLogger(Cfg.Section("log").Key("buffer_len").MustInt64(10000), mode, string(cfgJsonBytes))
+	// also try space
+	if len(LogModes) == 1 {
+		LogModes = strings.Split(Cfg.Section("log").Key("mode").MustString("console"), " ")
 	}
+	LogsPath = makeAbsolute(Cfg.Section("paths").Key("logs").String(), HomePath)
+	log.ReadLoggingConfig(LogModes, LogsPath, Cfg)
 }
 
 func LogConfigurationInfo() {
 	var text bytes.Buffer
-	text.WriteString("Configuration Info\n")
 
-	text.WriteString("Config files:\n")
-	for i, file := range configFiles {
-		text.WriteString(fmt.Sprintf("  [%d]: %s\n", i, file))
+	for _, file := range configFiles {
+		logger.Info("Config loaded from", "file", file)
 	}
 
 	if len(appliedCommandLineProperties) > 0 {
-		text.WriteString("Command lines overrides:\n")
-		for i, prop := range appliedCommandLineProperties {
-			text.WriteString(fmt.Sprintf("  [%d]: %s\n", i, prop))
+		for _, prop := range appliedCommandLineProperties {
+			logger.Info("Config overridden from command line", "arg", prop)
 		}
 	}
 
 	if len(appliedEnvOverrides) > 0 {
 		text.WriteString("\tEnvironment variables used:\n")
-		for i, prop := range appliedEnvOverrides {
-			text.WriteString(fmt.Sprintf("  [%d]: %s\n", i, prop))
+		for _, prop := range appliedEnvOverrides {
+			logger.Info("Config overridden from environment variable", "var", prop)
 		}
 	}
 
-	text.WriteString("Paths:\n")
-	text.WriteString(fmt.Sprintf("  home: %s\n", HomePath))
-	text.WriteString(fmt.Sprintf("  data: %s\n", DataPath))
-	text.WriteString(fmt.Sprintf("  logs: %s\n", LogsPath))
-	text.WriteString(fmt.Sprintf("  plugins: %s\n", PluginsPath))
-
-	log.Info(text.String())
+	logger.Info("Path Home", "path", HomePath)
+	logger.Info("Path Data", "path", DataPath)
+	logger.Info("Path Logs", "path", LogsPath)
+	logger.Info("Path Plugins", "path", PluginsPath)
 }