
Merge branch 'new-logger' into alerting_definitions

Conflicts:
	pkg/api/api.go
	pkg/setting/setting.go
Torkel Ödegaard, 9 years ago
Commit 0667d73660

+ 2 - 5
conf/defaults.ini

@@ -248,12 +248,9 @@ templates_pattern = emails/*.html
 #################################### Logging ##########################
 [log]
 # Either "console", "file", "syslog". Default is console and  file
-# Use comma to separate multiple modes, e.g. "console, file"
+# Use space to separate multiple modes, e.g. "console file"
 mode = console, file
 
-# Buffer length of channel, keep it as it is if you don't know what it is.
-buffer_len = 10000
-
 # Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "Info"
 level = Info
 
@@ -273,7 +270,7 @@ log_rotate = true
 max_lines = 1000000
 
 # Max size shift of single file, default is 28 means 1 << 28, 256MB
-max_lines_shift = 28
+max_size_shift = 28
 
 # Segment log daily, default is true
 daily_rotate = true

+ 1 - 4
conf/sample.ini

@@ -233,9 +233,6 @@ check_for_updates = true
 # Use comma to separate multiple modes, e.g. "console, file"
 ;mode = console, file
 
-# Buffer length of channel, keep it as it is if you don't know what it is.
-;buffer_len = 10000
-
 # Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "Info"
 ;level = Info
 
@@ -253,7 +250,7 @@ check_for_updates = true
 ;max_lines = 1000000
 
 # Max size shift of single file, default is 28 means 1 << 28, 256MB
-;max_lines_shift = 28
+;max_size_shift = 28
 
 # Segment log daily, default is true
 ;daily_rotate = true

+ 3 - 0
pkg/api/api.go

@@ -253,6 +253,9 @@ func Register(r *macaron.Macaron) {
 			r.Get("/changes", wrap(GetAlertChanges))
 		})
 
+		// error test
+		r.Get("/metrics/error", wrap(GenerateError))
+
 	}, reqSignedIn)
 
 	// admin api

+ 6 - 0
pkg/api/metrics.go

@@ -87,3 +87,9 @@ func GetInternalMetrics(c *middleware.Context) Response {
 		},
 	}
 }
+
+// Generates an index out of range error
+func GenerateError(c *middleware.Context) Response {
+	var array []string
+	return Json(200, array[20])
+}

+ 5 - 3
pkg/cmd/grafana-server/main.go

@@ -40,7 +40,6 @@ func init() {
 }
 
 func main() {
-
 	v := flag.Bool("v", false, "prints current version and exits")
 	flag.Parse()
 	if *v {
@@ -49,6 +48,9 @@ func main() {
 	}
 
 	buildstampInt64, _ := strconv.ParseInt(buildstamp, 10, 64)
+	if buildstampInt64 == 0 {
+		buildstampInt64 = time.Now().Unix()
+	}
 
 	setting.BuildVersion = version
 	setting.BuildCommit = commit
@@ -87,8 +89,8 @@ func initRuntime() {
 		log.Fatal(3, err.Error())
 	}
 
-	log.Info("Starting Grafana")
-	log.Info("Version: %v, Commit: %v, Build date: %v", setting.BuildVersion, setting.BuildCommit, time.Unix(setting.BuildStamp, 0))
+	logger := log.New("main")
+	logger.Info("Starting Grafana", "version", version, "commit", commit, "compiled", time.Unix(setting.BuildStamp, 0))
 
 	setting.LogConfigurationInfo()
 

+ 11 - 5
pkg/cmd/grafana-server/web.go

@@ -6,6 +6,7 @@ package main
 import (
 	"fmt"
 	"net/http"
+	"os"
 	"path"
 
 	"gopkg.in/macaron.v1"
@@ -18,12 +19,14 @@ import (
 	"github.com/grafana/grafana/pkg/setting"
 )
 
+var logger log.Logger
+
 func newMacaron() *macaron.Macaron {
 	macaron.Env = setting.Env
 	m := macaron.New()
 
 	m.Use(middleware.Logger())
-	m.Use(macaron.Recovery())
+	m.Use(middleware.Recovery())
 
 	if setting.EnableGzip {
 		m.Use(middleware.Gziper())
@@ -31,7 +34,7 @@ func newMacaron() *macaron.Macaron {
 
 	for _, route := range plugins.StaticRoutes {
 		pluginRoute := path.Join("/public/plugins/", route.PluginId)
-		log.Debug("Plugins: Adding route %s -> %s", pluginRoute, route.Directory)
+		logger.Debug("Plugins: Adding route", "route", pluginRoute, "dir", route.Directory)
 		mapStatic(m, route.Directory, "", pluginRoute)
 	}
 
@@ -76,23 +79,26 @@ func mapStatic(m *macaron.Macaron, rootDir string, dir string, prefix string) {
 }
 
 func StartServer() {
+	logger = log.New("server")
 
 	var err error
 	m := newMacaron()
 	api.Register(m)
 
 	listenAddr := fmt.Sprintf("%s:%s", setting.HttpAddr, setting.HttpPort)
-	log.Info("Listen: %v://%s%s", setting.Protocol, listenAddr, setting.AppSubUrl)
+	logger.Info("Server Listening", "address", listenAddr, "protocol", setting.Protocol, "subUrl", setting.AppSubUrl)
 	switch setting.Protocol {
 	case setting.HTTP:
 		err = http.ListenAndServe(listenAddr, m)
 	case setting.HTTPS:
 		err = http.ListenAndServeTLS(listenAddr, setting.CertFile, setting.KeyFile, m)
 	default:
-		log.Fatal(4, "Invalid protocol: %s", setting.Protocol)
+		logger.Error("Invalid protocol", "protocol", setting.Protocol)
+		os.Exit(1)
 	}
 
 	if err != nil {
-		log.Fatal(4, "Fail to start server: %v", err)
+		logger.Error("Fail to start server", "error", err)
+		os.Exit(1)
 	}
 }

+ 0 - 157
pkg/log/console.go

@@ -1,157 +0,0 @@
-// Copyright 2014 The Gogs Authors. All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-
-package log
-
-import (
-	"encoding/json"
-	"fmt"
-	"log"
-	"os"
-	"runtime"
-)
-
-type Brush func(string) string
-
-func NewBrush(color string) Brush {
-	pre := "\033["
-	reset := "\033[0m"
-	return func(text string) string {
-		return pre + color + "m" + text + reset
-	}
-}
-
-var (
-	Red    = NewBrush("1;31")
-	Purple = NewBrush("1;35")
-	Yellow = NewBrush("1;33")
-	Green  = NewBrush("1;32")
-	Blue   = NewBrush("1;34")
-	Cyan   = NewBrush("1;36")
-
-	colors = []Brush{
-		Cyan,   // Trace      cyan
-		Blue,   // Debug      blue
-		Green,  // Info       green
-		Yellow, // Warn       yellow
-		Red,    // Error      red
-		Purple, // Critical   purple
-		Red,    // Fatal      red
-	}
-	consoleWriter = &ConsoleWriter{lg: log.New(os.Stdout, "", 0),
-		Level: TRACE}
-)
-
-// ConsoleWriter implements LoggerInterface and writes messages to terminal.
-type ConsoleWriter struct {
-	lg         *log.Logger
-	Level      LogLevel `json:"level"`
-	Formatting bool     `json:"formatting"`
-}
-
-// create ConsoleWriter returning as LoggerInterface.
-func NewConsole() LoggerInterface {
-	return &ConsoleWriter{
-		lg:         log.New(os.Stderr, "", log.Ldate|log.Ltime),
-		Level:      TRACE,
-		Formatting: true,
-	}
-}
-
-func (cw *ConsoleWriter) Init(config string) error {
-	return json.Unmarshal([]byte(config), cw)
-}
-
-func (cw *ConsoleWriter) WriteMsg(msg string, skip int, level LogLevel) error {
-	if cw.Level > level {
-		return nil
-	}
-	if runtime.GOOS == "windows" || !cw.Formatting {
-		cw.lg.Println(msg)
-	} else {
-		cw.lg.Println(colors[level](msg))
-	}
-	return nil
-}
-
-func (_ *ConsoleWriter) Flush() {
-
-}
-
-func (_ *ConsoleWriter) Destroy() {
-}
-
-func printConsole(level LogLevel, msg string) {
-	consoleWriter.WriteMsg(msg, 0, level)
-}
-
-func printfConsole(level LogLevel, format string, v ...interface{}) {
-	consoleWriter.WriteMsg(fmt.Sprintf(format, v...), 0, level)
-}
-
-// ConsoleTrace prints to stdout using TRACE colors
-func ConsoleTrace(s string) {
-	printConsole(TRACE, s)
-}
-
-// ConsoleTracef prints a formatted string to stdout using TRACE colors
-func ConsoleTracef(format string, v ...interface{}) {
-	printfConsole(TRACE, format, v...)
-}
-
-// ConsoleDebug prints to stdout using DEBUG colors
-func ConsoleDebug(s string) {
-	printConsole(DEBUG, s)
-}
-
-// ConsoleDebugf prints a formatted string to stdout using DEBUG colors
-func ConsoleDebugf(format string, v ...interface{}) {
-	printfConsole(DEBUG, format, v...)
-}
-
-// ConsoleInfo prints to stdout using INFO colors
-func ConsoleInfo(s string) {
-	printConsole(INFO, s)
-}
-
-// ConsoleInfof prints a formatted string to stdout using INFO colors
-func ConsoleInfof(format string, v ...interface{}) {
-	printfConsole(INFO, format, v...)
-}
-
-// ConsoleWarn prints to stdout using WARN colors
-func ConsoleWarn(s string) {
-	printConsole(WARN, s)
-}
-
-// ConsoleWarnf prints a formatted string to stdout using WARN colors
-func ConsoleWarnf(format string, v ...interface{}) {
-	printfConsole(WARN, format, v...)
-}
-
-// ConsoleError prints to stdout using ERROR colors
-func ConsoleError(s string) {
-	printConsole(ERROR, s)
-}
-
-// ConsoleErrorf prints a formatted string to stdout using ERROR colors
-func ConsoleErrorf(format string, v ...interface{}) {
-	printfConsole(ERROR, format, v...)
-}
-
-// ConsoleFatal prints to stdout using FATAL colors
-func ConsoleFatal(s string) {
-	printConsole(FATAL, s)
-	os.Exit(1)
-}
-
-// ConsoleFatalf prints a formatted string to stdout using FATAL colors
-func ConsoleFatalf(format string, v ...interface{}) {
-	printfConsole(FATAL, format, v...)
-	os.Exit(1)
-}
-
-func init() {
-	Register("console", NewConsole)
-}

+ 21 - 48
pkg/log/file.go

@@ -5,43 +5,39 @@
 package log
 
 import (
-	"encoding/json"
 	"errors"
 	"fmt"
 	"io/ioutil"
-	"log"
 	"os"
 	"path/filepath"
 	"strings"
 	"sync"
 	"time"
+
+	"github.com/inconshreveable/log15"
 )
 
 // FileLogWriter implements LoggerInterface.
 // It writes messages by lines limit, file size limit, or time frequency.
 type FileLogWriter struct {
-	*log.Logger
 	mw *MuxWriter
-	// The opened file
-	Filename string `json:"filename"`
 
-	Maxlines          int `json:"maxlines"`
+	Format            log15.Format
+	Filename          string
+	Maxlines          int
 	maxlines_curlines int
 
 	// Rotate at size
-	Maxsize         int `json:"maxsize"`
+	Maxsize         int
 	maxsize_cursize int
 
 	// Rotate daily
-	Daily          bool  `json:"daily"`
-	Maxdays        int64 `json:"maxdays"`
+	Daily          bool
+	Maxdays        int64
 	daily_opendate int
 
-	Rotate bool `json:"rotate"`
-
-	startLock sync.Mutex // Only one log can write to the file
-
-	Level LogLevel `json:"level"`
+	Rotate    bool
+	startLock sync.Mutex
 }
 
 // an *os.File writer with locker.
@@ -66,37 +62,29 @@ func (l *MuxWriter) SetFd(fd *os.File) {
 }
 
 // create a FileLogWriter returning as LoggerInterface.
-func NewFileWriter() LoggerInterface {
+func NewFileWriter() *FileLogWriter {
 	w := &FileLogWriter{
 		Filename: "",
+		Format:   log15.LogfmtFormat(),
 		Maxlines: 1000000,
 		Maxsize:  1 << 28, //256 MB
 		Daily:    true,
 		Maxdays:  7,
 		Rotate:   true,
-		Level:    TRACE,
 	}
 	// use MuxWriter instead direct use os.File for lock write when rotate
 	w.mw = new(MuxWriter)
-	// set MuxWriter as Logger's io.Writer
-	w.Logger = log.New(w.mw, "", log.Ldate|log.Ltime)
 	return w
 }
 
-// Init file logger with json config.
-// config like:
-//	{
-//	"filename":"log/gogs.log",
-//	"maxlines":10000,
-//	"maxsize":1<<30,
-//	"daily":true,
-//	"maxdays":15,
-//	"rotate":true
-//	}
-func (w *FileLogWriter) Init(config string) error {
-	if err := json.Unmarshal([]byte(config), w); err != nil {
-		return err
-	}
+func (w *FileLogWriter) Log(r *log15.Record) error {
+	data := w.Format.Format(r)
+	w.docheck(len(data))
+	_, err := w.mw.Write(data)
+	return err
+}
+
+func (w *FileLogWriter) Init() error {
 	if len(w.Filename) == 0 {
 		return errors.New("config must have filename")
 	}
@@ -131,17 +119,6 @@ func (w *FileLogWriter) docheck(size int) {
 	w.maxsize_cursize += size
 }
 
-// write logger message into file.
-func (w *FileLogWriter) WriteMsg(msg string, skip int, level LogLevel) error {
-	if level < w.Level {
-		return nil
-	}
-	n := 24 + len(msg) // 24 stand for the length "2013/06/23 21:00:22 [T] "
-	w.docheck(n)
-	w.Logger.Println(msg)
-	return nil
-}
-
 func (w *FileLogWriter) createLogFile() (*os.File, error) {
 	// Open the log file
 	return os.OpenFile(w.Filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
@@ -227,7 +204,7 @@ func (w *FileLogWriter) deleteOldLog() {
 }
 
 // destroy file logger, close file writer.
-func (w *FileLogWriter) Destroy() {
+func (w *FileLogWriter) Close() {
 	w.mw.fd.Close()
 }
 
@@ -237,7 +214,3 @@ func (w *FileLogWriter) Destroy() {
 func (w *FileLogWriter) Flush() {
 	w.mw.fd.Sync()
 }
-
-func init() {
-	Register("file", NewFileWriter)
-}
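
FileLogWriter no longer registers itself as a log adapter; it is now a plain log15 handler whose Log method formats each record and feeds the rotation bookkeeping. A hedged sketch of wiring it up directly, outside of ReadLoggingConfig; the log file path is an assumption:

package main

import (
	"github.com/grafana/grafana/pkg/log"
	"github.com/inconshreveable/log15"
)

func main() {
	fw := log.NewFileWriter()
	fw.Filename = "/tmp/grafana.log" // assumed path for this sketch
	fw.Rotate = true
	fw.Daily = true
	if err := fw.Init(); err != nil {
		panic(err)
	}
	defer fw.Close()

	// *FileLogWriter satisfies log15.Handler through its Log(*log15.Record) method.
	log15.Root().SetHandler(fw)
	log15.Root().Info("written through the rotating file handler", "rotate", fw.Rotate)
}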

+ 5 - 0
pkg/log/handlers.go

@@ -0,0 +1,5 @@
+package log
+
+type DisposableHandler interface {
+	Close()
+}

+ 31 - 0
pkg/log/interface.go

@@ -0,0 +1,31 @@
+package log
+
+import "github.com/inconshreveable/log15"
+
+type Lvl int
+
+const (
+	LvlCrit Lvl = iota
+	LvlError
+	LvlWarn
+	LvlInfo
+	LvlDebug
+)
+
+type Logger interface {
+	// New returns a new Logger that has this logger's context plus the given context
+	New(ctx ...interface{}) log15.Logger
+
+	// GetHandler gets the handler associated with the logger.
+	GetHandler() log15.Handler
+
+	// SetHandler updates the logger to write records to the specified handler.
+	SetHandler(h log15.Handler)
+
+	// Log a message at the given level with context key/value pairs
+	Debug(msg string, ctx ...interface{})
+	Info(msg string, ctx ...interface{})
+	Warn(msg string, ctx ...interface{})
+	Error(msg string, ctx ...interface{})
+	Crit(msg string, ctx ...interface{})
+}
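
The new pkg/log API replaces printf-style logging with named loggers and key/value context. A minimal sketch of how callers use it, assuming the package from this commit is importable; the logger name and values below are invented for illustration:

package main

import "github.com/grafana/grafana/pkg/log"

func main() {
	// log.New adds a "logger" key to every record written through this logger.
	pluginLogger := log.New("plugins", "subsystem", "loader")
	pluginLogger.Info("Scanning directory", "dir", "/var/lib/grafana/plugins")
	pluginLogger.Error("Scan failed", "error", "permission denied")
}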

+ 131 - 255
pkg/log/log.go

@@ -8,324 +8,200 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
-	"runtime"
 	"strings"
-	"sync"
-)
 
-var (
-	loggers []*Logger
+	"gopkg.in/ini.v1"
+
+	"github.com/inconshreveable/log15"
 )
 
-func NewLogger(bufLen int64, mode, config string) {
-	logger := newLogger(bufLen)
+var Root log15.Logger
+var loggersToClose []DisposableHandler
 
-	isExist := false
-	for _, l := range loggers {
-		if l.adapter == mode {
-			isExist = true
-			l = logger
-		}
-	}
-	if !isExist {
-		loggers = append(loggers, logger)
-	}
-	if err := logger.SetLogger(mode, config); err != nil {
-		Fatal(1, "Fail to set logger(%s): %v", mode, err)
-	}
+func init() {
+	loggersToClose = make([]DisposableHandler, 0)
+	Root = log15.Root()
 }
 
-// this helps you work around the performance annoyance mentioned in
-// https://github.com/grafana/grafana/issues/4055
-// until we refactor this library completely
-func Level(level LogLevel) {
-	for i := range loggers {
-		loggers[i].level = level
-	}
+func New(logger string, ctx ...interface{}) Logger {
+	params := append([]interface{}{"logger", logger}, ctx...)
+	return Root.New(params...)
 }
 
 func Trace(format string, v ...interface{}) {
-	for _, logger := range loggers {
-		logger.Trace(format, v...)
-	}
+	Root.Debug(fmt.Sprintf(format, v...))
 }
 
 func Debug(format string, v ...interface{}) {
-	for _, logger := range loggers {
-		logger.Debug(format, v...)
-	}
+	Root.Debug(fmt.Sprintf(format, v...))
+}
+
+func Debug2(message string, v ...interface{}) {
+	Root.Debug(message, v...)
 }
 
 func Info(format string, v ...interface{}) {
-	for _, logger := range loggers {
-		logger.Info(format, v...)
-	}
+	Root.Info(fmt.Sprintf(format, v...))
+}
+
+func Info2(message string, v ...interface{}) {
+	Root.Info(message, v...)
 }
 
 func Warn(format string, v ...interface{}) {
-	for _, logger := range loggers {
-		logger.Warn(format, v...)
-	}
+	Root.Warn(fmt.Sprintf(format, v...))
+}
+
+func Warn2(message string, v ...interface{}) {
+	Root.Warn(message, v...)
 }
 
 func Error(skip int, format string, v ...interface{}) {
-	for _, logger := range loggers {
-		logger.Error(skip, format, v...)
-	}
+	Root.Error(fmt.Sprintf(format, v...))
+}
+
+func Error2(message string, v ...interface{}) {
+	Root.Error(message, v...)
 }
 
 func Critical(skip int, format string, v ...interface{}) {
-	for _, logger := range loggers {
-		logger.Critical(skip, format, v...)
-	}
+	Root.Crit(fmt.Sprintf(format, v...))
 }
 
 func Fatal(skip int, format string, v ...interface{}) {
-	Error(skip, format, v...)
-	for _, l := range loggers {
-		l.Close()
-	}
+	Root.Crit(fmt.Sprintf(format, v...))
+	Close()
 	os.Exit(1)
 }
 
 func Close() {
-	for _, l := range loggers {
-		l.Close()
-		// delete the logger.
-		l = nil
+	for _, logger := range loggersToClose {
+		logger.Close()
 	}
-	// clear the loggers slice.
-	loggers = nil
+	loggersToClose = make([]DisposableHandler, 0)
 }
 
-// .___        __                 _____
-// |   | _____/  |_  ____________/ ____\____    ____  ____
-// |   |/    \   __\/ __ \_  __ \   __\\__  \ _/ ___\/ __ \
-// |   |   |  \  | \  ___/|  | \/|  |   / __ \\  \__\  ___/
-// |___|___|  /__|  \___  >__|   |__|  (____  /\___  >___  >
-//          \/          \/                  \/     \/    \/
-
-type LogLevel int
-
-const (
-	TRACE LogLevel = iota
-	DEBUG
-	INFO
-	WARN
-	ERROR
-	CRITICAL
-	FATAL
-)
-
-// LoggerInterface represents behaviors of a logger provider.
-type LoggerInterface interface {
-	Init(config string) error
-	WriteMsg(msg string, skip int, level LogLevel) error
-	Destroy()
-	Flush()
+var logLevels = map[string]log15.Lvl{
+	"Trace":    log15.LvlDebug,
+	"Debug":    log15.LvlDebug,
+	"Info":     log15.LvlInfo,
+	"Warn":     log15.LvlWarn,
+	"Error":    log15.LvlError,
+	"Critical": log15.LvlCrit,
 }
 
-type loggerType func() LoggerInterface
+func getLogLevelFromConfig(key string, defaultName string, cfg *ini.File) (string, log15.Lvl) {
+	levelName := cfg.Section(key).Key("level").In(defaultName, []string{"Trace", "Debug", "Info", "Warn", "Error", "Critical"})
+	level := getLogLevelFromString(levelName)
+	return levelName, level
+}
 
-var adapters = make(map[string]loggerType)
+func getLogLevelFromString(levelName string) log15.Lvl {
+	level, ok := logLevels[levelName]
 
-// Register registers given logger provider to adapters.
-func Register(name string, log loggerType) {
-	if log == nil {
-		panic("log: register provider is nil")
+	if !ok {
+		Root.Error("Unknown log level", "level", levelName)
+		return log15.LvlError
 	}
-	if _, dup := adapters[name]; dup {
-		panic("log: register called twice for provider \"" + name + "\"")
-	}
-	adapters[name] = log
-}
 
-type logMsg struct {
-	skip  int
-	level LogLevel
-	msg   string
+	return level
 }
 
-// Logger is default logger in beego application.
-// it can contain several providers and log message into all providers.
-type Logger struct {
-	adapter string
-	lock    sync.Mutex
-	level   LogLevel
-	msg     chan *logMsg
-	outputs map[string]LoggerInterface
-	quit    chan bool
-}
+func getFilters(filterStrArray []string) map[string]log15.Lvl {
+	filterMap := make(map[string]log15.Lvl)
 
-// newLogger initializes and returns a new logger.
-func newLogger(buffer int64) *Logger {
-	l := &Logger{
-		msg:     make(chan *logMsg, buffer),
-		outputs: make(map[string]LoggerInterface),
-		quit:    make(chan bool),
+	for _, filterStr := range filterStrArray {
+		parts := strings.Split(filterStr, ":")
+		filterMap[parts[0]] = getLogLevelFromString(parts[1])
 	}
-	go l.StartLogger()
-	return l
-}
 
-// SetLogger sets new logger instanse with given logger adapter and config.
-func (l *Logger) SetLogger(adapter string, config string) error {
-	l.lock.Lock()
-	defer l.lock.Unlock()
-	if log, ok := adapters[adapter]; ok {
-		lg := log()
-		if err := lg.Init(config); err != nil {
-			return err
-		}
-		l.outputs[adapter] = lg
-		l.adapter = adapter
-	} else {
-		panic("log: unknown adapter \"" + adapter + "\" (forgotten register?)")
-	}
-	return nil
+	return filterMap
 }
 
-// DelLogger removes a logger adapter instance.
-func (l *Logger) DelLogger(adapter string) error {
-	l.lock.Lock()
-	defer l.lock.Unlock()
-	if lg, ok := l.outputs[adapter]; ok {
-		lg.Destroy()
-		delete(l.outputs, adapter)
-	} else {
-		panic("log: unknown adapter \"" + adapter + "\" (forgotten register?)")
-	}
-	return nil
-}
+func ReadLoggingConfig(modes []string, logsPath string, cfg *ini.File) {
+	Close()
 
-func (l *Logger) writerMsg(skip int, level LogLevel, msg string) error {
-	lm := &logMsg{
-		skip:  skip,
-		level: level,
-	}
+	defaultLevelName, _ := getLogLevelFromConfig("log", "Info", cfg)
+	defaultFilters := getFilters(cfg.Section("log").Key("filters").Strings(" "))
 
-	// Only error information needs locate position for debugging.
-	if lm.level >= ERROR {
-		pc, file, line, ok := runtime.Caller(skip)
-		if ok {
-			// Get caller function name.
-			fn := runtime.FuncForPC(pc)
-			var fnName string
-			if fn == nil {
-				fnName = "?()"
-			} else {
-				fnName = strings.TrimLeft(filepath.Ext(fn.Name()), ".") + "()"
-			}
+	handlers := make([]log15.Handler, 0)
 
-			lm.msg = fmt.Sprintf("[%s:%d %s] %s", filepath.Base(file), line, fnName, msg)
-		} else {
-			lm.msg = msg
+	for _, mode := range modes {
+		mode = strings.TrimSpace(mode)
+		sec, err := cfg.GetSection("log." + mode)
+		if err != nil {
+			Root.Error("Unknown log mode", "mode", mode)
 		}
-	} else {
-		lm.msg = msg
-	}
-	l.msg <- lm
-	return nil
-}
 
-// StartLogger starts logger chan reading.
-func (l *Logger) StartLogger() {
-	for {
-		select {
-		case bm := <-l.msg:
-			for _, l := range l.outputs {
-				if err := l.WriteMsg(bm.msg, bm.skip, bm.level); err != nil {
-					fmt.Println("ERROR, unable to WriteMsg:", err)
-				}
-			}
-		case <-l.quit:
-			return
+		// Log level.
+		_, level := getLogLevelFromConfig("log."+mode, defaultLevelName, cfg)
+		modeFilters := getFilters(sec.Key("filters").Strings(" "))
+
+		var handler log15.Handler
+
+		// Generate log configuration.
+		switch mode {
+		case "console":
+			handler = log15.StdoutHandler
+		case "file":
+			fileName := sec.Key("file_name").MustString(filepath.Join(logsPath, "grafana.log"))
+			os.MkdirAll(filepath.Dir(fileName), os.ModePerm)
+			fileHandler := NewFileWriter()
+			fileHandler.Filename = fileName
+			fileHandler.Rotate = sec.Key("log_rotate").MustBool(true)
+			fileHandler.Maxlines = sec.Key("max_lines").MustInt(1000000)
+			fileHandler.Maxsize = 1 << uint(sec.Key("max_size_shift").MustInt(28))
+			fileHandler.Daily = sec.Key("daily_rotate").MustBool(true)
+			fileHandler.Maxdays = sec.Key("max_days").MustInt64(7)
+			fileHandler.Init()
+
+			loggersToClose = append(loggersToClose, fileHandler)
+			handler = fileHandler
+
+			// case "syslog":
+			// 	LogConfigs[i] = util.DynMap{
+			// 		"level":    level,
+			// 		"network":  sec.Key("network").MustString(""),
+			// 		"address":  sec.Key("address").MustString(""),
+			// 		"facility": sec.Key("facility").MustString("local7"),
+			// 		"tag":      sec.Key("tag").MustString(""),
+			// 	}
 		}
-	}
-}
 
-// Flush flushs all chan data.
-func (l *Logger) Flush() {
-	for _, l := range l.outputs {
-		l.Flush()
-	}
-}
-
-// Close closes logger, flush all chan data and destroy all adapter instances.
-func (l *Logger) Close() {
-	l.quit <- true
-	for {
-		if len(l.msg) > 0 {
-			bm := <-l.msg
-			for _, l := range l.outputs {
-				if err := l.WriteMsg(bm.msg, bm.skip, bm.level); err != nil {
-					fmt.Println("ERROR, unable to WriteMsg:", err)
-				}
+		for key, value := range defaultFilters {
+			if _, exist := modeFilters[key]; !exist {
+				modeFilters[key] = value
 			}
-		} else {
-			break
 		}
-	}
-	for _, l := range l.outputs {
-		l.Flush()
-		l.Destroy()
-	}
-}
-
-func (l *Logger) Trace(format string, v ...interface{}) {
-	if l.level > TRACE {
-		return
-	}
-	msg := fmt.Sprintf("[T] "+format, v...)
-	l.writerMsg(0, TRACE, msg)
-}
 
-func (l *Logger) Debug(format string, v ...interface{}) {
-	if l.level > DEBUG {
-		return
-	}
-	msg := fmt.Sprintf("[D] "+format, v...)
-	l.writerMsg(0, DEBUG, msg)
-}
+		for key, value := range modeFilters {
+			fmt.Printf("key: %v, value: %v \n", key, value)
+		}
 
-func (l *Logger) Info(format string, v ...interface{}) {
-	if l.level > INFO {
-		return
+		handler = LogFilterHandler(level, modeFilters, handler)
+		handlers = append(handlers, handler)
 	}
-	msg := fmt.Sprintf("[I] "+format, v...)
-	l.writerMsg(0, INFO, msg)
-}
 
-func (l *Logger) Warn(format string, v ...interface{}) {
-	if l.level > WARN {
-		return
-	}
-	msg := fmt.Sprintf("[W] "+format, v...)
-	l.writerMsg(0, WARN, msg)
+	Root.SetHandler(log15.MultiHandler(handlers...))
 }
 
-func (l *Logger) Error(skip int, format string, v ...interface{}) {
-	if l.level > ERROR {
-		return
-	}
-	msg := fmt.Sprintf("[E] "+format, v...)
-	l.writerMsg(skip, ERROR, msg)
-}
+func LogFilterHandler(maxLevel log15.Lvl, filters map[string]log15.Lvl, h log15.Handler) log15.Handler {
+	return log15.FilterHandler(func(r *log15.Record) (pass bool) {
 
-func (l *Logger) Critical(skip int, format string, v ...interface{}) {
-	if l.level > CRITICAL {
-		return
-	}
-	msg := fmt.Sprintf("[C] "+format, v...)
-	l.writerMsg(skip, CRITICAL, msg)
-}
+		if len(filters) > 0 {
+			for i := 0; i < len(r.Ctx); i += 2 {
+				key := r.Ctx[i].(string)
+				if key == "logger" {
+					loggerName, strOk := r.Ctx[i+1].(string)
+					if strOk {
+						if filterLevel, ok := filters[loggerName]; ok {
+							return r.Lvl <= filterLevel
+						}
+					}
+				}
+			}
+		}
 
-func (l *Logger) Fatal(skip int, format string, v ...interface{}) {
-	if l.level > FATAL {
-		return
-	}
-	msg := fmt.Sprintf("[F] "+format, v...)
-	l.writerMsg(skip, FATAL, msg)
-	l.Close()
-	os.Exit(1)
+		return r.Lvl <= maxLevel
+	}, h)
 }
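
ReadLoggingConfig is now the single entry point: it closes previous handlers, builds one log15 handler per mode from the matching [log.<mode>] section, wraps each in LogFilterHandler, and installs a MultiHandler on the root logger. A sketch of calling it directly; the config path and filter values are assumptions:

package main

import (
	"github.com/grafana/grafana/pkg/log"
	"gopkg.in/ini.v1"
)

func main() {
	cfg, err := ini.Load("/etc/grafana/grafana.ini") // assumed path
	if err != nil {
		panic(err)
	}

	// Modes, logs directory, and the parsed ini file.
	log.ReadLoggingConfig([]string{"console", "file"}, "/var/log/grafana", cfg)

	// With a [log] key such as `filters = sqlstore:Debug tsdb:Info`, individual
	// named loggers get their own level on top of the mode's base level.
	log.New("sqlstore").Debug("emitted only if the sqlstore filter allows Debug")
}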

+ 92 - 91
pkg/log/syslog.go

@@ -2,94 +2,95 @@
 
 package log
 
-import (
-	"encoding/json"
-	"errors"
-	"log/syslog"
-)
-
-type SyslogWriter struct {
-	syslog   *syslog.Writer
-	Network  string `json:"network"`
-	Address  string `json:"address"`
-	Facility string `json:"facility"`
-	Tag      string `json:"tag"`
-}
-
-func NewSyslog() LoggerInterface {
-	return new(SyslogWriter)
-}
-
-func (sw *SyslogWriter) Init(config string) error {
-	if err := json.Unmarshal([]byte(config), sw); err != nil {
-		return err
-	}
-
-	prio, err := parseFacility(sw.Facility)
-	if err != nil {
-		return err
-	}
-
-	w, err := syslog.Dial(sw.Network, sw.Address, prio, sw.Tag)
-	if err != nil {
-		return err
-	}
-
-	sw.syslog = w
-	return nil
-}
-
-func (sw *SyslogWriter) WriteMsg(msg string, skip int, level LogLevel) error {
-	var err error
-
-	switch level {
-	case TRACE, DEBUG:
-		err = sw.syslog.Debug(msg)
-	case INFO:
-		err = sw.syslog.Info(msg)
-	case WARN:
-		err = sw.syslog.Warning(msg)
-	case ERROR:
-		err = sw.syslog.Err(msg)
-	case CRITICAL:
-		err = sw.syslog.Crit(msg)
-	case FATAL:
-		err = sw.syslog.Alert(msg)
-	default:
-		err = errors.New("invalid syslog level")
-	}
-
-	return err
-}
-
-func (sw *SyslogWriter) Destroy() {
-	sw.syslog.Close()
-}
-
-func (sw *SyslogWriter) Flush() {}
-
-var facilities = map[string]syslog.Priority{
-	"user":   syslog.LOG_USER,
-	"daemon": syslog.LOG_DAEMON,
-	"local0": syslog.LOG_LOCAL0,
-	"local1": syslog.LOG_LOCAL1,
-	"local2": syslog.LOG_LOCAL2,
-	"local3": syslog.LOG_LOCAL3,
-	"local4": syslog.LOG_LOCAL4,
-	"local5": syslog.LOG_LOCAL5,
-	"local6": syslog.LOG_LOCAL6,
-	"local7": syslog.LOG_LOCAL7,
-}
-
-func parseFacility(facility string) (syslog.Priority, error) {
-	prio, ok := facilities[facility]
-	if !ok {
-		return syslog.LOG_LOCAL0, errors.New("invalid syslog facility")
-	}
-
-	return prio, nil
-}
-
-func init() {
-	Register("syslog", NewSyslog)
-}
+//
+// import (
+// 	"encoding/json"
+// 	"errors"
+// 	"log/syslog"
+// )
+//
+// type SyslogWriter struct {
+// 	syslog   *syslog.Writer
+// 	Network  string `json:"network"`
+// 	Address  string `json:"address"`
+// 	Facility string `json:"facility"`
+// 	Tag      string `json:"tag"`
+// }
+//
+// func NewSyslog() LoggerInterface {
+// 	return new(SyslogWriter)
+// }
+//
+// func (sw *SyslogWriter) Init(config string) error {
+// 	if err := json.Unmarshal([]byte(config), sw); err != nil {
+// 		return err
+// 	}
+//
+// 	prio, err := parseFacility(sw.Facility)
+// 	if err != nil {
+// 		return err
+// 	}
+//
+// 	w, err := syslog.Dial(sw.Network, sw.Address, prio, sw.Tag)
+// 	if err != nil {
+// 		return err
+// 	}
+//
+// 	sw.syslog = w
+// 	return nil
+// }
+//
+// func (sw *SyslogWriter) WriteMsg(msg string, skip int, level LogLevel) error {
+// 	var err error
+//
+// 	switch level {
+// 	case TRACE, DEBUG:
+// 		err = sw.syslog.Debug(msg)
+// 	case INFO:
+// 		err = sw.syslog.Info(msg)
+// 	case WARN:
+// 		err = sw.syslog.Warning(msg)
+// 	case ERROR:
+// 		err = sw.syslog.Err(msg)
+// 	case CRITICAL:
+// 		err = sw.syslog.Crit(msg)
+// 	case FATAL:
+// 		err = sw.syslog.Alert(msg)
+// 	default:
+// 		err = errors.New("invalid syslog level")
+// 	}
+//
+// 	return err
+// }
+//
+// func (sw *SyslogWriter) Destroy() {
+// 	sw.syslog.Close()
+// }
+//
+// func (sw *SyslogWriter) Flush() {}
+//
+// var facilities = map[string]syslog.Priority{
+// 	"user":   syslog.LOG_USER,
+// 	"daemon": syslog.LOG_DAEMON,
+// 	"local0": syslog.LOG_LOCAL0,
+// 	"local1": syslog.LOG_LOCAL1,
+// 	"local2": syslog.LOG_LOCAL2,
+// 	"local3": syslog.LOG_LOCAL3,
+// 	"local4": syslog.LOG_LOCAL4,
+// 	"local5": syslog.LOG_LOCAL5,
+// 	"local6": syslog.LOG_LOCAL6,
+// 	"local7": syslog.LOG_LOCAL7,
+// }
+//
+// func parseFacility(facility string) (syslog.Priority, error) {
+// 	prio, ok := facilities[facility]
+// 	if !ok {
+// 		return syslog.LOG_LOCAL0, errors.New("invalid syslog facility")
+// 	}
+//
+// 	return prio, nil
+// }
+//
+// func init() {
+// 	Register("syslog", NewSyslog)
+// }

+ 11 - 5
pkg/login/settings.go

@@ -2,6 +2,7 @@ package login
 
 import (
 	"fmt"
+	"os"
 
 	"github.com/BurntSushi/toml"
 	"github.com/grafana/grafana/pkg/log"
@@ -49,21 +50,24 @@ type LdapGroupToOrgRole struct {
 }
 
 var ldapCfg LdapConfig
+var ldapLogger log.Logger = log.New("ldap")
 
 func loadLdapConfig() {
 	if !setting.LdapEnabled {
 		return
 	}
 
-	log.Info("Login: Ldap enabled, reading config file: %s", setting.LdapConfigFile)
+	ldapLogger.Info("Ldap enabled, reading config file", "file", setting.LdapConfigFile)
 
 	_, err := toml.DecodeFile(setting.LdapConfigFile, &ldapCfg)
 	if err != nil {
-		log.Fatal(3, "Failed to load ldap config file: %s", err)
+		ldapLogger.Crit("Failed to load ldap config file", "error", err)
+		os.Exit(1)
 	}
 
 	if len(ldapCfg.Servers) == 0 {
-		log.Fatal(3, "ldap enabled but no ldap servers defined in config file: %s", setting.LdapConfigFile)
+		ldapLogger.Crit("ldap enabled but no ldap servers defined in config file")
+		os.Exit(1)
 	}
 
 	// set default org id
@@ -83,11 +87,13 @@ func assertNotEmptyCfg(val interface{}, propName string) {
 	switch v := val.(type) {
 	case string:
 		if v == "" {
-			log.Fatal(3, "LDAP config file is missing option: %s", propName)
+			ldapLogger.Crit("LDAP config file is missing option", "option", propName)
+			os.Exit(1)
 		}
 	case []string:
 		if len(v) == 0 {
-			log.Fatal(3, "LDAP config file is missing option: %s", propName)
+			ldapLogger.Crit("LDAP config file is missing option", "option", propName)
+			os.Exit(1)
 		}
 	default:
 		fmt.Println("unknown")

+ 5 - 3
pkg/metrics/publish.go

@@ -14,6 +14,8 @@ import (
 	"github.com/grafana/grafana/pkg/setting"
 )
 
+var metricsLogger log.Logger = log.New("metrics")
+
 func Init() {
 	settings := readSettings()
 	initMetricVars(settings)
@@ -54,7 +56,7 @@ func sendUsageStats() {
 		return
 	}
 
-	log.Trace("Sending anonymous usage stats to stats.grafana.org")
+	metricsLogger.Debug("Sending anonymous usage stats to stats.grafana.org")
 
 	version := strings.Replace(setting.BuildVersion, ".", "_", -1)
 
@@ -66,7 +68,7 @@ func sendUsageStats() {
 
 	statsQuery := m.GetSystemStatsQuery{}
 	if err := bus.Dispatch(&statsQuery); err != nil {
-		log.Error(3, "Failed to get system stats", err)
+		metricsLogger.Error("Failed to get system stats", "error", err)
 		return
 	}
 
@@ -80,7 +82,7 @@ func sendUsageStats() {
 
 	dsStats := m.GetDataSourceStatsQuery{}
 	if err := bus.Dispatch(&dsStats); err != nil {
-		log.Error(3, "Failed to get datasource stats", err)
+		metricsLogger.Error("Failed to get datasource stats", "error", err)
 		return
 	}
 

+ 4 - 7
pkg/metrics/settings.go

@@ -1,9 +1,6 @@
 package metrics
 
-import (
-	"github.com/grafana/grafana/pkg/log"
-	"github.com/grafana/grafana/pkg/setting"
-)
+import "github.com/grafana/grafana/pkg/setting"
 
 type MetricPublisher interface {
 	Publish(metrics []Metric)
@@ -24,7 +21,7 @@ func readSettings() *MetricSettings {
 
 	var section, err = setting.Cfg.GetSection("metrics")
 	if err != nil {
-		log.Fatal(3, "Unable to find metrics config section")
+		metricsLogger.Crit("Unable to find metrics config section", "error", err)
 		return nil
 	}
 
@@ -36,9 +33,9 @@ func readSettings() *MetricSettings {
 	}
 
 	if graphitePublisher, err := CreateGraphitePublisher(); err != nil {
-		log.Error(3, "Metrics: Failed to init Graphite metric publisher", err)
+		metricsLogger.Error("Failed to init Graphite metric publisher", "error", err)
 	} else if graphitePublisher != nil {
-		log.Info("Metrics: Graphite publisher initialized")
+		metricsLogger.Info("Metrics publisher initialized", "type", "graphite")
 		settings.Publishers = append(settings.Publishers, graphitePublisher)
 	}
 

+ 6 - 16
pkg/middleware/logger.go

@@ -16,11 +16,9 @@
 package middleware
 
 import (
-	"fmt"
 	"net/http"
 	"time"
 
-	"github.com/grafana/grafana/pkg/log"
 	"github.com/grafana/grafana/pkg/metrics"
 	"github.com/grafana/grafana/pkg/setting"
 	"gopkg.in/macaron.v1"
@@ -31,34 +29,26 @@ func Logger() macaron.Handler {
 		start := time.Now()
 		c.Data["perfmon.start"] = start
 
-		uname := c.GetCookie(setting.CookieUserName)
-		if len(uname) == 0 {
-			uname = "-"
-		}
-
 		rw := res.(macaron.ResponseWriter)
 		c.Next()
 
 		timeTakenMs := time.Since(start) / time.Millisecond
-		content := fmt.Sprintf("Completed %s %s \"%s %s %s\" %v %s %d bytes in %dms", c.RemoteAddr(), uname, req.Method, req.URL.Path, req.Proto, rw.Status(), http.StatusText(rw.Status()), rw.Size(), timeTakenMs)
 
 		if timer, ok := c.Data["perfmon.timer"]; ok {
 			timerTyped := timer.(metrics.Timer)
 			timerTyped.Update(timeTakenMs)
 		}
 
-		switch rw.Status() {
-		case 200, 304:
-			content = fmt.Sprintf("%s", content)
+		status := rw.Status()
+		if status == 200 || status == 304 {
 			if !setting.RouterLogging {
 				return
 			}
-		case 404:
-			content = fmt.Sprintf("%s", content)
-		case 500:
-			content = fmt.Sprintf("%s", content)
 		}
 
-		log.Info(content)
+		if ctx, ok := c.Data["ctx"]; ok {
+			ctxTyped := ctx.(*Context)
+			ctxTyped.Logger.Info("Request Completed", "method", req.Method, "path", req.URL.Path, "status", status, "remote_addr", c.RemoteAddr(), "time_ms", timeTakenMs, "size", rw.Size())
+		}
 	}
 }

+ 5 - 0
pkg/middleware/middleware.go

@@ -23,6 +23,7 @@ type Context struct {
 
 	IsSignedIn     bool
 	AllowAnonymous bool
+	Logger         log.Logger
 }
 
 func GetContextHandler() macaron.Handler {
@@ -33,6 +34,7 @@ func GetContextHandler() macaron.Handler {
 			Session:        GetSession(),
 			IsSignedIn:     false,
 			AllowAnonymous: false,
+			Logger:         log.New("context"),
 		}
 
 		// the order in which these are tested are important
@@ -48,6 +50,9 @@ func GetContextHandler() macaron.Handler {
 			initContextWithAnonymousUser(ctx) {
 		}
 
+		ctx.Logger = log.New("context", "userId", ctx.UserId, "orgId", ctx.OrgId, "uname", ctx.Login)
+		ctx.Data["ctx"] = ctx
+
 		c.Map(ctx)
 	}
 }
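
With the per-request logger attached in GetContextHandler (carrying userId, orgId, and uname), handlers that receive *middleware.Context can log with that context for free. A hypothetical handler as a sketch, written as it would appear in a package that already imports middleware:

func GetSomething(c *middleware.Context) {
	// c.Logger already carries userId, orgId and uname from GetContextHandler.
	c.Logger.Info("Handling request", "path", c.Req.URL.Path)
}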

+ 174 - 0
pkg/middleware/recovery.go

@@ -0,0 +1,174 @@
+// Copyright 2013 Martini Authors
+// Copyright 2014 The Macaron Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package middleware
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"runtime"
+
+	"gopkg.in/macaron.v1"
+
+	"github.com/go-macaron/inject"
+	"github.com/grafana/grafana/pkg/log"
+	"github.com/grafana/grafana/pkg/setting"
+)
+
+const (
+	panicHtml = `<html>
+<head><title>PANIC: %s</title>
+<meta charset="utf-8" />
+<style type="text/css">
+html, body {
+	font-family: "Roboto", sans-serif;
+	color: #333333;
+	background-color: #ea5343;
+	margin: 0px;
+}
+h1 {
+	color: #d04526;
+	background-color: #ffffff;
+	padding: 20px;
+	border-bottom: 1px dashed #2b3848;
+}
+pre {
+	margin: 20px;
+	padding: 20px;
+	border: 2px solid #2b3848;
+	background-color: #ffffff;
+	white-space: pre-wrap;       /* css-3 */
+	white-space: -moz-pre-wrap;  /* Mozilla, since 1999 */
+	white-space: -pre-wrap;      /* Opera 4-6 */
+	white-space: -o-pre-wrap;    /* Opera 7 */
+	word-wrap: break-word;       /* Internet Explorer 5.5+ */
+}
+</style>
+</head><body>
+<h1>PANIC</h1>
+<pre style="font-weight: bold;">%s</pre>
+<pre>%s</pre>
+</body>
+</html>`
+)
+
+var (
+	dunno     = []byte("???")
+	centerDot = []byte("·")
+	dot       = []byte(".")
+	slash     = []byte("/")
+)
+
+// stack returns a nicely formatted stack frame, skipping skip frames
+func stack(skip int) []byte {
+	buf := new(bytes.Buffer) // the returned data
+	// As we loop, we open files and read them. These variables record the currently
+	// loaded file.
+	var lines [][]byte
+	var lastFile string
+	for i := skip; ; i++ { // Skip the expected number of frames
+		pc, file, line, ok := runtime.Caller(i)
+		if !ok {
+			break
+		}
+		// Print this much at least.  If we can't find the source, it won't show.
+		fmt.Fprintf(buf, "%s:%d (0x%x)\n", file, line, pc)
+		if file != lastFile {
+			data, err := ioutil.ReadFile(file)
+			if err != nil {
+				continue
+			}
+			lines = bytes.Split(data, []byte{'\n'})
+			lastFile = file
+		}
+		fmt.Fprintf(buf, "\t%s: %s\n", function(pc), source(lines, line))
+	}
+	return buf.Bytes()
+}
+
+// source returns a space-trimmed slice of the n'th line.
+func source(lines [][]byte, n int) []byte {
+	n-- // in stack trace, lines are 1-indexed but our array is 0-indexed
+	if n < 0 || n >= len(lines) {
+		return dunno
+	}
+	return bytes.TrimSpace(lines[n])
+}
+
+// function returns, if possible, the name of the function containing the PC.
+func function(pc uintptr) []byte {
+	fn := runtime.FuncForPC(pc)
+	if fn == nil {
+		return dunno
+	}
+	name := []byte(fn.Name())
+	// The name includes the path name to the package, which is unnecessary
+	// since the file name is already included.  Plus, it has center dots.
+	// That is, we see
+	//	runtime/debug.*T·ptrmethod
+	// and want
+	//	*T.ptrmethod
+	// Also the package path might contain a dot (e.g. code.google.com/...),
+	// so first eliminate the path prefix
+	if lastslash := bytes.LastIndex(name, slash); lastslash >= 0 {
+		name = name[lastslash+1:]
+	}
+	if period := bytes.Index(name, dot); period >= 0 {
+		name = name[period+1:]
+	}
+	name = bytes.Replace(name, centerDot, dot, -1)
+	return name
+}
+
+// Recovery returns a middleware that recovers from any panics and writes a 500 if there was one.
+// While in development mode, Recovery will also output the panic as HTML.
+func Recovery() macaron.Handler {
+	return func(c *macaron.Context) {
+		defer func() {
+			if err := recover(); err != nil {
+				stack := stack(3)
+
+				panicLogger := log.Root
+				// try to get request logger
+				if ctx, ok := c.Data["ctx"]; ok {
+					ctxTyped := ctx.(*Context)
+					panicLogger = ctxTyped.Logger
+				}
+
+				panicLogger.Error("Request error", "error", err, "stack", string(stack))
+
+				// Lookup the current responsewriter
+				val := c.GetVal(inject.InterfaceOf((*http.ResponseWriter)(nil)))
+				res := val.Interface().(http.ResponseWriter)
+
+				// respond with panic message while in development mode
+				var body []byte
+				if setting.Env == setting.DEV {
+					res.Header().Set("Content-Type", "text/html")
+					body = []byte(fmt.Sprintf(panicHtml, err, err, stack))
+				}
+
+				res.WriteHeader(http.StatusInternalServerError)
+				if nil != body {
+					res.Write(body)
+				}
+			}
+		}()
+
+		c.Next()
+	}
+}
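
A sketch of what Recovery buys, using a standalone macaron app rather than Grafana's router: a panic inside a handler becomes a logged error plus a 500 response instead of a crashed process. The /boom route is invented; the real error test route added to pkg/api/api.go above serves the same purpose inside Grafana.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"gopkg.in/macaron.v1"

	"github.com/grafana/grafana/pkg/middleware"
)

func main() {
	m := macaron.New()
	m.Use(middleware.Recovery())
	m.Get("/boom", func() { panic("boom") })

	rec := httptest.NewRecorder()
	req, _ := http.NewRequest("GET", "/boom", nil)
	m.ServeHTTP(rec, req)

	fmt.Println(rec.Code) // 500
}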

+ 1 - 2
pkg/plugins/models.go

@@ -6,7 +6,6 @@ import (
 	"fmt"
 	"strings"
 
-	"github.com/grafana/grafana/pkg/log"
 	m "github.com/grafana/grafana/pkg/models"
 	"github.com/grafana/grafana/pkg/setting"
 )
@@ -58,7 +57,7 @@ func (pb *PluginBase) registerPlugin(pluginDir string) error {
 	}
 
 	if !strings.HasPrefix(pluginDir, setting.StaticRootPath) {
-		log.Info("Plugins: Registering plugin %v", pb.Name)
+		plog.Info("Registering plugin", "name", pb.Name)
 	}
 
 	if len(pb.Dependencies.Plugins) == 0 {

+ 7 - 4
pkg/plugins/plugins.go

@@ -25,6 +25,7 @@ var (
 
 	GrafanaLatestVersion string
 	GrafanaHasUpdate     bool
+	plog                 log.Logger
 )
 
 type PluginScanner struct {
@@ -33,6 +34,8 @@ type PluginScanner struct {
 }
 
 func Init() error {
+	plog = log.New("plugins")
+
 	DataSources = make(map[string]*DataSourcePlugin)
 	StaticRoutes = make([]*PluginStaticRoute, 0)
 	Panels = make(map[string]*PanelPlugin)
@@ -44,16 +47,16 @@ func Init() error {
 		"app":        AppPlugin{},
 	}
 
-	log.Info("Plugins: Scan starting")
+	plog.Info("Starting plugin search")
 	scan(path.Join(setting.StaticRootPath, "app/plugins"))
 
 	// check if plugins dir exists
 	if _, err := os.Stat(setting.PluginsPath); os.IsNotExist(err) {
-		log.Warn("Plugins: Plugin dir %v does not exist", setting.PluginsPath)
+		plog.Warn("Plugin dir does not exist", "dir", setting.PluginsPath)
 		if err = os.MkdirAll(setting.PluginsPath, os.ModePerm); err != nil {
-			log.Warn("Plugins: Failed to create plugin dir: %v, error: %v", setting.PluginsPath, err)
+			plog.Warn("Failed to create plugin dir", "dir", setting.PluginsPath, "error", err)
 		} else {
-			log.Info("Plugins: Plugin dir %v created", setting.PluginsPath)
+			plog.Info("Plugin dir created", "dir", setting.PluginsPath)
 			scan(setting.PluginsPath)
 		}
 	} else {

+ 0 - 2
pkg/services/alerting/alerting.go

@@ -19,8 +19,6 @@ func Init() {
 		return
 	}
 
-	log.Info("Alerting: Initializing alerting engine...")
-
 	engine = NewEngine()
 	engine.Start()
 

+ 24 - 8
pkg/services/alerting/engine.go

@@ -5,7 +5,9 @@ import (
 	"time"
 
 	"github.com/benbjohnson/clock"
+	"github.com/grafana/grafana/pkg/bus"
 	"github.com/grafana/grafana/pkg/log"
+	m "github.com/grafana/grafana/pkg/models"
 	"github.com/grafana/grafana/pkg/services/alerting/alertstates"
 )
 
@@ -17,6 +19,7 @@ type Engine struct {
 	scheduler   Scheduler
 	executor    Executor
 	ruleReader  RuleReader
+	log         log.Logger
 }
 
 func NewEngine() *Engine {
@@ -25,15 +28,16 @@ func NewEngine() *Engine {
 		execQueue:   make(chan *AlertJob, 1000),
 		resultQueue: make(chan *AlertResult, 1000),
 		scheduler:   NewScheduler(),
-		executor:    &ExecutorImpl{},
+		executor:    NewExecutor(),
 		ruleReader:  NewRuleReader(),
+		log:         log.New("alerting.engine"),
 	}
 
 	return e
 }
 
 func (e *Engine) Start() {
-	log.Info("Alerting: engine.Start()")
+	e.log.Info("Starting Alerting Engine")
 
 	go e.alertingTicker()
 	go e.execDispatch()
@@ -84,17 +88,18 @@ func (e *Engine) executeJob(job *AlertJob) {
 			Error:    fmt.Errorf("Timeout"),
 			AlertJob: job,
 		}
-		log.Trace("Alerting: engine.executeJob(): timeout")
+		e.log.Debug("Job Execution timeout", "alertRuleId", job.Rule.Id)
+
 	case result := <-resultChan:
 		result.Duration = float64(time.Since(now).Nanoseconds()) / float64(1000000)
-		log.Trace("Alerting: engine.executeJob(): done %vms", result.Duration)
+		e.log.Debug("Job Execution done", "time_taken", result.Duration, "ruleId", job.Rule.Id)
 		e.resultQueue <- result
 	}
 }
 
 func (e *Engine) resultHandler() {
 	for result := range e.resultQueue {
-		log.Debug("Alerting: engine.resultHandler(): alert(%d) status(%s) actual(%v) retry(%d)", result.AlertJob.Rule.Id, result.State, result.ActualValue, result.AlertJob.RetryCount)
+		e.log.Debug("Alert Rule Result", "ruleId", result.AlertJob.Rule.Id, "state", result.State, "value", result.ActualValue, "retry", result.AlertJob.RetryCount)
 
 		result.AlertJob.Running = false
 
@@ -103,11 +108,10 @@ func (e *Engine) resultHandler() {
 			result.AlertJob.RetryCount++
 
 			if result.AlertJob.RetryCount < maxRetries {
-				log.Error(3, "Alerting: Rule('%s') Result Error: %v, Retrying..", result.AlertJob.Rule.Name, result.Error)
-
+				e.log.Error("Alert Rule Result Error", "ruleId", result.AlertJob.Rule.Id, "error", result.Error, "retry", result.AlertJob.RetryCount)
 				e.execQueue <- result.AlertJob
 			} else {
-				log.Error(3, "Alerting: Rule('%s') Result Error: %v, Max retries reached", result.AlertJob.Rule.Name, result.Error)
+				e.log.Error("Alert Rule Result Error After Max Retries", "ruleId", result.AlertJob.Rule.Id, "error", result.Error, "retry", result.AlertJob.RetryCount)
 
 				result.State = alertstates.Critical
 				result.Description = fmt.Sprintf("Failed to run check after %d retires, Error: %v", maxRetries, result.Error)
@@ -119,3 +123,15 @@ func (e *Engine) resultHandler() {
 		}
 	}
 }
+
+func (e *Engine) saveState(result *AlertResult) {
+	cmd := &m.UpdateAlertStateCommand{
+		AlertId:  result.AlertJob.Rule.Id,
+		NewState: result.State,
+		Info:     result.Description,
+	}
+
+	if err := bus.Dispatch(cmd); err != nil {
+		e.log.Error("Failed to save state", "error", err)
+	}
+}

+ 9 - 2
pkg/services/alerting/executor.go

@@ -19,6 +19,13 @@ var (
 )
 
 type ExecutorImpl struct {
+	log log.Logger
+}
+
+func NewExecutor() *ExecutorImpl {
+	return &ExecutorImpl{
+		log: log.New("alerting.executor"),
+	}
 }
 
 type compareFn func(float64, float64) bool
@@ -147,10 +154,10 @@ func (e *ExecutorImpl) GetRequestForAlertRule(rule *AlertRule, datasource *m.Dat
 }
 
 func (e *ExecutorImpl) evaluateRule(rule *AlertRule, series tsdb.TimeSeriesSlice) *AlertResult {
-	log.Trace("Alerting: executor.evaluateRule: %v, query result: series: %v", rule.Name, len(series))
+	e.log.Debug("Evaluating Alerting Rule", "seriesCount", len(series), "ruleName", rule.Name)
 
 	for _, serie := range series {
-		log.Info("Alerting: executor.validate: %v", serie.Name)
+		log.Debug("Evaluating series", "series", serie.Name)
 
 		if aggregator[rule.Aggregator] == nil {
 			continue

+ 9 - 18
pkg/services/sqlstore/migrator/migrator.go

@@ -11,11 +11,10 @@ import (
 )
 
 type Migrator struct {
-	LogLevel log.LogLevel
-
 	x          *xorm.Engine
 	dialect    Dialect
 	migrations []Migration
+	Logger     log.Logger
 }
 
 type MigrationLog struct {
@@ -30,7 +29,7 @@ type MigrationLog struct {
 func NewMigrator(engine *xorm.Engine) *Migrator {
 	mg := &Migrator{}
 	mg.x = engine
-	mg.LogLevel = log.WARN
+	mg.Logger = log.New("migrator")
 	mg.migrations = make([]Migration, 0)
 	mg.dialect = NewDialect(mg.x.DriverName())
 	return mg
@@ -69,9 +68,7 @@ func (mg *Migrator) GetMigrationLog() (map[string]MigrationLog, error) {
 }
 
 func (mg *Migrator) Start() error {
-	if mg.LogLevel <= log.INFO {
-		log.Info("Migrator: Starting DB migration")
-	}
+	mg.Logger.Info("Starting DB migration")
 
 	logMap, err := mg.GetMigrationLog()
 	if err != nil {
@@ -81,9 +78,7 @@ func (mg *Migrator) Start() error {
 	for _, m := range mg.migrations {
 		_, exists := logMap[m.Id()]
 		if exists {
-			if mg.LogLevel <= log.DEBUG {
-				log.Debug("Migrator: Skipping migration: %v, Already executed", m.Id())
-			}
+			mg.Logger.Debug("Skipping migration: Already executed", "id", m.Id())
 			continue
 		}
 
@@ -95,12 +90,10 @@ func (mg *Migrator) Start() error {
 			Timestamp:   time.Now(),
 		}
 
-		if mg.LogLevel <= log.DEBUG {
-			log.Debug("Migrator: Executing SQL: \n %v \n", sql)
-		}
+		mg.Logger.Debug("Executing", "sql", sql)
 
 		if err := mg.exec(m); err != nil {
-			log.Error(3, "Migrator: error: \n%s:\n%s", err, sql)
+			mg.Logger.Error("Exec failed", "error", err, "sql", sql)
 			record.Error = err.Error()
 			mg.x.Insert(&record)
 			return err
@@ -114,9 +107,7 @@ func (mg *Migrator) Start() error {
 }
 
 func (mg *Migrator) exec(m Migration) error {
-	if mg.LogLevel <= log.INFO {
-		log.Info("Migrator: exec migration id: %v", m.Id())
-	}
+	log.Info("Executing migration", "id", m.Id())
 
 	err := mg.inTransaction(func(sess *xorm.Session) error {
 
@@ -125,14 +116,14 @@ func (mg *Migrator) exec(m Migration) error {
 			sql, args := condition.Sql(mg.dialect)
 			results, err := sess.Query(sql, args...)
 			if err != nil || len(results) == 0 {
-				log.Info("Migrator: skipping migration id: %v, condition not fulfilled", m.Id())
+				mg.Logger.Info("Skipping migration condition not fulfilled", "id", m.Id())
 				return sess.Rollback()
 			}
 		}
 
 		_, err := sess.Exec(m.Sql(mg.dialect))
 		if err != nil {
-			log.Error(3, "Migrator: exec FAILED migration id: %v, err: %v", m.Id(), err)
+			mg.Logger.Error("Executing migration failed", "id", m.Id(), "error", err)
 			return err
 		}
 		return nil

+ 7 - 18
pkg/services/sqlstore/sqlstore.go

@@ -40,8 +40,8 @@ var (
 	}
 
 	mysqlConfig MySQLConfig
-
-	UseSQLite3 bool
+	UseSQLite3  bool
+	sqlog       log.Logger = log.New("sqlstore")
 )
 
 func EnsureAdminUser() {
@@ -74,13 +74,15 @@ func NewEngine() {
 	x, err := getEngine()
 
 	if err != nil {
-		log.Fatal(3, "Sqlstore: Fail to connect to database: %v", err)
+		sqlog.Crit("Fail to connect to database", "error", err)
+		os.Exit(1)
 	}
 
 	err = SetEngine(x, setting.Env == setting.DEV)
 
 	if err != nil {
-		log.Fatal(3, "fail to initialize orm engine: %v", err)
+		sqlog.Error("Fail to initialize orm engine: %v", err)
+		os.Exit(1)
 	}
 }
 
@@ -89,24 +91,12 @@ func SetEngine(engine *xorm.Engine, enableLog bool) (err error) {
 	dialect = migrator.NewDialect(x.DriverName())
 
 	migrator := migrator.NewMigrator(x)
-	migrator.LogLevel = log.INFO
 	migrations.AddMigrations(migrator)
 
 	if err := migrator.Start(); err != nil {
 		return fmt.Errorf("Sqlstore::Migration failed err: %v\n", err)
 	}
 
-	if enableLog {
-		logPath := path.Join(setting.LogsPath, "xorm.log")
-		os.MkdirAll(path.Dir(logPath), os.ModePerm)
-
-		f, err := os.Create(logPath)
-		if err != nil {
-			return fmt.Errorf("sqlstore.init(fail to create xorm.log): %v", err)
-		}
-		x.Logger = xorm.NewSimpleLogger(f)
-	}
-
 	return nil
 }
 
@@ -158,8 +148,7 @@ func getEngine() (*xorm.Engine, error) {
 		return nil, fmt.Errorf("Unknown database type: %s", DbCfg.Type)
 	}
 
-	log.Info("Database: %v", DbCfg.Type)
-
+	sqlog.Info("Initializing DB", "dbtype", DbCfg.Type)
 	return xorm.NewEngine(DbCfg.Type, cnnstr)
 }
 

+ 14 - 5
pkg/services/sqlstore/user.go

@@ -161,14 +161,23 @@ func GetUserByLogin(query *m.GetUserByLoginQuery) error {
 	}
 
 	user := new(m.User)
-	if strings.Contains(query.LoginOrEmail, "@") {
-		user = &m.User{Email: query.LoginOrEmail}
-	} else {
-		user = &m.User{Login: query.LoginOrEmail}
-	}
 
+	// Try and find the user by login first.
+	// It's not sufficient to assume that a LoginOrEmail with an "@" is an email.
+	user = &m.User{Login: query.LoginOrEmail}
 	has, err := x.Get(user)
 
+	if err != nil {
+		return err
+	}
+
+	if has == false && strings.Contains(query.LoginOrEmail, "@") {
+		// If the user wasn't found, and it contains an "@" fallback to finding the
+		// user by email.
+		user = &m.User{Email: query.LoginOrEmail}
+		has, err = x.Get(user)
+	}
+
 	if err != nil {
 		return err
 	} else if has == false {
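
The lookup now tries the login column first and only falls back to the email column when nothing matched and the value contains an "@", so a login that happens to look like an email address resolves correctly. A sketch of the query from the caller's side, assuming the usual bus and models ("m") imports; the concrete value is invented:

query := m.GetUserByLoginQuery{LoginOrEmail: "ops@example.com"}
if err := bus.Dispatch(&query); err != nil {
	// err is m.ErrUserNotFound when neither login nor email matched (assumption).
	return err
}
user := query.Result // found by login first, then by email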

+ 23 - 116
pkg/setting/setting.go

@@ -5,7 +5,6 @@ package setting
 
 import (
 	"bytes"
-	"encoding/json"
 	"fmt"
 	"net/url"
 	"os"
@@ -142,6 +141,9 @@ var (
 
 	// Alerting
 	AlertingEnabled bool
+
+	// logger
+	logger log.Logger
 )
 
 type CommandLineArgs struct {
@@ -152,7 +154,7 @@ type CommandLineArgs struct {
 
 func init() {
 	IsWindows = runtime.GOOS == "windows"
-	log.NewLogger(0, "console", `{"level": 0, "formatting":true}`)
+	logger = log.New("settings")
 }
 
 func parseAppUrlAndSubUrl(section *ini.Section) (string, string) {
@@ -338,7 +340,7 @@ func loadConfiguration(args *CommandLineArgs) {
 
 	// init logging before specific config so we can log errors from here on
 	DataPath = makeAbsolute(Cfg.Section("paths").Key("data").String(), HomePath)
-	initLogging(args)
+	initLogging()
 
 	// load specified config file
 	loadSpecifedConfigFile(args.Config)
@@ -354,7 +356,7 @@ func loadConfiguration(args *CommandLineArgs) {
 
 	// update data path and logging config
 	DataPath = makeAbsolute(Cfg.Section("paths").Key("data").String(), HomePath)
-	initLogging(args)
+	initLogging()
 }
 
 func pathExists(path string) bool {
@@ -549,134 +551,39 @@ func readSessionConfig() {
 	}
 }
 
-var logLevels = map[string]int{
-	"Trace":    0,
-	"Debug":    1,
-	"Info":     2,
-	"Warn":     3,
-	"Error":    4,
-	"Critical": 5,
-}
-
-func getLogLevel(key string, defaultName string) (string, int) {
-	levelName := Cfg.Section(key).Key("level").In(defaultName, []string{"Trace", "Debug", "Info", "Warn", "Error", "Critical"})
-
-	level, ok := logLevels[levelName]
-	if !ok {
-		log.Fatal(4, "Unknown log level: %s", levelName)
-	}
-
-	return levelName, level
-}
-
-func initLogging(args *CommandLineArgs) {
-	//close any existing log handlers.
-	log.Close()
-	// Get and check log mode.
+func initLogging() {
+	// split on comma
 	LogModes = strings.Split(Cfg.Section("log").Key("mode").MustString("console"), ",")
-	LogsPath = makeAbsolute(Cfg.Section("paths").Key("logs").String(), HomePath)
-
-	defaultLevelName, _ := getLogLevel("log", "Info")
-
-	LogConfigs = make([]util.DynMap, len(LogModes))
-
-	for i, mode := range LogModes {
-
-		mode = strings.TrimSpace(mode)
-		sec, err := Cfg.GetSection("log." + mode)
-		if err != nil {
-			log.Fatal(4, "Unknown log mode: %s", mode)
-		}
-
-		// Log level.
-		_, level := getLogLevel("log."+mode, defaultLevelName)
-
-		// Generate log configuration.
-		switch mode {
-		case "console":
-			formatting := sec.Key("formatting").MustBool(true)
-			LogConfigs[i] = util.DynMap{
-				"level":      level,
-				"formatting": formatting,
-			}
-		case "file":
-			logPath := sec.Key("file_name").MustString(filepath.Join(LogsPath, "grafana.log"))
-			os.MkdirAll(filepath.Dir(logPath), os.ModePerm)
-			LogConfigs[i] = util.DynMap{
-				"level":    level,
-				"filename": logPath,
-				"rotate":   sec.Key("log_rotate").MustBool(true),
-				"maxlines": sec.Key("max_lines").MustInt(1000000),
-				"maxsize":  1 << uint(sec.Key("max_size_shift").MustInt(28)),
-				"daily":    sec.Key("daily_rotate").MustBool(true),
-				"maxdays":  sec.Key("max_days").MustInt(7),
-			}
-		case "conn":
-			LogConfigs[i] = util.DynMap{
-				"level":          level,
-				"reconnectOnMsg": sec.Key("reconnect_on_msg").MustBool(),
-				"reconnect":      sec.Key("reconnect").MustBool(),
-				"net":            sec.Key("protocol").In("tcp", []string{"tcp", "unix", "udp"}),
-				"addr":           sec.Key("addr").MustString(":7020"),
-			}
-		case "smtp":
-			LogConfigs[i] = util.DynMap{
-				"level":     level,
-				"user":      sec.Key("user").MustString("example@example.com"),
-				"passwd":    sec.Key("passwd").MustString("******"),
-				"host":      sec.Key("host").MustString("127.0.0.1:25"),
-				"receivers": sec.Key("receivers").MustString("[]"),
-				"subject":   sec.Key("subject").MustString("Diagnostic message from serve"),
-			}
-		case "database":
-			LogConfigs[i] = util.DynMap{
-				"level":  level,
-				"driver": sec.Key("driver").String(),
-				"conn":   sec.Key("conn").String(),
-			}
-		case "syslog":
-			LogConfigs[i] = util.DynMap{
-				"level":    level,
-				"network":  sec.Key("network").MustString(""),
-				"address":  sec.Key("address").MustString(""),
-				"facility": sec.Key("facility").MustString("local7"),
-				"tag":      sec.Key("tag").MustString(""),
-			}
-		}
-
-		cfgJsonBytes, _ := json.Marshal(LogConfigs[i])
-		log.NewLogger(Cfg.Section("log").Key("buffer_len").MustInt64(10000), mode, string(cfgJsonBytes))
+	// also try space
+	if len(LogModes) == 1 {
+		LogModes = strings.Split(Cfg.Section("log").Key("mode").MustString("console"), " ")
 	}
+	LogsPath = makeAbsolute(Cfg.Section("paths").Key("logs").String(), HomePath)
+	log.ReadLoggingConfig(LogModes, LogsPath, Cfg)
 }
 
 func LogConfigurationInfo() {
 	var text bytes.Buffer
-	text.WriteString("Configuration Info\n")
 
-	text.WriteString("Config files:\n")
-	for i, file := range configFiles {
-		text.WriteString(fmt.Sprintf("  [%d]: %s\n", i, file))
+	for _, file := range configFiles {
+		logger.Info("Config loaded from", "file", file)
 	}
 
 	if len(appliedCommandLineProperties) > 0 {
-		text.WriteString("Command lines overrides:\n")
-		for i, prop := range appliedCommandLineProperties {
-			text.WriteString(fmt.Sprintf("  [%d]: %s\n", i, prop))
+		for _, prop := range appliedCommandLineProperties {
+			logger.Info("Config overriden from command line", "arg", prop)
 		}
 	}
 
 	if len(appliedEnvOverrides) > 0 {
 		text.WriteString("\tEnvironment variables used:\n")
-		for i, prop := range appliedEnvOverrides {
-			text.WriteString(fmt.Sprintf("  [%d]: %s\n", i, prop))
+		for _, prop := range appliedEnvOverrides {
+			logger.Info("Config overriden from Environment variable", "var", prop)
 		}
 	}
 
-	text.WriteString("Paths:\n")
-	text.WriteString(fmt.Sprintf("  home: %s\n", HomePath))
-	text.WriteString(fmt.Sprintf("  data: %s\n", DataPath))
-	text.WriteString(fmt.Sprintf("  logs: %s\n", LogsPath))
-	text.WriteString(fmt.Sprintf("  plugins: %s\n", PluginsPath))
-
-	log.Info(text.String())
+	logger.Info("Path Home", "path", HomePath)
+	logger.Info("Path Data", "path", DataPath)
+	logger.Info("Path Logs", "path", LogsPath)
+	logger.Info("Path Plugins", "path", PluginsPath)
 }

+ 2 - 1
public/app/app.ts

@@ -41,10 +41,11 @@ export class GrafanaApp {
     var app = angular.module('grafana', []);
     app.constant('grafanaVersion', "@grafanaVersion@");
 
-    app.config(($locationProvider, $controllerProvider, $compileProvider, $filterProvider, $provide) => {
+    app.config(($locationProvider, $controllerProvider, $compileProvider, $filterProvider, $httpProvider, $provide) => {
       if (config.buildInfo.env !== 'development') {
         $compileProvider.debugInfoEnabled(false);
       }
+      $httpProvider.useApplyAsync(true);
 
       this.registerFunctions.controller = $controllerProvider.register;
       this.registerFunctions.directive  = $compileProvider.directive;

+ 1 - 1
public/app/core/directives/plugin_component.ts

@@ -211,7 +211,7 @@ function pluginDirectiveLoader($compile, datasourceSrv, $rootScope, $q, $http, $
     // let a binding digest cycle complete before adding to dom
     setTimeout(function() {
       elem.append(child);
-      scope.$apply(function() {
+      scope.$applyAsync(function() {
         scope.$broadcast('refresh');
       });
     });