
Sqlstore refactor (#11908)

* refactor: tracing service refactoring

* refactor: sqlstore to instance service

* refactor: sqlstore & registry priority

* refactor: sqlstore refactor wip

* sqlstore: progress on getting tests to work again

* sqlstore: progress on refactoring and getting tests working

* sqlstore: connection string fix

* fix: not sure why this test is not working and required changing expires

* fix: updated grafana-cli
Torkel Ödegaard, 7 years ago
commit 6c0752473a
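The thrust of the commit: services now self-register with pkg/registry as descriptors carrying an init priority, and the server resolves and initializes them through that registry instead of special-casing packages such as sqlstore. A minimal sketch of the resulting lifecycle, assembled from the pkg/registry and pkg/cmd/grafana-server hunks below (the initServices helper itself is hypothetical):

package server

import (
	"log"

	"github.com/facebookgo/inject"
	"github.com/grafana/grafana/pkg/registry"
)

// initServices condenses the server.go changes below: provide every registered
// service instance to the dependency graph, then initialize them in priority
// order so SqlStore comes up before the services that depend on it.
func initServices() error {
	var graph inject.Graph

	for _, descriptor := range registry.GetServices() {
		if err := graph.Provide(&inject.Object{Value: descriptor.Instance}); err != nil {
			return err
		}
	}
	if err := graph.Populate(); err != nil {
		return err
	}

	// GetServices returns descriptors sorted by InitPriority, High before Low.
	for _, descriptor := range registry.GetServices() {
		if registry.IsDisabled(descriptor.Instance) {
			continue
		}
		log.Println("Initializing " + descriptor.Name)
		if err := descriptor.Instance.Init(); err != nil {
			return err
		}
	}
	return nil
}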

pkg/cmd/grafana-cli/commands/commands.go (+3, -1)

@@ -22,7 +22,9 @@ func runDbCommand(command func(commandLine CommandLine) error) func(context *cli
 			Args:     flag.Args(),
 		})
 
-		sqlstore.NewEngine()
+		engine := &sqlstore.SqlStore{}
+		engine.Cfg = cfg
+		engine.Init()
 
 		if err := command(cmd); err != nil {
 			logger.Errorf("\n%s: ", color.RedString("Error"))
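Note that SqlStore.Init() now returns an error instead of exiting the process itself; a hedged variant of the wiring above that surfaces that error rather than discarding it (the initSqlStore helper is hypothetical):

package commands

import (
	"fmt"

	"github.com/grafana/grafana/pkg/services/sqlstore"
	"github.com/grafana/grafana/pkg/setting"
)

// initSqlStore mirrors the wiring above but propagates the Init error.
func initSqlStore(cfg *setting.Cfg) (*sqlstore.SqlStore, error) {
	engine := &sqlstore.SqlStore{}
	engine.Cfg = cfg
	if err := engine.Init(); err != nil {
		return nil, fmt.Errorf("failed to initialize sqlstore: %v", err)
	}
	return engine, nil
}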

pkg/cmd/grafana-server/server.go (+10, -15)

@@ -8,7 +8,6 @@ import (
 	"net"
 	"os"
 	"path/filepath"
-	"reflect"
 	"strconv"
 	"time"
 
@@ -23,7 +22,6 @@ import (
 	"github.com/grafana/grafana/pkg/api"
 	"github.com/grafana/grafana/pkg/log"
 	"github.com/grafana/grafana/pkg/login"
-	"github.com/grafana/grafana/pkg/services/sqlstore"
 	"github.com/grafana/grafana/pkg/setting"
 
 	"github.com/grafana/grafana/pkg/social"
@@ -37,6 +35,7 @@ import (
 	_ "github.com/grafana/grafana/pkg/services/notifications"
 	_ "github.com/grafana/grafana/pkg/services/provisioning"
 	_ "github.com/grafana/grafana/pkg/services/search"
+	_ "github.com/grafana/grafana/pkg/services/sqlstore"
 	_ "github.com/grafana/grafana/pkg/tracing"
 )
 
@@ -70,10 +69,6 @@ func (g *GrafanaServerImpl) Run() error {
 	g.loadConfiguration()
 	g.writePIDFile()
 
-	// initSql
-	sqlstore.NewEngine() // TODO: this should return an error
-	sqlstore.EnsureAdminUser()
-
 	login.Init()
 	social.NewOAuthService()
 
@@ -88,7 +83,7 @@ func (g *GrafanaServerImpl) Run() error {
 
 	// Add all services to dependency graph
 	for _, service := range services {
-		serviceGraph.Provide(&inject.Object{Value: service})
+		serviceGraph.Provide(&inject.Object{Value: service.Instance})
 	}
 
 	serviceGraph.Provide(&inject.Object{Value: g})
@@ -100,25 +95,25 @@ func (g *GrafanaServerImpl) Run() error {
 
 	// Init & start services
 	for _, service := range services {
-		if registry.IsDisabled(service) {
+		if registry.IsDisabled(service.Instance) {
 			continue
 		}
 
-		g.log.Info("Initializing " + reflect.TypeOf(service).Elem().Name())
+		g.log.Info("Initializing " + service.Name)
 
-		if err := service.Init(); err != nil {
+		if err := service.Instance.Init(); err != nil {
 			return fmt.Errorf("Service init failed: %v", err)
 		}
 	}
 
 	// Start background services
-	for index := range services {
-		service, ok := services[index].(registry.BackgroundService)
+	for _, descriptor := range services {
+		service, ok := descriptor.Instance.(registry.BackgroundService)
 		if !ok {
 			continue
 		}
 
-		if registry.IsDisabled(services[index]) {
+		if registry.IsDisabled(descriptor.Instance) {
 			continue
 		}
 
@@ -133,9 +128,9 @@ func (g *GrafanaServerImpl) Run() error {
 
 			// If error is not canceled then the service crashed
 			if err != context.Canceled && err != nil {
-				g.log.Error("Stopped "+reflect.TypeOf(service).Elem().Name(), "reason", err)
+				g.log.Error("Stopped "+descriptor.Name, "reason", err)
 			} else {
-				g.log.Info("Stopped "+reflect.TypeOf(service).Elem().Name(), "reason", err)
+				g.log.Info("Stopped "+descriptor.Name, "reason", err)
 			}
 
 			// Mark that we are in shutdown mode

pkg/registry/registry.go (+35, -4)

@@ -2,15 +2,35 @@ package registry
 
 import (
 	"context"
+	"reflect"
+	"sort"
 )
 
-var services = []Service{}
+type Descriptor struct {
+	Name         string
+	Instance     Service
+	InitPriority Priority
+}
+
+var services []*Descriptor
 
-func RegisterService(srv Service) {
-	services = append(services, srv)
+func RegisterService(instance Service) {
+	services = append(services, &Descriptor{
+		Name:         reflect.TypeOf(instance).Elem().Name(),
+		Instance:     instance,
+		InitPriority: Low,
+	})
 }
 
-func GetServices() []Service {
+func Register(descriptor *Descriptor) {
+	services = append(services, descriptor)
+}
+
+func GetServices() []*Descriptor {
+	sort.Slice(services, func(i, j int) bool {
+		return services[i].InitPriority > services[j].InitPriority
+	})
+
 	return services
 }
 
@@ -27,7 +47,18 @@ type BackgroundService interface {
 	Run(ctx context.Context) error
 }
 
+type HasInitPriority interface {
+	GetInitPriority() Priority
+}
+
 func IsDisabled(srv Service) bool {
 	canBeDisabled, ok := srv.(CanBeDisabled)
 	return ok && canBeDisabled.IsDisabled()
 }
+
+type Priority int
+
+const (
+	High Priority = 100
+	Low  Priority = 0
+)

pkg/services/sqlstore/dashboard_snapshot_test.go (+6, -7)

@@ -4,7 +4,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/go-xorm/xorm"
 	. "github.com/smartystreets/goconvey/convey"
 
 	"github.com/grafana/grafana/pkg/components/simplejson"
@@ -110,14 +109,14 @@ func TestDashboardSnapshotDBAccess(t *testing.T) {
 }
 
 func TestDeleteExpiredSnapshots(t *testing.T) {
-	x := InitTestDB(t)
+	sqlstore := InitTestDB(t)
 
 	Convey("Testing dashboard snapshots clean up", t, func() {
 		setting.SnapShotRemoveExpired = true
 
-		notExpiredsnapshot := createTestSnapshot(x, "key1", 48000)
-		createTestSnapshot(x, "key2", -1200)
-		createTestSnapshot(x, "key3", -1200)
+		notExpiredsnapshot := createTestSnapshot(sqlstore, "key1", 48000)
+		createTestSnapshot(sqlstore, "key2", -1200)
+		createTestSnapshot(sqlstore, "key3", -1200)
 
 		err := DeleteExpiredSnapshots(&m.DeleteExpiredSnapshotsCommand{})
 		So(err, ShouldBeNil)
@@ -146,7 +145,7 @@ func TestDeleteExpiredSnapshots(t *testing.T) {
 	})
 }
 
-func createTestSnapshot(x *xorm.Engine, key string, expires int64) *m.DashboardSnapshot {
+func createTestSnapshot(sqlstore *SqlStore, key string, expires int64) *m.DashboardSnapshot {
 	cmd := m.CreateDashboardSnapshotCommand{
 		Key:       key,
 		DeleteKey: "delete" + key,
@@ -163,7 +162,7 @@ func createTestSnapshot(x *xorm.Engine, key string, expires int64) *m.DashboardS
 	// Set expiry date manually - to be able to create expired snapshots
 	if expires < 0 {
 		expireDate := time.Now().Add(time.Second * time.Duration(expires))
-		_, err = x.Exec("UPDATE dashboard_snapshot SET expires = ? WHERE id = ?", expireDate, cmd.Result.Id)
+		_, err = sqlstore.engine.Exec("UPDATE dashboard_snapshot SET expires = ? WHERE id = ?", expireDate, cmd.Result.Id)
 		So(err, ShouldBeNil)
 	}
 

pkg/services/sqlstore/migrations/migrations_test.go (+1, -1)

@@ -39,7 +39,7 @@ func TestMigrations(t *testing.T) {
 			has, err := x.SQL(sql).Get(&r)
 			So(err, ShouldBeNil)
 			So(has, ShouldBeTrue)
-			expectedMigrations := mg.MigrationsCount() - 2 //we currently skip to migrations. We should rewrite skipped migrations to write in the log as well. until then we have to keep this
+			expectedMigrations := mg.MigrationsCount() //we currently skip to migrations. We should rewrite skipped migrations to write in the log as well. until then we have to keep this
 			So(r.Count, ShouldEqual, expectedMigrations)
 
 			mg = NewMigrator(x)

pkg/services/sqlstore/migrations/org_mig.go (+0, -21)

@@ -48,27 +48,6 @@ func addOrgMigrations(mg *Migrator) {
 	mg.AddMigration("create org_user table v1", NewAddTableMigration(orgUserV1))
 	addTableIndicesMigrations(mg, "v1", orgUserV1)
 
-	//-------  copy data from old table-------------------
-	mg.AddMigration("copy data account to org", NewCopyTableDataMigration("org", "account", map[string]string{
-		"id":      "id",
-		"version": "version",
-		"name":    "name",
-		"created": "created",
-		"updated": "updated",
-	}).IfTableExists("account"))
-
-	mg.AddMigration("copy data account_user to org_user", NewCopyTableDataMigration("org_user", "account_user", map[string]string{
-		"id":      "id",
-		"org_id":  "account_id",
-		"user_id": "user_id",
-		"role":    "role",
-		"created": "created",
-		"updated": "updated",
-	}).IfTableExists("account_user"))
-
-	mg.AddMigration("Drop old table account", NewDropTableMigration("account"))
-	mg.AddMigration("Drop old table account_user", NewDropTableMigration("account_user"))
-
 	mg.AddMigration("Update org table charset", NewTableCharsetMigration("org", []*Column{
 		{Name: "name", Type: DB_NVarchar, Length: 190, Nullable: false},
 		{Name: "address1", Type: DB_NVarchar, Length: 255, Nullable: true},

pkg/services/sqlstore/migrator/migrator.go (+1, -1)

@@ -125,7 +125,7 @@ func (mg *Migrator) exec(m Migration, sess *xorm.Session) error {
 		sql, args := condition.Sql(mg.dialect)
 		results, err := sess.SQL(sql).Query(args...)
 		if err != nil || len(results) == 0 {
-			mg.Logger.Info("Skipping migration condition not fulfilled", "id", m.Id())
+			mg.Logger.Debug("Skipping migration condition not fulfilled", "id", m.Id())
 			return sess.Rollback()
 		}
 	}

pkg/services/sqlstore/quota_test.go (+1, -0)

@@ -43,6 +43,7 @@ func TestQuotaCommandsAndQueries(t *testing.T) {
 			Name:   "TestOrg",
 			UserId: 1,
 		}
+
 		err := CreateOrg(&userCmd)
 		So(err, ShouldBeNil)
 		orgId = userCmd.Result.Id

pkg/services/sqlstore/sqlstore.go (+161, -140)

@@ -13,6 +13,7 @@ import (
 	"github.com/grafana/grafana/pkg/bus"
 	"github.com/grafana/grafana/pkg/log"
 	m "github.com/grafana/grafana/pkg/models"
+	"github.com/grafana/grafana/pkg/registry"
 	"github.com/grafana/grafana/pkg/services/annotations"
 	"github.com/grafana/grafana/pkg/services/sqlstore/migrations"
 	"github.com/grafana/grafana/pkg/services/sqlstore/migrator"
@@ -27,151 +28,164 @@ import (
 	_ "github.com/grafana/grafana/pkg/tsdb/mssql"
 )
 
-type DatabaseConfig struct {
-	Type, Host, Name, User, Pwd, Path, SslMode string
-	CaCertPath                                 string
-	ClientKeyPath                              string
-	ClientCertPath                             string
-	ServerCertName                             string
-	MaxOpenConn                                int
-	MaxIdleConn                                int
-	ConnMaxLifetime                            int
-}
-
 var (
 	x       *xorm.Engine
 	dialect migrator.Dialect
 
-	HasEngine bool
-
-	DbCfg DatabaseConfig
-
-	UseSQLite3 bool
-	sqlog      log.Logger = log.New("sqlstore")
+	sqlog log.Logger = log.New("sqlstore")
 )
 
-func EnsureAdminUser() {
-	statsQuery := m.GetSystemStatsQuery{}
-
-	if err := bus.Dispatch(&statsQuery); err != nil {
-		log.Fatal(3, "Could not determine if admin user exists: %v", err)
-		return
-	}
-
-	if statsQuery.Result.Users > 0 {
-		return
-	}
-
-	cmd := m.CreateUserCommand{}
-	cmd.Login = setting.AdminUser
-	cmd.Email = setting.AdminUser + "@localhost"
-	cmd.Password = setting.AdminPassword
-	cmd.IsAdmin = true
+func init() {
+	registry.Register(&registry.Descriptor{
+		Name:         "SqlStore",
+		Instance:     &SqlStore{},
+		InitPriority: registry.High,
+	})
+}
 
-	if err := bus.Dispatch(&cmd); err != nil {
-		log.Error(3, "Failed to create default admin user", err)
-		return
-	}
+type SqlStore struct {
+	Cfg *setting.Cfg `inject:""`
 
-	log.Info("Created default admin user: %v", setting.AdminUser)
+	dbCfg           DatabaseConfig
+	engine          *xorm.Engine
+	log             log.Logger
+	skipEnsureAdmin bool
 }
 
-func NewEngine() *xorm.Engine {
-	x, err := getEngine()
+func (ss *SqlStore) Init() error {
+	ss.log = log.New("sqlstore")
+	ss.readConfig()
 
-	if err != nil {
-		sqlog.Crit("Fail to connect to database", "error", err)
-		os.Exit(1)
-	}
-
-	err = SetEngine(x)
+	engine, err := ss.getEngine()
 
 	if err != nil {
-		sqlog.Error("Fail to initialize orm engine", "error", err)
-		os.Exit(1)
+		return fmt.Errorf("Fail to connect to database: %v", err)
 	}
 
-	return x
-}
+	ss.engine = engine
 
-func SetEngine(engine *xorm.Engine) (err error) {
+	// temporarily still set global var
 	x = engine
 	dialect = migrator.NewDialect(x)
-
 	migrator := migrator.NewMigrator(x)
 	migrations.AddMigrations(migrator)
 
 	if err := migrator.Start(); err != nil {
-		return fmt.Errorf("Sqlstore::Migration failed err: %v\n", err)
+		return fmt.Errorf("Migration failed err: %v", err)
 	}
 
 	// Init repo instances
 	annotations.SetRepository(&SqlAnnotationRepo{})
+
+	// ensure admin user
+	if ss.skipEnsureAdmin {
+		return nil
+	}
+
+	return ss.ensureAdminUser()
+}
+
+func (ss *SqlStore) ensureAdminUser() error {
+	statsQuery := m.GetSystemStatsQuery{}
+
+	if err := bus.Dispatch(&statsQuery); err != nil {
+		return fmt.Errorf("Could not determine if admin user exists: %v", err)
+	}
+
+	if statsQuery.Result.Users > 0 {
+		return nil
+	}
+
+	cmd := m.CreateUserCommand{}
+	cmd.Login = setting.AdminUser
+	cmd.Email = setting.AdminUser + "@localhost"
+	cmd.Password = setting.AdminPassword
+	cmd.IsAdmin = true
+
+	if err := bus.Dispatch(&cmd); err != nil {
+		return fmt.Errorf("Failed to create admin user: %v", err)
+	}
+
+	ss.log.Info("Created default admin user: %v", setting.AdminUser)
+
 	return nil
 }
 
-func getEngine() (*xorm.Engine, error) {
-	LoadConfig()
+func (ss *SqlStore) buildConnectionString() (string, error) {
+	cnnstr := ss.dbCfg.ConnectionString
 
-	cnnstr := ""
-	switch DbCfg.Type {
+	// special case used by integration tests
+	if cnnstr != "" {
+		return cnnstr, nil
+	}
+
+	switch ss.dbCfg.Type {
 	case migrator.MYSQL:
 		protocol := "tcp"
-		if strings.HasPrefix(DbCfg.Host, "/") {
+		if strings.HasPrefix(ss.dbCfg.Host, "/") {
 			protocol = "unix"
 		}
 
 		cnnstr = fmt.Sprintf("%s:%s@%s(%s)/%s?collation=utf8mb4_unicode_ci&allowNativePasswords=true",
-			url.QueryEscape(DbCfg.User), url.QueryEscape(DbCfg.Pwd), protocol, DbCfg.Host, url.PathEscape(DbCfg.Name))
+			ss.dbCfg.User, ss.dbCfg.Pwd, protocol, ss.dbCfg.Host, ss.dbCfg.Name)
 
-		if DbCfg.SslMode == "true" || DbCfg.SslMode == "skip-verify" {
-			tlsCert, err := makeCert("custom", DbCfg)
+		if ss.dbCfg.SslMode == "true" || ss.dbCfg.SslMode == "skip-verify" {
+			tlsCert, err := makeCert("custom", ss.dbCfg)
 			if err != nil {
-				return nil, err
+				return "", err
 			}
 			mysql.RegisterTLSConfig("custom", tlsCert)
 			cnnstr += "&tls=custom"
 		}
 	case migrator.POSTGRES:
 		var host, port = "127.0.0.1", "5432"
-		fields := strings.Split(DbCfg.Host, ":")
+		fields := strings.Split(ss.dbCfg.Host, ":")
 		if len(fields) > 0 && len(strings.TrimSpace(fields[0])) > 0 {
 			host = fields[0]
 		}
 		if len(fields) > 1 && len(strings.TrimSpace(fields[1])) > 0 {
 			port = fields[1]
 		}
-		cnnstr = fmt.Sprintf("user='%s' password='%s' host='%s' port='%s' dbname='%s' sslmode='%s' sslcert='%s' sslkey='%s' sslrootcert='%s'",
-			strings.Replace(DbCfg.User, `'`, `\'`, -1),
-			strings.Replace(DbCfg.Pwd, `'`, `\'`, -1),
-			strings.Replace(host, `'`, `\'`, -1),
-			strings.Replace(port, `'`, `\'`, -1),
-			strings.Replace(DbCfg.Name, `'`, `\'`, -1),
-			strings.Replace(DbCfg.SslMode, `'`, `\'`, -1),
-			strings.Replace(DbCfg.ClientCertPath, `'`, `\'`, -1),
-			strings.Replace(DbCfg.ClientKeyPath, `'`, `\'`, -1),
-			strings.Replace(DbCfg.CaCertPath, `'`, `\'`, -1),
-		)
+		if ss.dbCfg.Pwd == "" {
+			ss.dbCfg.Pwd = "''"
+		}
+		if ss.dbCfg.User == "" {
+			ss.dbCfg.User = "''"
+		}
+		cnnstr = fmt.Sprintf("user=%s password=%s host=%s port=%s dbname=%s sslmode=%s sslcert=%s sslkey=%s sslrootcert=%s", ss.dbCfg.User, ss.dbCfg.Pwd, host, port, ss.dbCfg.Name, ss.dbCfg.SslMode, ss.dbCfg.ClientCertPath, ss.dbCfg.ClientKeyPath, ss.dbCfg.CaCertPath)
 	case migrator.SQLITE:
-		if !filepath.IsAbs(DbCfg.Path) {
-			DbCfg.Path = filepath.Join(setting.DataPath, DbCfg.Path)
+		// special case for tests
+		if !filepath.IsAbs(ss.dbCfg.Path) {
+			ss.dbCfg.Path = filepath.Join(setting.DataPath, ss.dbCfg.Path)
 		}
-		os.MkdirAll(path.Dir(DbCfg.Path), os.ModePerm)
-		cnnstr = "file:" + DbCfg.Path + "?cache=shared&mode=rwc"
+		os.MkdirAll(path.Dir(ss.dbCfg.Path), os.ModePerm)
+		cnnstr = "file:" + ss.dbCfg.Path + "?cache=shared&mode=rwc"
 	default:
-		return nil, fmt.Errorf("Unknown database type: %s", DbCfg.Type)
+		return "", fmt.Errorf("Unknown database type: %s", ss.dbCfg.Type)
 	}
 
-	sqlog.Info("Initializing DB", "dbtype", DbCfg.Type)
-	engine, err := xorm.NewEngine(DbCfg.Type, cnnstr)
+	return cnnstr, nil
+}
+
+func (ss *SqlStore) getEngine() (*xorm.Engine, error) {
+	connectionString, err := ss.buildConnectionString()
+
 	if err != nil {
 		return nil, err
 	}
 
-	engine.SetMaxOpenConns(DbCfg.MaxOpenConn)
-	engine.SetMaxIdleConns(DbCfg.MaxIdleConn)
-	engine.SetConnMaxLifetime(time.Second * time.Duration(DbCfg.ConnMaxLifetime))
-	debugSql := setting.Raw.Section("database").Key("log_queries").MustBool(false)
+	sqlog.Info("Connecting to DB", "dbtype", ss.dbCfg.Type)
+	engine, err := xorm.NewEngine(ss.dbCfg.Type, connectionString)
+	if err != nil {
+		return nil, err
+	}
+
+	engine.SetMaxOpenConns(ss.dbCfg.MaxOpenConn)
+	engine.SetMaxIdleConns(ss.dbCfg.MaxIdleConn)
+	engine.SetConnMaxLifetime(time.Second * time.Duration(ss.dbCfg.ConnMaxLifetime))
+
+	// configure sql logging
+	debugSql := ss.Cfg.Raw.Section("database").Key("log_queries").MustBool(false)
 	if !debugSql {
 		engine.SetLogger(&xorm.DiscardLogger{})
 	} else {
@@ -183,95 +197,90 @@ func getEngine() (*xorm.Engine, error) {
 	return engine, nil
 }
 
-func LoadConfig() {
-	sec := setting.Raw.Section("database")
+func (ss *SqlStore) readConfig() {
+	sec := ss.Cfg.Raw.Section("database")
 
 	cfgURL := sec.Key("url").String()
 	if len(cfgURL) != 0 {
 		dbURL, _ := url.Parse(cfgURL)
-		DbCfg.Type = dbURL.Scheme
-		DbCfg.Host = dbURL.Host
+		ss.dbCfg.Type = dbURL.Scheme
+		ss.dbCfg.Host = dbURL.Host
 
 		pathSplit := strings.Split(dbURL.Path, "/")
 		if len(pathSplit) > 1 {
-			DbCfg.Name = pathSplit[1]
+			ss.dbCfg.Name = pathSplit[1]
 		}
 
 		userInfo := dbURL.User
 		if userInfo != nil {
-			DbCfg.User = userInfo.Username()
-			DbCfg.Pwd, _ = userInfo.Password()
+			ss.dbCfg.User = userInfo.Username()
+			ss.dbCfg.Pwd, _ = userInfo.Password()
 		}
 	} else {
-		DbCfg.Type = sec.Key("type").String()
-		DbCfg.Host = sec.Key("host").String()
-		DbCfg.Name = sec.Key("name").String()
-		DbCfg.User = sec.Key("user").String()
-		if len(DbCfg.Pwd) == 0 {
-			DbCfg.Pwd = sec.Key("password").String()
-		}
-	}
-	DbCfg.MaxOpenConn = sec.Key("max_open_conn").MustInt(0)
-	DbCfg.MaxIdleConn = sec.Key("max_idle_conn").MustInt(0)
-	DbCfg.ConnMaxLifetime = sec.Key("conn_max_lifetime").MustInt(14400)
-
-	if DbCfg.Type == "sqlite3" {
-		UseSQLite3 = true
-		// only allow one connection as sqlite3 has multi threading issues that cause table locks
-		// DbCfg.MaxIdleConn = 1
-		// DbCfg.MaxOpenConn = 1
+		ss.dbCfg.Type = sec.Key("type").String()
+		ss.dbCfg.Host = sec.Key("host").String()
+		ss.dbCfg.Name = sec.Key("name").String()
+		ss.dbCfg.User = sec.Key("user").String()
+		ss.dbCfg.ConnectionString = sec.Key("connection_string").String()
+		ss.dbCfg.Pwd = sec.Key("password").String()
 	}
-	DbCfg.SslMode = sec.Key("ssl_mode").String()
-	DbCfg.CaCertPath = sec.Key("ca_cert_path").String()
-	DbCfg.ClientKeyPath = sec.Key("client_key_path").String()
-	DbCfg.ClientCertPath = sec.Key("client_cert_path").String()
-	DbCfg.ServerCertName = sec.Key("server_cert_name").String()
-	DbCfg.Path = sec.Key("path").MustString("data/grafana.db")
+
+	ss.dbCfg.MaxOpenConn = sec.Key("max_open_conn").MustInt(0)
+	ss.dbCfg.MaxIdleConn = sec.Key("max_idle_conn").MustInt(2)
+	ss.dbCfg.ConnMaxLifetime = sec.Key("conn_max_lifetime").MustInt(14400)
+
+	ss.dbCfg.SslMode = sec.Key("ssl_mode").String()
+	ss.dbCfg.CaCertPath = sec.Key("ca_cert_path").String()
+	ss.dbCfg.ClientKeyPath = sec.Key("client_key_path").String()
+	ss.dbCfg.ClientCertPath = sec.Key("client_cert_path").String()
+	ss.dbCfg.ServerCertName = sec.Key("server_cert_name").String()
+	ss.dbCfg.Path = sec.Key("path").MustString("data/grafana.db")
 }
 
-func InitTestDB(t *testing.T) *xorm.Engine {
-	selectedDb := migrator.SQLITE
-	// selectedDb := migrator.MYSQL
-	// selectedDb := migrator.POSTGRES
+func InitTestDB(t *testing.T) *SqlStore {
+	sqlstore := &SqlStore{}
+	sqlstore.skipEnsureAdmin = true
 
-	var x *xorm.Engine
-	var err error
+	dbType := migrator.SQLITE
 
 	// environment variable present for test db?
 	if db, present := os.LookupEnv("GRAFANA_TEST_DB"); present {
-		selectedDb = db
+		dbType = db
 	}
 
-	switch strings.ToLower(selectedDb) {
-	case migrator.MYSQL:
-		x, err = xorm.NewEngine(sqlutil.TestDB_Mysql.DriverName, sqlutil.TestDB_Mysql.ConnStr)
-	case migrator.POSTGRES:
-		x, err = xorm.NewEngine(sqlutil.TestDB_Postgres.DriverName, sqlutil.TestDB_Postgres.ConnStr)
+	// set test db config
+	sqlstore.Cfg = setting.NewCfg()
+	sec, _ := sqlstore.Cfg.Raw.NewSection("database")
+	sec.NewKey("type", dbType)
+
+	switch dbType {
+	case "mysql":
+		sec.NewKey("connection_string", sqlutil.TestDB_Mysql.ConnStr)
+	case "postgres":
+		sec.NewKey("connection_string", sqlutil.TestDB_Postgres.ConnStr)
 	default:
-		x, err = xorm.NewEngine(sqlutil.TestDB_Sqlite3.DriverName, sqlutil.TestDB_Sqlite3.ConnStr)
+		sec.NewKey("connection_string", sqlutil.TestDB_Sqlite3.ConnStr)
 	}
 
-	x.DatabaseTZ = time.UTC
-	x.TZLocation = time.UTC
-
+	// need to get engine to clean db before we init
+	engine, err := xorm.NewEngine(dbType, sec.Key("connection_string").String())
 	if err != nil {
 		t.Fatalf("Failed to init test database: %v", err)
 	}
 
-	dialect = migrator.NewDialect(x)
-
-	err = dialect.CleanDB()
-	if err != nil {
+	dialect = migrator.NewDialect(engine)
+	if err := dialect.CleanDB(); err != nil {
 		t.Fatalf("Failed to clean test db %v", err)
 	}
 
-	if err := SetEngine(x); err != nil {
-		t.Fatal(err)
+	if err := sqlstore.Init(); err != nil {
+		t.Fatalf("Failed to init test database: %v", err)
 	}
 
-	// x.ShowSQL()
+	//// sqlstore.engine.DatabaseTZ = time.UTC
+	//// sqlstore.engine.TZLocation = time.UTC
 
-	return x
+	return sqlstore
 }
 
 func IsTestDbMySql() bool {
@@ -289,3 +298,15 @@ func IsTestDbPostgres() bool {
 
 	return false
 }
+
+type DatabaseConfig struct {
+	Type, Host, Name, User, Pwd, Path, SslMode string
+	CaCertPath                                 string
+	ClientKeyPath                              string
+	ClientCertPath                             string
+	ServerCertName                             string
+	ConnectionString                           string
+	MaxOpenConn                                int
+	MaxIdleConn                                int
+	ConnMaxLifetime                            int
+}
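The new ConnectionString field gives buildConnectionString() a short-circuit before any per-driver string building, which is what InitTestDB relies on. A minimal sketch of wiring a store by hand through that path, written inside the sqlstore package since skipEnsureAdmin is unexported (newTestStore and its arguments are hypothetical; setting.NewCfg() can only be used like this because it now initializes Raw, see the pkg/setting change below):

package sqlstore

import "github.com/grafana/grafana/pkg/setting"

// newTestStore builds a SqlStore whose connection string is taken verbatim
// from the "connection_string" key instead of being assembled per driver.
func newTestStore(dbType, connStr string) (*SqlStore, error) {
	ss := &SqlStore{skipEnsureAdmin: true}
	ss.Cfg = setting.NewCfg()

	sec, err := ss.Cfg.Raw.NewSection("database")
	if err != nil {
		return nil, err
	}
	sec.NewKey("type", dbType)               // e.g. "sqlite3", "mysql", "postgres"
	sec.NewKey("connection_string", connStr) // bypasses buildConnectionString's driver logic

	if err := ss.Init(); err != nil {
		return nil, err
	}
	return ss, nil
}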

pkg/setting/setting.go (+3, -1)

@@ -495,7 +495,9 @@ func validateStaticRootPath() error {
 }
 
 func NewCfg() *Cfg {
-	return &Cfg{}
+	return &Cfg{
+		Raw: ini.Empty(),
+	}
 }
 
 func (cfg *Cfg) Load(args *CommandLineArgs) error {

public/app/core/specs/file_export.jest.ts (+2, -5)

@@ -63,7 +63,6 @@ describe('file_export', () => {
   });
 
   describe('when exporting table data to csv', () => {
-
     it('should properly escape special characters and quote all string values', () => {
       const inputTable = {
         columns: [
@@ -104,13 +103,11 @@ describe('file_export', () => {
 
     it('should decode HTML encoded characters', function() {
       const inputTable = {
-        columns: [
-          { text: 'string_value' },
-        ],
+        columns: [{ text: 'string_value' }],
         rows: [
           ['&quot;&amp;&auml;'],
           ['<strong>&quot;some html&quot;</strong>'],
-          ['<a href="http://something/index.html">some text</a>']
+          ['<a href="http://something/index.html">some text</a>'],
         ],
       };
 

public/app/features/templating/template_srv.ts (+1, -1)

@@ -75,7 +75,7 @@ export class TemplateSrv {
       return luceneEscape(value);
     }
     if (value instanceof Array && value.length === 0) {
-        return '__empty__';
+      return '__empty__';
     }
     var quotedValues = _.map(value, function(val) {
       return '"' + luceneEscape(val) + '"';

public/app/plugins/datasource/prometheus/specs/result_transformer.jest.ts (+1, -1)

@@ -49,7 +49,7 @@ describe('Prometheus Result Transformer', () => {
     });
 
     it('should column title include refId if response count is more than 2', () => {
-      var table = ctx.resultTransformer.transformMetricDataToTable(response.data.result, 2, "B");
+      var table = ctx.resultTransformer.transformMetricDataToTable(response.data.result, 2, 'B');
       expect(table.type).toBe('table');
       expect(table.columns).toEqual([
         { text: 'Time', type: 'time' },